code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import re
def findBeginAndEndIndexesOfRegex(line,regex):
"""
Find match start and match end by giving regex
:param line: String
:param regex: some regexpr
:return: begin and end of matching
>>> [(x['start'],x['end']) for x in findBeginAndEndIndexesOfRegex(line="find string stop in this stop sentance",regex=re.compile(r"st.*?p",re.MULTILINE))]
[(5, 16), (25, 29)]
>>> findBeginAndEndIndexesOfRegex(line="find string stop in this stop sentance",regex=re.compile(r"nostop",re.MULTILINE))
[]
"""
matches = re.finditer(regex, line)
str_matches = []
for matchNum, match in enumerate(matches):
matchNum = matchNum + 1
str_matches.append({'start': match.start(), 'end': match.end()})
return str_matches
def match_inside_matches_array(match, matches):
"""
Do we inside match area
:param match: one match with start and end fields
:param matches: array of matching data
:return: True if inside False othercase
>>> matches = [{'start': 0, 'end': 2}, {'start': 6, 'end': 8}, {'start': 11, 'end': 13}]
>>> match_inside_matches_array({'start':2,'end':5},matches)
False
>>> match_inside_matches_array({'start':0,'end':2},matches)
True
"""
for m in matches:
if match['start']>=m['start'] and match['end']<=m['end']:
return True
return False
def get_linux_commands():
return ['adduser', 'arch', 'awk', 'bc', 'cal','cd', 'cat', 'chdir', 'chgrp', 'chkconfig', 'chmod', 'chown', 'chroot', 'cksum', 'clear', 'cmp', 'comm', 'cp', 'cron', 'crontab', 'csplit', 'cut', 'date', 'dc', 'dd', 'df', 'diff', 'diff3', 'dir', 'dircolors', 'dirname', 'du', 'echo', 'ed', 'egrep', 'eject', 'env', 'expand', 'expr', 'factor', 'FALSE', 'fdformat', 'fdisk', 'fgrep', 'find', 'fmt', 'fold', 'format', 'free', 'fsck', 'gawk', 'grep', 'groups', 'gzip', 'head', 'hostname', 'id', 'info', 'install', 'join', 'kill', 'less', 'ln', 'locate', 'logname', 'lpc', 'lpr', 'lprm', 'ls', 'man', 'mkdir', 'mkfifo', 'mknod', 'more', 'mount', 'mv', 'nice', 'nl', 'nohup', 'passwd', 'paste', 'pathchk', 'pr', 'printcap', 'printenv', 'printf', 'ps', 'pwd', 'quota', 'quotacheck', 'quotactl', 'ram', 'rcp', 'rm', 'rmdir', 'rpm', 'rsync', 'screen', 'sdiff', 'sed', 'select', 'seq', 'shutdown', 'sleep', 'sort', 'split', 'su', 'sum', 'symlink', 'sync', 'tac', 'tail', 'tar', 'tee', 'test', 'time', 'touch', 'top', 'traceroute', 'tr', 'TRUE', 'tsort', 'tty', 'umount', 'uname', 'unexpand', 'uniq', 'units', 'unshar', 'useradd', 'usermod', 'users', 'uuencode', 'uudecode', 'vdir', 'watch', 'wc', 'whereis', 'which', 'who', 'whoami', 'xargs', 'yes'] | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/Utils.py | Utils.py |
import os,subprocess
import traceback
from Zpy.Pipeline import Pipeline
from Zpy.languages.LanguageAnalyzer import LanguageAnalyzer
class Processor():
def __init__(self):
self.pipeline = Pipeline()
self.language_analyzer = LanguageAnalyzer()
self.last_zcommand = ""
self.info = {
'pipes_count' : 0
}
def forward(self, line, stdin =""):
"""
Evaluate command by some language interpreter
:param line: line command
:param stdin: stdin value
:return: result of execution (for unix command we dont return nothing if lenght of pipe items = 1)
>>> import tempfile, os
>>> tempdir = tempfile.gettempdir()
>>> tmpfile = os.path.join(tempdir,'zpy_test.txt')
>>> proc = Processor()
>>> forward = proc.forward
>>> len(forward("['.', '..'] |[for] ls $z"))
2
>>> forward('"asd" |[for] z')
['asd']
>>> forward('j [2,3,4,5] |[for] j z + 15')
[17, 18, 19, 20]
>>> forward("~import os, re")
''
>>> forward("'123'*3")
'123123123'
>>> forward("[i + 1 for i in range(10)]")
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> forward("echo 'asd' | z")
'asd'
>>> forward("echo 'some_data' | (z.strip() + 'data') | cat > %s" % tmpfile )
''
>>> forward("cat %s | z" % tmpfile)
'some_datadata'
>>> forward("cd %s" % tempdir.strip())
''
>>> forward("pwd | True if len(os.listdir(z.strip())) > 0 else False ")
True
"""
#>>> forward('"https://www.reddit.com/r/books/" | `wget -qO- $z | re.findall(r"Book[^\.].*?",z,re.IGNORECASE) | True if len(z) > 0 else False')
#True
if len(line) == 0:
return
commands = self.pipeline.split(line=line)
self.info['pipes_count'] = len(commands)
for command in commands:
lang = self.language_analyzer.analize(command)
try:
stdin = lang.evaluate(command.strip(), self, stdin=stdin)
except SyntaxError as e:
print("Cannot evaluate line `%s`" % command.strip())
print(e)
except Exception as e:
traceback.print_exc()
if (isinstance(stdin, str) and stdin == "") or stdin is None:
pass
else:
if isinstance(stdin,str):
stdin = stdin.strip()
self.last_zcommand = line
return stdin | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/Processor.py | Processor.py |
from Zpy.modules.helpful.z import z_base
class zpy(z_base):
#Constants
SCRIPTS = "SCRIPTS_PY"
MODULE = "MODULE_PY"
DEFAULT_IMPORTS = "DEFAULT_IMPORTS_PY"
def __init__(self, processor):
super().__init__(processor)
"""
Scripts
"""
def get_scripts(self):
return self.get_section(section=self.SCRIPTS)
def script(self,name):
script_section = self.get_script_section_and_config()['section']
return script_section[name]
def add_script(self, name):
def wrap(zpy_input):
return self.add_new_script(name=name, script=zpy_input)
return wrap
def remove_script(self, name):
return self.remove_from_section(self.SCRIPTS,name)
def add_new_script(self, name, script):
return self.add_to_section(self.SCRIPTS, name, value=script)
"""
Utils
"""
def as_table(self, line):
"""
:param line: Convert data which splited by '\n' or array into beautiful table splited by '\n'
:return: beautified table
>>> len(zpy(processor=None).as_table([[0, 1, 4, 9, 16], [25, 36, 49, 64, 81]])) > 0
True
"""
from terminaltables import AsciiTable, SingleTable
if isinstance(line, str):
arr = line.split('\n')
prepared_arr = [' '.join(x.split()).split(' ') for x in arr]
else: #Iterable
prepared_arr = line
##Todo refactor
#prepared_arr = [' '.join(x.split() if isinstance(x,str) else str(x)).split(' ') for x in arr]
return SingleTable(prepared_arr).table
"""
Evaluation
"""
def eval(self, name, input=""):
script_section = self.get_script_section_and_config()['section']
script = script_section[name]
return self.processor.forward(script, stdin=input)
def eval_with_input(self, name):
def wrap(zpy_input):
self.eval(name=name, input=zpy_input)
return wrap
def last_zcommand(self):
return self.processor.last_zcommand
"""
Modules
"""
def get_modules(self):
return self.get_section(section=self.MODULE)
def remove_module(self, name):
return self.remove_from_section(section=self.MODULE, name=name)
def add_module(self, name, module):
return self.add_to_section(self.MODULE, name, module)
def get_module_dict(self):
section = self.get_section_and_config(self.MODULE)['section']
return section
"""
Default imports
"""
def get_def_imports(self):
return self.get_section(section=self.DEFAULT_IMPORTS)
def remove_def_imports(self, name):
return self.remove_from_section(section=self.DEFAULT_IMPORTS, name=name)
def add_def_imports(self, name, module):
return self.add_to_section(self.DEFAULT_IMPORTS, name, module)
def get_def_imports_dict(self):
section = self.get_section_and_config(self.DEFAULT_IMPORTS)['section']
return section | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/modules/helpful/zpy.py | zpy.py |
from Zpy.languages.python.python_lang import PythonLanguage
from Zpy.languages.shell.unix_lang import UnixLang
from Zpy.languages.js.js_lang import JavascriptLanguage
from Zpy.languages.chain_pool.for_lang import ForLanguage
class LanguageAnalyzer():
def __init__(self):
self.languages = [JavascriptLanguage(),UnixLang(),ForLanguage(), PythonLanguage()]
def get_lang_for_complete_line(self, line_):
"""
Analyze current line and returns language which will complete this line
:param line_: some command
:return: Language which will complete this line
>>> analize = LanguageAnalyzer().get_lang_for_complete_line
>>> analize('`git').__class__.__name__
'UnixLang'
"""
line = line_.strip()
selected_langs = list(filter(lambda lang: lang.isLangPrefix(line), self.languages))
if len(selected_langs) == 0:
raise Exception("Cannot find language for completion this line %s" % line)
if len(selected_langs) > 1:
raise Exception("Find more than one langs(%s) for comletion this line %s" % (selected_langs.join(","), line))
#print("FINDED LANG", selected_langs[0])
return selected_langs[0]
def analize(self, line):
"""
Analyze current command and returns language
:param line: some command
:return: language which have this syntax
>>> analize = LanguageAnalyzer().analize
>>> from Zpy.Utils import get_linux_commands
>>> unix_commands = get_linux_commands()
>>> analize('pwd').__class__.__name__
'UnixLang'
>>> list(filter(lambda x : "UnixLang" != x, [analize(x).__class__.__name__ for x in unix_commands]))
[]
>>> analize('`git').__class__.__name__
'UnixLang'
>>> analize('git').__class__.__name__
'PythonLanguage'
>>> analize('[i for i in range(11)').__class__.__name__
'PythonLanguage'
>>> analize(' j 2 + 3').__class__.__name__
'JavascriptLanguage'
>>> analize('[for] a').__class__.__name__
'ForLanguage'
"""
comm = line.strip()
selected_langs = list( filter(lambda lang : lang.isLang(comm), self.languages))
if len(selected_langs) == 0:
raise Exception("Cannot find language for evaluate this command %s" % comm)
if len(selected_langs) > 1:
raise Exception("Find more than one langs(%s) for this command %s" % (selected_langs.join(","), comm))
return selected_langs[0] | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/LanguageAnalyzer.py | LanguageAnalyzer.py |
from prompt_toolkit.completion import Completion
class PythonCompleter():
def __init__(self):
self.last_command = {
"command": "",
"command_arguments" : ""
}
self.completions = []
# List of completions
# Taken from `xonsh` and `pygments` modules
completion_1 = ('__import__', 'import', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes',
'chr', 'classmethod', 'cmp', 'compile', 'complex', 'delattr', 'dict',
'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list',
'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct',
'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed',
'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str',
'sum', 'super', 'tuple', 'type', 'vars', 'zip')
completion_2 = ('__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
'unichr', 'unicode', 'vars', 'xrange', 'zip')
completion_3 = ('assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
'print', 'raise', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with', 'from')
completion_4 = ('and', 'else', 'for', 'if', 'in', 'is', 'lambda', 'not', 'or',
'+', '-', '/', '//', '%', '**', '|', '&', '~', '^', '>>', '<<', '<',
'<=', '>', '>=', '==', '!=', ',', '?', '??')
completion_5 = ('as', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'except', 'finally:', 'from', 'global', 'import',
'nonlocal', 'pass', 'raise', 'return', 'try:', 'while', 'with',
'yield ', '-', '/', '//', '%', '**', '|', '&', '~', '^', '>>', '<<',
'<', '<=', '->', '=', '+=', '-=', '*=', '/=', '%=', '**=',
'>>=', '<<=', '&=', '^=', '|=', '//=', ';', ':', '..')
self.command_list = set(completion_1) | set(completion_2) | set(completion_3) | set(completion_4) | set(completion_5)
def get_python_completion(self, line):
"""
Get completion for python
:param line: line for completions
:return: list of completions or empty list
"""
return list(filter(lambda x : x.startswith(line), self.command_list))
def complete(self, line):
"""
:param line: Complete line
:return: generator of completion
>>> completer = PythonCompleter()
>>> "with" in [i.text for i in list(completer.complete('with'))]
True
>>> "import" in [i.text for i in list(completer.complete('import'))]
True
>>> "somecommm" in [i.text for i in list(completer.complete('import'))]
False
>>> [i.text for i in list(completer.complete('for im'))]
['import']
"""
if len(line) > 0 and line[-1] == " ":
#End of command, do not complete
return
commands = line.strip().split(' ')
if len(commands) == 1:
# Command without arguments
command = commands[0]
# Check this command was be already using in search(looking in cache)
if not line.startswith(self.last_command['command']) or len(self.last_command['command']) == 0:
self.last_command = {
"command": command,
"command_arguments": ""
}
self.completions = self.get_python_completion(line)
for completion in filter(lambda x: x.startswith(line), self.completions):
yield Completion(completion, start_position=-len(line))
else:
# Check for arguments
arguments = commands[1:]
arguments_joined = " ".join(arguments)
if not arguments_joined.startswith(self.last_command["command_arguments"]) or len(
self.last_command['command_arguments']) == 0:
self.last_command["command_arguments"] = arguments_joined
#Recursion
completions = self.complete(arguments[-1])
for completion in completions:
yield Completion(completion.text, start_position=-len(arguments[-1])) | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/python/python_completer.py | python_completer.py |
import inspect
from Zpy.languages.Language import Language
from Zpy.languages.js.js_lang import JavascriptLanguage
from Zpy.languages.python.python_completer import PythonCompleter
from Zpy.languages.shell.unix_lang import UnixLang
from Zpy.languages.chain_pool.for_lang import ForLanguage
from Zpy.modules.helpful.zpy import zpy
from Zpy.modules.module_manager import ModuleManager
class PythonLanguage(Language):
def __init__(self):
super(PythonLanguage, self).__init__()
self.UnixLang = UnixLang()
self.JSLang = JavascriptLanguage()
self.ForLang = ForLanguage()
self.completer = PythonCompleter()
self.exec_command = []
self.module_manager = ModuleManager()
self.zpy = zpy(processor=None)
def isLang(self, line):
#If not unix :)
"""
:param line: command
:return: its python command
>>> PythonLanguage().isLang("ls")
False
>>> PythonLanguage().isLang("2 * 3")
True
"""
return not self.UnixLang.isLang(line) and not self.JSLang.isLang(line) and not self.ForLang.isLang(line)
def isLangPrefix(self, line):
"""
Do the same as isLang method, except evaluation. This lang will be evaluated for completion
:param line: command
:return: True if this lang
"""
return self.isLang(line)
def complete(self, line):
"""
Complete this line
:param line: line for completion
:return: generator of completions
>>> completer = PythonLanguage().completer
>>> sorted([i.text for i in list(completer.complete('fo'))])
['for', 'format']
>>> sorted([i.text for i in list(completer.complete('for'))])
['for', 'format']
>>> len(sorted([i.text for i in list(completer.complete('f'))]))>2
True
>>> sorted([i.text for i in list(completer.complete('fo'))])
['for', 'format']
>>> "with" in [i.text for i in list(completer.complete('with'))]
True
>>> "import" in [i.text for i in list(completer.complete('import'))]
True
>>> "somecommm" in [i.text for i in list(completer.complete('import'))]
False
>>> len(sorted([i.text for i in list(completer.complete(''))])) > 10
True
>>> "import" in [i.text for i in list(completer.complete('import'))]
True
"""
return self.completer.complete(line)
def get_module(self, processor):
if processor is None:
return {}
return self.module_manager.get_modules(processor=processor)
# TODO OPTIMIZE
def evaluate(self, line, processor=None, stdin=""):
"""
Evaluate python
:param line: python line
:param processor: zpy-processor
:param stdin: stdin wiil be passed in variable `z`
:return: result of evaluation
>>> pl = PythonLanguage()
>>> pl.evaluate(" 2 + 3 + 7 + 8")
20
>>> pl.evaluate("ls")
NameError("name 'ls' is not defined",)
>>> pl.evaluate("z['x'] * 15", stdin={'x':15})
225
>>> pl.evaluate("~import os, uuid")
''
>>> pl.evaluate("len(str(uuid.uuid4()))")
36
"""
if len(line) > 0 and line[0] == "~":
self.exec_command.append(line[1:])
return ""
# Add default imports
default_imports = self.zpy.get_def_imports_dict()
for name, imp in default_imports.items():
if imp not in self.exec_command:
self.exec_command.append(imp)
exec("\n".join(self.exec_command) + "\n")
# Set z-variable
z = stdin
# Set modules
for name, module in self.get_module(processor).items():
locals()[name.split(".")[-1]] = module
locals()["zpy"] = self.zpy
try:
res = eval(line)
is_func = inspect.isfunction(res) or inspect.ismethod(res)
if is_func and 'zpy_input' in inspect.getargspec(res).args:
return res(zpy_input=stdin)
elif is_func or inspect.isroutine(res):
return res(stdin)
return res
except Exception as ex:
return ex | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/python/python_lang.py | python_lang.py |
from Zpy.languages.Language import Language
#from Zpy.Processor import Processor
import re
import json
class ForLanguage(Language):
def __init__(self):
super(ForLanguage, self).__init__()
self.lang_regex = r"( *?\[.*?for.*?\] )"
self.lang_regex_compiled = re.compile(self.lang_regex)
def isLangPrefix(self, line):
return super().isLangPrefix(line)
def complete(self, line):
return ""
def prepare(self,line):
"""
Remove `j` character at begin
:param line: line to preparing
:return: striped line without j-symbol at begining
>>> prepare = ForLanguage().prepare
>>> prepare(" [ for ] j [1,2,3,4].map( function(e) { return e*2 } )")
'j [1,2,3,4].map( function(e) { return e*2 } )'
>>> prepare("[for] some string")
'some string'
"""
return re.sub(self.lang_regex,"",line,1).strip()
def isLang(self, line):
"""
JS language contain upper case letter `j` at begin, like ' j [1,2,3,4].map( function(e) { return e*2 } )'
:param line: command
:return: True if this is js syntax False otherwise
>>> isLang = ForLanguage().isLang
>>> isLang("[for] j 2 + 3 * 4")
True
>>> isLang("[for ] j 2 + 3 * 4")
True
>>> isLang("j 8 + 5")
False
"""
return self.lang_regex_compiled.match(line) is not None
def evaluate(self, line, processor=None, stdin=""):
"""
:param line:
:param processor:
:param stdin:
:return:
>>> evaluate = ForLanguage().evaluate
>>> proc = Processor()
>>> evaluate("for j z + 4",proc,[2,3,4])
[6, 7, 8]
>>> evaluate("for j z + 'asd'",proc,['a','b',3])
['aasd', 'basd', '3asd']
"""
line = self.prepare(line)
if stdin is None or len(stdin) == 0:
raise Exception("No stdin passed into FOR language")
if processor == None:
raise Exception("No processor passed into evaluate method")
iter_stdin = stdin
# Check is Iterable
#EEROOR STRING IS ITERABLE
"""
iterable = True
try:
iterator = iter(stdin)
except TypeError:
# not iterable
iterable = False
"""
iterable = isinstance(stdin,list) or isinstance(stdin, tuple)
# Split or add new dimension if is not iterable
if not iterable:
if isinstance(stdin,str):
iter_stdin = stdin.split("\n")
else:
iter_stdin = [stdin]
result = []
for stdin_obj in iter_stdin:
result.append(processor.forward(line, stdin=stdin_obj))
return result | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/chain_pool/for_lang.py | for_lang.py |
import os
import re
import json
from Zpy.languages.Language import Language
from Zpy.modules.helpful.zjs import zjs
import execjs
class JavascriptLanguage(Language):
def __init__(self):
super(JavascriptLanguage, self).__init__()
self.lang_regex = r"( *?j )"
self.lang_regex_compiled = re.compile( self.lang_regex)
self.zjs = zjs(processor=None)
#self.exec_command = ['fs = require("fs")','request = require("request")']
self.exec_command=[]
def prepare(self,line):
"""
Remove `j` character at begin
:param line: line to preparing
:return: striped line without j-symbol at begining
>>> prepare = JavascriptLanguage().prepare
>>> prepare(" j [1,2,3,4].map( function(e) { return e*2 } )")
'[1,2,3,4].map( function(e) { return e*2 } )'
>>> prepare("j some string")
'some string'
"""
return re.sub(self.lang_regex,"",line,1).strip()
def isLangPrefix(self, line):
return self.isLang(line)
def isLang(self, line):
"""
JS language contain upper case letter `j` at begin, like ' j [1,2,3,4].map( function(e) { return e*2 } )'
:param line: command
:return: True if this is js syntax False otherwise
>>> isLang = JavascriptLanguage().isLang
>>> isLang(" j 2 + 3 * 4")
True
>>> isLang(" 8 + 5")
False
"""
return self.lang_regex_compiled.match(line) is not None
def complete(self, line):
return ""
def get_require_regex(self):
"""
:return: regex for require statement
"""
regex = r"(?:(?:var|const|\ *?)\s*(.*?)\s*=\s*)?require\(['\"]([^'\"]+)['\"](?:, ['\"]([^'\"]+a)['\"])?\);?"
return regex
def get_require_modules(self,str):
"""
Find require statement
:param str:
:return: None if nothing finded, otherwise return arrays where elements is array with 2 elements, when first item is alias and second required module
>>> g = JavascriptLanguage().get_require_modules
>>> g('fs = require("fs");some=require("module")')
[['fs', 'fs'], ['some', 'module']]
>>> g('var req = require("request")')
[['req', 'request']]
>>> g("varreq=req")
"""
requirements = []
matches = re.finditer(self.get_require_regex(), str)
for match in matches:
if match.group(1) is not None and match.group(2) is not None:
requirements.append([match.group(1),match.group(2)])
if len(requirements) == 0:
return None
return requirements
def evaluate(self, line, processor=None, stdin=""):
"""
Evaluate js code
:param line: line for evaluation
param processor: Zpy processor
:param stdin: stdin will be passed to lang as Z variable
:return: evaluation results
>>> eval = JavascriptLanguage().evaluate
>>> eval("j setTimeout(function(){ sync_err('ASYNC') },200)")
Traceback (most recent call last):
...
execjs._exceptions.ProgramError: ASYNC
>>> eval("j setTimeout(function(){ sync('ASYNC') },200)")
'ASYNC'
>>> eval('j 2 + 3')
5
"""
# >>> eval("j fs.writeFileSync('/tmp/zpy.tmp', 'Zpy work with js!!!')")
# >>> eval("j fs.readFileSync('/tmp/zpy.tmp', 'utf8')")
#'Zpy work with js!!!'
line = self.prepare(line)
requirements = self.get_require_modules(line)
if requirements is not None:
for requirement in requirements:
comm = "%s = require('%s')" % (requirement[0],requirement[1])
if comm not in requirement:
self.exec_command.append(comm)
return "Added new requirements : { %s }" % str(requirements)
# Add default imports
default_imports = self.zjs.get_def_imports_dict()
for name, imp in default_imports.items():
comm = "%s = %s"%(name,imp)
if comm not in self.exec_command:
self.exec_command.append(comm)
z_variable = [] if stdin=="" else ['var z = %s' % json.dumps(stdin)]
sync_add_function = """
var sync_add = function(val){
sync_end.results.push(val)
}
"""
sync_end_function = """
var sync_end = function(){
process.stdout.write(JSON.stringify(['ok',sync_end.results]));
process.stdout.write( '\\n' );
process.exit(0)
}
sync_end.results = []
"""
sync_function = """
var sync = function(val) {
process.stdout.write(JSON.stringify(['ok',val]));
process.stdout.write( '\\n' );
process.exit(0)
} """
sync_err = """
var sync_err = function(err){
process.stdout.write(JSON.stringify(['err', err]));
process.stdout.write( '\\n' );
process.exit(0)
}
"""
regex_syncs_functions = r"sync_err.*?\(.*?\) | sync\(.*?\)"
for _ in (re.finditer(regex_syncs_functions, line)):
if(len(line)) > 0:
line = "(" + line + """) && Object.assign(function() { } ,{'skip_print':"zkip_"})""" #Set result of evaluation undefined, remove converting our function to result
break
ctx = execjs.compile(";\n".join(self.exec_command \
+ z_variable \
+[sync_function,sync_err] ))
return ctx.eval(line) | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/js/js_lang.py | js_lang.py |
import subprocess
from prompt_toolkit.completion import Completion
class UnixCompleter:
def __init__(self):
self.last_command = {
"command": "",
"command_arguments" : ""
}
self.completions = []
def get_unix_completions(self, line):
"""
Get unix complete by using `compgen`
:param line:
:return: array of completions
>>> completer = UnixCompleter()
>>> "ls" in completer.get_unix_completions('l')
True
>>> len(completer.get_unix_completions('pnoqincoqnaoszni2oindo1noozincoinscmzmcnzxx1211'))
0
"""
proc = subprocess.Popen(['compgen -c %s' % line], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)
return list(set(proc.communicate()[0].decode().strip().split('\n'))) # Only unique
def complete(self, line):
"""
:param line: Complete line
:return: generator of completion
True
"""
if len(line) > 0 and line[-1] == " ":
#End of command do not complete
return
commands = line.strip().split(' ')
if len(commands) == 1:
# Command without arguments
command = commands[0]
# Check this command was be already using in search(looking in cache)
if not line.startswith(self.last_command['command']) or len(self.last_command['command']) == 0:
self.last_command = {
"command": command,
"command_arguments" : ""
}
self.completions = self.get_unix_completions(line)
for completion in filter(lambda x: x.startswith(line), self.completions):
yield Completion(completion, start_position=-len(line) )
else:
# Check for arguments
arguments = commands[1:]
arguments_joined = " ".join(arguments)
if not arguments_joined.startswith(self.last_command["command_arguments"]) or len(self.last_command['command_arguments']) == 0:
self.last_command["command_arguments"] = arguments_joined
# Recursion
completions = self.complete(arguments[-1])
for completion in completions:
yield Completion(completion.text, start_position=-len(arguments[-1])) | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/shell/unix_completer.py | unix_completer.py |
import subprocess, os, re
from Zpy.Utils import get_linux_commands
from Zpy.storage.SuffixTree import SuffixTree
from Zpy.languages.Language import Language
from Zpy.languages.shell.unix_completer import UnixCompleter
import json
class WrondDstDirException(Exception): pass
class UnixLang(Language):
def __init__(self):
super(UnixLang, self).__init__()
self.buildTree()
self.current_dir = os.getcwd()
self.unix_completer = UnixCompleter()
def buildTree(self):
"""
Build suffix tree for fast search
:return:
"""
commands = get_linux_commands()
#Add leaf data column
self.Tree = SuffixTree([{"word": x, "leaf_data": True} for x in commands])
def isLang(self, line):
"""
Check if line is shell command
:param line: command
:return: True if is shell else False
>>> UnixLang().isLang("`vim")
True
>>> UnixLang().isLang("ls")
True
>>> UnixLang().isLang("2+3")
False
"""
return (len(line) > 0 and line[0]=="`") or self.Tree.find(line)
def isLangPrefix(self, line):
"""
Do the same as isLang method, except evaluation. This lang will be evaluated for completion
:param line: command
:return: True if this lang
"""
return self.isLang(line)
def complete(self, line):
"""
Complete this line
:param line: line for completion
:return: generator of completions
>>> completer = UnixLang()
>>> "ls" in [i.text for i in list(completer.complete('l'))]
True
"""
return self.unix_completer.complete(self.prepare(line))
def prepare(self,line):
"""
Prepare line to execution.
Remove ` character from line if their present in line and position of character is first.
:param line: line to prepare
:return: prepared line to execution
>>> UnixLang().prepare("`ls")
'ls'
>>> UnixLang().prepare("````````````````````````````pwd")
'pwd'
>>> UnixLang().prepare("ls")
'ls'
>>> UnixLang().prepare([i for i in range(10)])
'[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'
"""
if len(line) > 0 and line[0]=="`":
return self.prepare(line[1:])
return str(line)
def set_directory(self, dir):
"""
Change current directory
:param dir: new directory
:return: True if success else raise WrondDstDirException
>>> import os,tempfile
>>> ul = UnixLang()
>>> tmpdir = tempfile.gettempdir()
>>> newdir = os.path.join(tmpdir,"some/cool/dir/yeah")
>>> os.makedirs(newdir,exist_ok=True)
>>> ul.set_directory(newdir)
>>> os.getcwd()[-len("some/cool/dir/yeah"):]
'some/cool/dir/yeah'
>>> ul.evaluate('cd ..')
''
>>> ul.evaluate('ls',return_result_anyway=True).strip()
'yeah'
>>> ul.evaluate('cd ../../../some')
''
>>> ul.evaluate('ls',return_result_anyway=True).strip()
'cool'
"""
## Expand ~
expanded_path = os.path.expanduser(dir)
##Join
joined_path = os.path.join(self.current_dir, expanded_path)
if os.path.isdir(joined_path):
os.chdir(joined_path)
self.current_dir = joined_path
else:
raise WrondDstDirException("%s is not directory!" % dir)
def create_proc(self,line, env = None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None):
"""
Create subprocess Popen
https://docs.python.org/3/library/subprocess.html
:param line: line to evaluate
:param env: Env variable
:param stdin: stdin type
:param stdout: stdout type
:param stderr: stderr type
read more about types
https://docs.python.org/3/library/subprocess.html#subprocess.DEVNULL
:return: Popen
>>> UnixLang().create_proc('echo "DEVNULL|PIPE|STDOUT"', stdout=subprocess.PIPE).communicate()[0].decode().strip()
'DEVNULL|PIPE|STDOUT'
"""
proc = subprocess.Popen([line],
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=True,
env=env,
cwd=self.current_dir)
return proc
def eval(self, line, processor = None, env = None, input="", return_result_anyway = False):
"""
Helping function for evaluation
"""
if processor is not None and processor.info['pipes_count'] == 1:
subprocess.check_call([line], shell=True, env=env)
return ""
elif return_result_anyway or (processor is not None and processor.info['pipes_count'] > 1):
proc = self.create_proc(line ,env, stdin=subprocess.PIPE,stdout=subprocess.PIPE)
else:
subprocess.check_call([line], shell=True,env=env)
return ""
out = proc.communicate(str(input).encode())
if len(out) == 0:
return ""
try:
return out[0].decode()
except Exception as ex:
return str(out[0])[2:-1]
def evaluate(self, line, processor = None, stdin ="", return_result_anyway = True):
"""
Evaluate shell command
:param line: line to execute
:param processor: Processor instance
:param stdin: stdin will be passed to stdin PIPE
:param return_result_anyway: by default if pipes count = 1, result not will be returned, command will be executed and result will be passed to stdout
:return: result of evaluation, if processor.info['pipes_count'] = 1, nothing will be returned
>>> UnixLang().evaluate("echo 'xyz\\ncde\\n123' | sort -n",return_result_anyway=True).strip().split('\\n')
['cde', 'xyz', '123']
>>> UnixLang().evaluate("echo 'xyz\\ncde\\n123' | cat -",return_result_anyway=True).strip().split('\\n')
['xyz', 'cde', '123']
>>> os.path.isdir(UnixLang().evaluate("pwd").strip())
True
"""
line = self.prepare(line)
if isinstance(stdin, list) or isinstance(stdin, tuple):
#Try convert each item to str
try:
tmp_stdin = []
for item in stdin:
tmp_stdin.append(str(item))
stdin = "\n".join(tmp_stdin)
except:
stdin = json.dumps(stdin)
elif not isinstance(stdin, str):
stdin = json.dumps(stdin)
env = os.environ.copy()
env['z'] = stdin
#is change dir
match = re.match(r'cd(?:\s+|$)(.*)', line)
if match:
dirs = match.groups()
# Default to cd is home directory
if len(dirs) == 0 or len(dirs[0]) == 0:
self.set_directory(os.environ['HOME'])
else:
dir = dirs[0]
if dir == '..':
head, tail = os.path.split(self.current_dir)
self.set_directory(head)
else:
self.set_directory(dir)
return ""
return self.eval(line, processor, env, stdin, return_result_anyway=return_result_anyway) | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/shell/unix_lang.py | unix_lang.py |
def get_welcome_message():
"""
Welcome message for Zpy
:return: Welcome message
"""
a=r"""
_/ _/
_/ _/_/_/_/_/ _/
_/ _/ _/_/_/ _/ _/ _/
_/ _/ _/ _/ _/ _/ _/
_/ _/ _/ _/ _/ _/ _/
_/ _/_/_/_/_/ _/_/_/ _/_/_/ _/
_/ _/ _/ _/
_/ _/_/
"""
b="""
_______ __
/ /__ /_ __ _ _\ \ | \033[37m Zpy\033[0m
| | / /| '_ \| | | | | | \033[164m Shell for everyone\033[0m
| | / /_| |_) | |_| | | |
| |/____| .__/ \__, | | |
\_\ |_| |____/_/ | \033[94m https://github.com/albertaleksieiev/zpy\033[0m\033
"""
c=r"""
___ ___ ___
/\ \ /\ \ |\__\
\:\ \ /::\ \ |:| |
\:\ \ /:/\:\ \ |:| |
\:\ \ /::\~\:\ \ |:|__|__
_______\:\__\ /:/\:\ \:\__\ /::::\__\
\::::::::/__/ \/__\:\/:/ / /:/~~/~
\:\~~\~~ \::/ / /:/ /
\:\ \ \/__/ \/__/
\:\__\
\/__/
"""
return b
return r"""
_____ _____ _____
/\ \ /\ \ |\ \
/::\ \ /::\ \ |:\____\
\:::\ \ /::::\ \ |::| |
\:::\ \ /::::::\ \ |::| |
\:::\ \ /:::/\:::\ \ |::| |
\:::\ \ /:::/__\:::\ \ |::| |
\:::\ \ /::::\ \:::\ \ |::| |
\:::\ \ /::::::\ \:::\ \ |::|___|______
\:::\ \ /:::/\:::\ \:::\____\ /::::::::\ \
_______________\:::\____\/:::/ \:::\ \:::| | /::::::::::\____\
\::::::::::::::::::/ /\::/ \:::\ /:::|____| /:::/~~~~/~~
\::::::::::::::::/____/ \/_____/\:::\/:::/ / /:::/ /
\:::\~~~~\~~~~~~ \::::::/ / /:::/ /
\:::\ \ \::::/ / /:::/ /
\:::\ \ \::/____/ \::/ /
\:::\ \ ~~ \/____/
\:::\ \
\:::\____\
\::/ /
\/____/
"""
def get_bye_message():
return "\033[1m Bye bye!\033[0m" | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/frontend/messages.py | messages.py |
import json
import contextlib
import execjs._exceptions as exceptions
from execjs._abstract_runtime import AbstractRuntime
from execjs._abstract_runtime_context import AbstractRuntimeContext
from execjs._misc import encode_unicode_codepoints
try:
import PyV8
except ImportError:
_pyv8_available = False
else:
_pyv8_available = True
class PyV8Runtime(AbstractRuntime):
'''Runtime to execute codes with PyV8.'''
def __init__(self):
pass
@property
def name(self):
return "PyV8"
def _compile(self, source, cwd=None):
return self.Context(source)
def is_available(self):
return _pyv8_available
class Context(AbstractRuntimeContext):
def __init__(self, source=""):
self._source = source
def is_available(self):
return _pyv8_available
def _exec_(self, source):
source = '''\
(function() {{
{0};
{1};
}})()'''.format(
encode_unicode_codepoints(self._source),
encode_unicode_codepoints(source)
)
source = str(source)
# backward compatibility
with contextlib.nested(PyV8.JSContext(), PyV8.JSEngine()) as (ctxt, engine):
js_errors = (PyV8.JSError, IndexError, ReferenceError, SyntaxError, TypeError)
try:
script = engine.compile(source)
except js_errors as e:
raise exceptions.RuntimeError(e)
try:
value = script.run()
except js_errors as e:
raise exceptions.ProgramError(e)
return self.convert(value)
def _eval(self, source):
return self.exec_('return ' + encode_unicode_codepoints(source))
def _call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
@classmethod
def convert(cls, obj):
from PyV8 import _PyV8
if isinstance(obj, bytes):
return obj.decode('utf8')
if isinstance(obj, _PyV8.JSArray):
return [cls.convert(v) for v in obj]
elif isinstance(obj, _PyV8.JSFunction):
return None
elif isinstance(obj, _PyV8.JSObject):
ret = {}
for k in obj.keys():
v = cls.convert(obj[k])
if v is not None:
ret[cls.convert(k)] = v
return ret
else:
return obj | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/execjs/_pyv8runtime.py | _pyv8runtime.py |
import os.path
from collections import OrderedDict
import execjs.runtime_names as runtime_names
import execjs._external_runtime as external_runtime
import execjs._pyv8runtime as pyv8runtime
import execjs._exceptions as exceptions
def register(name, runtime):
'''Register a JavaScript runtime.'''
_runtimes.append((name, runtime))
def get(name=None):
"""
Return a appropriate JavaScript runtime.
If name is specified, return the runtime.
"""
if name is None:
return get_from_environment() or _find_available_runtime()
return _find_runtime_by_name(name)
def runtimes():
"""return a dictionary of all supported JavaScript runtimes."""
return OrderedDict(_runtimes)
def get_from_environment():
'''
Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable.
If EXECJS_RUNTIME environment variable is empty or invalid, return None.
'''
name = os.environ.get("EXECJS_RUNTIME", "")
if not name:
return None
try:
return _find_runtime_by_name(name)
except exceptions.RuntimeUnavailableError:
return None
def _find_available_runtime():
for _, runtime in _runtimes:
if runtime.is_available():
return runtime
raise exceptions.RuntimeUnavailableError("Could not find an available JavaScript runtime.")
def _find_runtime_by_name(name):
for runtime_name, runtime in _runtimes:
if runtime_name.lower() == name.lower():
break
else:
raise exceptions.RuntimeUnavailableError("{name} runtime is not defined".format(name=name))
if not runtime.is_available():
raise exceptions.RuntimeUnavailableError(
"{name} runtime is not available on this system".format(name=runtime.name))
return runtime
_runtimes = []
register(runtime_names.PyV8, pyv8runtime.PyV8Runtime())
register(runtime_names.Node, external_runtime.node())
register(runtime_names.JavaScriptCore, external_runtime.jsc())
register(runtime_names.SpiderMonkey, external_runtime.spidermonkey())
register(runtime_names.JScript, external_runtime.jscript())
register(runtime_names.PhantomJS, external_runtime.phantomjs())
register(runtime_names.SlimerJS, external_runtime.slimerjs())
register(runtime_names.Nashorn, external_runtime.nashorn()) | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/execjs/_runtimes.py | _runtimes.py |
def _json2_source():
# The folowing code is json2.js(https://github.com/douglascrockford/JSON-js).
# It is compressed by YUI Compressor Online(http://yui.2clics.net/).
return 'var JSON;if(!JSON){JSON={}}(function(){function f(n){return n<10?"0"+n:n}if(typeof Date.prototype.toJSON!=="function"){Date.prototype.toJSON=function(key){return isFinite(this.valueOf())?this.getUTCFullYear()+"-"+f(this.getUTCMonth()+1)+"-"+f(this.getUTCDate())+"T"+f(this.getUTCHours())+":"+f(this.getUTCMinutes())+":"+f(this.getUTCSeconds())+"Z":null};String.prototype.toJSON=Number.prototype.toJSON=Boolean.prototype.toJSON=function(key){return this.valueOf()}}var cx=/[\\u0000\\u00ad\\u0600-\\u0604\\u070f\\u17b4\\u17b5\\u200c-\\u200f\\u2028-\\u202f\\u2060-\\u206f\\ufeff\\ufff0-\\uffff]/g,escapable=/[\\\\\\"\\x00-\\x1f\\x7f-\\x9f\\u00ad\\u0600-\\u0604\\u070f\\u17b4\\u17b5\\u200c-\\u200f\\u2028-\\u202f\\u2060-\\u206f\\ufeff\\ufff0-\\uffff]/g,gap,indent,meta={"\\b":"\\\\b","\\t":"\\\\t","\\n":"\\\\n","\\f":"\\\\f","\\r":"\\\\r",\'"\':\'\\\\"\',"\\\\":"\\\\\\\\"},rep;function quote(string){escapable.lastIndex=0;return escapable.test(string)?\'"\'+string.replace(escapable,function(a){var c=meta[a];return typeof c==="string"?c:"\\\\u"+("0000"+a.charCodeAt(0).toString(16)).slice(-4)})+\'"\':\'"\'+string+\'"\'}function str(key,holder){var i,k,v,length,mind=gap,partial,value=holder[key];if(value&&typeof value==="object"&&typeof value.toJSON==="function"){value=value.toJSON(key)}if(typeof rep==="function"){value=rep.call(holder,key,value)}switch(typeof value){case"string":return quote(value);case"number":return isFinite(value)?String(value):"null";case"boolean":case"null":return String(value);case"object":if(!value){return"null"}gap+=indent;partial=[];if(Object.prototype.toString.apply(value)==="[object Array]"){length=value.length;for(i=0;i<length;i+=1){partial[i]=str(i,value)||"null"}v=partial.length===0?"[]":gap?"[\\n"+gap+partial.join(",\\n"+gap)+"\\n"+mind+"]":"["+partial.join(",")+"]";gap=mind;return v}if(rep&&typeof rep==="object"){length=rep.length;for(i=0;i<length;i+=1){if(typeof rep[i]==="string"){k=rep[i];v=str(k,value);if(v){partial.push(quote(k)+(gap?": ":":")+v)}}}}else{for(k in value){if(Object.prototype.hasOwnProperty.call(value,k)){v=str(k,value);if(v){partial.push(quote(k)+(gap?": ":":")+v)}}}}v=partial.length===0?"{}":gap?"{\\n"+gap+partial.join(",\\n"+gap)+"\\n"+mind+"}":"{"+partial.join(",")+"}";gap=mind;return v}}if(typeof JSON.stringify!=="function"){JSON.stringify=function(value,replacer,space){var i;gap="";indent="";if(typeof space==="number"){for(i=0;i<space;i+=1){indent+=" "}}else{if(typeof space==="string"){indent=space}}rep=replacer;if(replacer&&typeof replacer!=="function"&&(typeof replacer!=="object"||typeof replacer.length!=="number")){throw new Error("JSON.stringify")}return str("",{"":value})}}if(typeof JSON.parse!=="function"){JSON.parse=function(text,reviver){var j;function walk(holder,key){var k,v,value=holder[key];if(value&&typeof value==="object"){for(k in value){if(Object.prototype.hasOwnProperty.call(value,k)){v=walk(value,k);if(v!==undefined){value[k]=v}else{delete value[k]}}}}return reviver.call(holder,key,value)}text=String(text);cx.lastIndex=0;if(cx.test(text)){text=text.replace(cx,function(a){return"\\\\u"+("0000"+a.charCodeAt(0).toString(16)).slice(-4)})}if(/^[\\],:{}\\s]*$/.test(text.replace(/\\\\(?:["\\\\\\/bfnrt]|u[0-9a-fA-F]{4})/g,"@").replace(/"[^"\\\\\\n\\r]*"|true|false|null|-?\\d+(?:\\.\\d*)?(?:[eE][+\\-]?\\d+)?/g,"]").replace(/(?:^|:|,)(?:\\s*\\[)+/g,""))){j=eval("("+text+")");return typeof reviver==="function"?walk({"":j},""):j}throw new SyntaxError("JSON.parse")}}}());' | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/execjs/_json2.py | _json2.py |
from __future__ import unicode_literals, division, with_statement
Node = r"""(function(program, execJS) { execJS(program) })(function() { #{source}
}, function(program) {
var output;
var print = function(string) {
process.stdout.write('' + string + '\n');
};
try {
result = program();
print('')
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
}
else if(typeof result == 'function' && result.skip_print == "zkip_"){
}
else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});"""
JavaScriptCore = r"""(function(program, execJS) { execJS(program) })(function() {
return eval(#{encoded_source});
}, function(program) {
var output;
try {
result = program();
print("");
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});
"""
SpiderMonkey = r"""(function(program, execJS) { execJS(program) })(function() { #{source}
}, function(program) {
#{json2_source}
var output;
try {
result = program();
print("");
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});
"""
Nashorn = SpiderMonkey
JScript = r"""(function(program, execJS) { execJS(program) })(function() {
return eval(#{encoded_source});
}, function(program) {
#{json2_source}
var output, print = function(string) {
string = string.replace(/[^\x00-\x7f]/g, function(ch){
return '\\u' + ('0000' + ch.charCodeAt(0).toString(16)).slice(-4);
});
WScript.Echo(string);
};
try {
result = program();
print("")
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', err.name + ': ' + err.message]));
}
});
"""
PhantomJS = r"""
(function(program, execJS) { execJS(program) })(function() {
return eval(#{encoded_source});
}, function(program) {
var output;
var print = function(string) {
console.log('' + string);
};
try {
result = program();
print('')
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});
phantom.exit();
"""
SlimerJS = PhantomJS | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/execjs/_runner_sources.py | _runner_sources.py |
from subprocess import Popen, PIPE
import io
import json
import os
import os.path
import platform
import re
import stat
import sys
import tempfile
import six
import execjs._json2 as _json2
import execjs._runner_sources as _runner_sources
import execjs._exceptions as exceptions
from execjs._abstract_runtime import AbstractRuntime
from execjs._abstract_runtime_context import AbstractRuntimeContext
from execjs._misc import encode_unicode_codepoints
class ExternalRuntime(AbstractRuntime):
'''Runtime to execute codes with external command.'''
def __init__(self, name, command, runner_source, encoding='utf8', tempfile=False):
self._name = name
if isinstance(command, str):
command = [command]
self._command = command
self._runner_source = runner_source
self._encoding = encoding
self._tempfile = tempfile
self._available = self._binary() is not None
def __str__(self):
return "{class_name}({runtime_name})".format(
class_name=type(self).__name__,
runtime_name=self._name,
)
@property
def name(self):
return self._name
def is_available(self):
return self._available
def _compile(self, source, cwd=None):
return self.Context(self, source, cwd=cwd, tempfile=tempfile)
def _binary(self):
if not hasattr(self, "_binary_cache"):
self._binary_cache = _which(self._command)
return self._binary_cache
class Context(AbstractRuntimeContext):
# protected
def __init__(self, runtime, source='', cwd=None, tempfile=False):
self._runtime = runtime
self._source = source
self._cwd = cwd
self._tempfile = tempfile
def is_available(self):
return self._runtime.is_available()
def _eval(self, source):
if not source.strip():
data = "''"
else:
data = "'('+" + json.dumps(source, ensure_ascii=True) + "+')'"
code = 'return eval({data})'.format(data=data)
return self.exec_(code)
def _exec_(self, source):
if self._source:
source = self._source + '\n' + source
if self._tempfile:
output = self._exec_with_tempfile(source)
else:
output = self._exec_with_pipe(source)
return self._extract_result(output)
def _call(self, identifier, *args):
args = json.dumps(args)
return self._eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
def _exec_with_pipe(self, source):
cmd = self._runtime._binary()
p = None
try:
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=self._cwd, universal_newlines=True)
stdoutdata, stderrdata = p.communicate(input=source)
ret = p.wait()
finally:
del p
self._fail_on_non_zero_status(ret, stdoutdata, stderrdata)
return stdoutdata
def _exec_with_tempfile(self, source):
#(fd, filename) = tempfile.mkstemp(prefix='execjs', suffix='.js')
#os.close(fd)
filename = os.path.join(os.getcwd(),".execjs_tmp_file.js")
try:
with io.open(filename, "w+", encoding=self._runtime._encoding) as fp:
fp.write(self._compile(source))
cmd = self._runtime._binary() + [filename]
p = None
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=self._cwd)
stdoutdata, stderrdata = p.communicate()
ret = p.wait()
finally:
del p
self._fail_on_non_zero_status(ret, stdoutdata, stderrdata)
return stdoutdata
finally:
os.remove(filename)
def _fail_on_non_zero_status(self, status, stdoutdata, stderrdata):
if status != 0:
raise exceptions.RuntimeError("stdout: {}, stderr: {}".format(repr(stdoutdata), repr(stderrdata)))
def _compile(self, source):
runner_source = self._runtime._runner_source
replacements = {
'#{source}': lambda: source,
'#{encoded_source}': lambda: json.dumps(
"(function(){ " +
encode_unicode_codepoints(source) +
" })()"
),
'#{json2_source}': _json2._json2_source,
}
pattern = "|".join(re.escape(k) for k in replacements)
runner_source = re.sub(pattern, lambda m: replacements[m.group(0)](), runner_source)
return runner_source
def _extract_result(self, output):
output = output.decode(self._runtime._encoding)
output = output.replace("\r\n", "\n").replace("\r", "\n")
output_last_line = output.split("\n")[-2]
if not output_last_line:
status = value = None
else:
ret = json.loads(output_last_line)
if len(ret) == 1:
ret = [ret[0], None]
status, value = ret
if status == "ok":
return value
elif value.startswith('SyntaxError:'):
raise exceptions.RuntimeError(value)
else:
raise exceptions.ProgramError(value)
def _is_windows():
"""protected"""
return platform.system() == 'Windows'
def _decode_if_not_text(s):
"""protected"""
if isinstance(s, six.text_type):
return s
return s.decode(sys.getfilesystemencoding())
def _find_executable(prog, pathext=("",)):
"""protected"""
pathlist = _decode_if_not_text(os.environ.get('PATH', '')).split(os.pathsep)
for dir in pathlist:
for ext in pathext:
filename = os.path.join(dir, prog + ext)
try:
st = os.stat(filename)
except os.error:
continue
if stat.S_ISREG(st.st_mode) and (stat.S_IMODE(st.st_mode) & 0o111):
return filename
return None
def _which(command):
"""protected"""
if isinstance(command, str):
command = [command]
command = list(command)
name = command[0]
args = command[1:]
if _is_windows():
pathext = _decode_if_not_text(os.environ.get("PATHEXT", ""))
path = _find_executable(name, pathext.split(os.pathsep))
else:
path = _find_executable(name)
if not path:
return None
return [path] + args
def node():
r = node_node()
if r.is_available():
return r
return node_nodejs()
def node_node():
return ExternalRuntime(
name="Node.js (V8)",
command=['node'],
encoding='UTF-8',
runner_source=_runner_sources.Node
)
def node_nodejs():
return ExternalRuntime(
name="Node.js (V8)",
command=['nodejs'],
encoding='UTF-8',
runner_source=_runner_sources.Node
)
def jsc():
return ExternalRuntime(
name="JavaScriptCore",
command=["/System/Library/Frameworks/JavaScriptCore.framework/Versions/A/Resources/jsc"],
runner_source=_runner_sources.JavaScriptCore
)
def spidermonkey():
return ExternalRuntime(
name="SpiderMonkey",
command=["js"],
runner_source=_runner_sources.SpiderMonkey
)
def jscript():
return ExternalRuntime(
name="JScript",
command=["cscript", "//E:jscript", "//Nologo"],
encoding="ascii",
runner_source=_runner_sources.JScript,
tempfile=True
)
def phantomjs():
return ExternalRuntime(
name="PhantomJS",
command=["phantomjs"],
runner_source=_runner_sources.PhantomJS
)
def slimerjs():
return ExternalRuntime(
name="SlimerJS",
command=["slimerjs"],
runner_source=_runner_sources.SlimerJS
)
def nashorn():
return ExternalRuntime(
name="Nashorn",
command=["jjs"],
runner_source=_runner_sources.Nashorn
) | zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/execjs/_external_runtime.py | _external_runtime.py |
import os
import time
import logging
from kazoo.client import KazooClient
from datetime import datetime
def look_for_missed_broker_id(broker_ids, min_broker_id, max_broker_id):
# loop from 1001 to 1006 included
for broker_id in range(min_broker_id, max_broker_id + 1, 1):
if str(broker_id) not in broker_ids:
return broker_id
return -1
def create_or_update_meta(log_dir, meta_file, broker_id):
# loop from 1001 to 1006 included
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
except OSError as exception:
print("Create log dir (%s) error, the error is %s" % (log_dir, exception.message))
# stop the following operation until error solved
return
# check whether it's brand new broker, if not, then return
for topic_dir in os.listdir(log_dir):
# if any topic exist in the kafka log directory, we will see it old kafka broker, then return directly
if os.path.isdir(topic_dir):
print("topic is exist in %s, so return directly." % topic_dir)
return
# check meta.properties exist
meta_file_path = log_dir + "/" + meta_file;
if os.path.exists(meta_file_path):
# the new broker is created conflict, so reset the broker id to new option, then wait for kafka restart to solve
# it again
os.remove(meta_file_path)
# create new meta.properties for the current broker
try:
meta_file = open(meta_file_path, 'w+')
meta_file.write('#\n')
meta_file.write('#' + str(datetime.now()) + ', generated by external script.\n')
meta_file.write('version=0\n')
meta_file.write('broker.id=' + str(broker_id) + '\n')
except OSError as exception:
print("Write file (%s) error, the error is %s." % (meta_file, exception.message))
return
def manage_meta(zookeeper_url, log_dir, meta_file, min_broker_id, max_broker_id):
# configure log
logging.basicConfig()
# Print the pass through parameters
print (
"Create kafka meta.properties if it is not exist with arg [zookeeper-url : %s, log-dir : %s, broker range in "
"%d, %d]." % (zookeeper_url, log_dir, min_broker_id, max_broker_id))
# Connect to zookeeper
zk = KazooClient(hosts=zookeeper_url)
try:
zk.start(timeout=900)
except Exception as e:
print("connection to zookeeper failed after 15 minutes timeout, the error is %s" % e)
# retrieve brokers
broker_path = "/brokers/ids"
if zk.exists(broker_path):
broker_path_existed = True
broker_ids = zk.get_children(broker_path)
str_broker_ids = []
if len(broker_ids) > 0:
for broker_id in broker_ids:
str_broker_ids.append(str(broker_id))
else:
broker_path_existed = False
if broker_path_existed:
missed_broker_id = look_for_missed_broker_id(str_broker_ids, min_broker_id, max_broker_id)
else:
missed_broker_id = -1
if missed_broker_id != -1:
# sleep seconds are aimed to avoid potential broker id conflict, especially for brand new cluster deployed
sleep_seconds = (missed_broker_id - min_broker_id + 1) * 2
print("Next broker id: %s, sleep: %d seconds." % (missed_broker_id, sleep_seconds))
time.sleep(sleep_seconds)
# create or update meta.properties
create_or_update_meta(log_dir, meta_file, missed_broker_id)
# disconnect to zookeeper
zk.stop()
# if __name__ == "__main__":
# """
# This script is aime to control broker id generated strategy.
#
# scenarios
# ---------
# 1, broker id may be conflict between each other when try to create the whole kafka cluster with quasar on aws.
# 2, retirement one of the existed broker, the auto launched node with not expected broker id, e.g 1007, 1008.
# 3, especially for case 2), if the new node will different broker id, we need to do manual partition assignment.
#
# what does this script do?
# ---------
# 1, grab broker ids from remote zookeeper.
# 2, calculate the first excepted id from a indicated scope, e.g [1001, 1006].
# 3, create kafka log folder and meta.properties with the calculated id if it's a brand new node.
# 4, or 3), update the broker id existed in the meta.properties with the new id if there is no any topic in the log
# folder, this case is aimed to solve the potential broker conflict when try to launch new cluster.
# """
#
# parser = argparse.ArgumentParser(description='Kafka meta.properties creation if it is not exist')
# parser.add_argument('--zookeeper-url', default="localhost:2181", help='URL of zookeeper')
# parser.add_argument('--log-dir', default="/kafka/data", help='Dir of kafka log')
# parser.add_argument('--meta-file', default="meta.properties", help='Meta file of kafka')
# parser.add_argument('--min-broker-id', type=int, default=1001, help='The min broker id, e.g 1001')
# parser.add_argument('--max-broker-id', type=int, default=9999, help='The max broker id, e.g 9999')
# args = parser.parse_args()
#
#
#
# manage_meta(args.zookeeper_url, args.log_dir, args.meta_file, args.min_broker_id, args.max_broker_id) | zpython-tools | /zpython_tools-0.1.1.tar.gz/zpython_tools-0.1.1/zpython_tools/zkafka/broker_meta_management.py | broker_meta_management.py |
# zpytrading
Python - Zinnion SDK
https://pypi.org/project/zpytrading/
`pip3 install --upgrade --force-reinstall --no-cache-dir zpytrading`
`sudo -H pip3 install zpytrading`
### Requirements
You need to download and export the path to `libztrading.so` https://github.com/Zinnion/zpytrading/wiki
### Example
```Python
import zpytrading
import json
import os
import sys
def init():
zTrading = zpytrading.ZinnionAPI()
streamingr_config = '{"subscriptions": [ "BINANCE_SPOT_BTC_USDT"], "channels": ["trade","indicator"], "comment": "lets do it" }'
zTrading.add_streaming(streamingr_config)
indicators_config = '{"indicators_config":[{"indicator_name":"decay","name":"","plot":true,"symbol_id":"BINANCE_SPOT_BTC_USDT","options":[9],"data_in_bar_type":["close"],"bar_type":"simple","timeframe":1,"max_bars":30}]}'
zTrading.add_indicators(indicators_config)
zTrading.start_streaming(handle_data)
def handle_data(self, data):
print(data)
if __name__ == "__main__":
init()
```
## Indicator Listing
```
104 total indicators
Overlay
avgprice Average Price
bbands Bollinger Bands
dema Double Exponential Moving Average
ema Exponential Moving Average
hma Hull Moving Average
kama Kaufman Adaptive Moving Average
linreg Linear Regression
medprice Median Price
psar Parabolic SAR
sma Simple Moving Average
tema Triple Exponential Moving Average
trima Triangular Moving Average
tsf Time Series Forecast
typprice Typical Price
vidya Variable Index Dynamic Average
vwma Volume Weighted Moving Average
wcprice Weighted Close Price
wilders Wilders Smoothing
wma Weighted Moving Average
zlema Zero-Lag Exponential Moving Average
Indicator
ad Accumulation/Distribution Line
adosc Accumulation/Distribution Oscillator
adx Average Directional Movement Index
adxr Average Directional Movement Rating
ao Awesome Oscillator
apo Absolute Price Oscillator
aroon Aroon
aroonosc Aroon Oscillator
atr Average True Range
bop Balance of Power
cci Commodity Channel Index
cmo Chande Momentum Oscillator
cvi Chaikins Volatility
di Directional Indicator
dm Directional Movement
dpo Detrended Price Oscillator
dx Directional Movement Index
emv Ease of Movement
fisher Fisher Transform
fosc Forecast Oscillator
kvo Klinger Volume Oscillator
linregintercept Linear Regression Intercept
linregslope Linear Regression Slope
macd Moving Average Convergence/Divergence
marketfi Market Facilitation Index
mass Mass Index
mfi Money Flow Index
mom Momentum
msw Mesa Sine Wave
natr Normalized Average True Range
nvi Negative Volume Index
obv On Balance Volume
ppo Percentage Price Oscillator
pvi Positive Volume Index
qstick Qstick
roc Rate of Change
rocr Rate of Change Ratio
rsi Relative Strength Index
stoch Stochastic Oscillator
stochrsi Stochastic RSI
tr True Range
trix Trix
ultosc Ultimate Oscillator
vhf Vertical Horizontal Filter
volatility Annualized Historical Volatility
vosc Volume Oscillator
wad Williams Accumulation/Distribution
willr Williams %R
Math
crossany Crossany
crossover Crossover
decay Linear Decay
edecay Exponential Decay
lag Lag
max Maximum In Period
md Mean Deviation Over Period
min Minimum In Period
stddev Standard Deviation Over Period
stderr Standard Error Over Period
sum Sum Over Period
var Variance Over Period
Simple
abs Vector Absolute Value
acos Vector Arccosine
add Vector Addition
asin Vector Arcsine
atan Vector Arctangent
ceil Vector Ceiling
cos Vector Cosine
cosh Vector Hyperbolic Cosine
div Vector Division
exp Vector Exponential
floor Vector Floor
ln Vector Natural Log
log10 Vector Base-10 Log
mul Vector Multiplication
round Vector Round
sin Vector Sine
sinh Vector Hyperbolic Sine
sqrt Vector Square Root
sub Vector Subtraction
tan Vector Tangent
tanh Vector Hyperbolic Tangent
todeg Vector Degree Conversion
torad Vector Radian Conversion
trunc Vector Truncate
```
| zpytrading | /zpytrading-0.0.21.tar.gz/zpytrading-0.0.21/README.md | README.md |
ZPyWallet
===========
.. image:: https://img.shields.io/pypi/pyversions/zpywallet.svg?maxAge=60
:target: https://pypi.python.org/pypi/zpywallet
:alt: Python version
.. image:: https://img.shields.io/pypi/v/zpywallet.svg?maxAge=60
:target: https://pypi.python.org/pypi/zpywallet
:alt: PyPi version
.. image:: https://img.shields.io/pypi/status/zpywallet.svg?maxAge=60
:target: https://pypi.python.org/pypi/zpywallet
:alt: PyPi status
.. image:: https://codecov.io/gh/ZenulAbidin/zpywallet/branch/master/graph/badge.svg?token=G2tC6LpTNm
:target: https://codecov.io/gh/ZenulAbidin/zpywallet
:alt: Code coverage
**Simple BIP32 (HD) wallet creation for: BTC, BCH, ETH, LTC, DASH, USDT (Omni), DOGE**
BIP32 (or HD for "hierarchical deterministic") wallets allow you to create
child wallets which can only generate public keys and don't expose a
private key to an insecure server.
This library simplifies the process of creating new wallets for the
BTC, BCH, ETH, LTC, DASH, USDT (Omni) and DOGE cryptocurrencies.
In addition, it can also create Bitcoin Bech32 addresses for all supported
witness versions.
This is a fork of `PyWallet <https://github.com/ranaroussi/pywallet>` with support for more coins, and some bugfixes.
Enjoy!
--------------
Installation
-------------
Install via PiP:
.. code:: bash
$ sudo pip install zpywallet
Example code:
=============
Create HD Wallet
----------------
The following code creates a new Bitcoin HD wallet:
.. code:: python
# create_btc_wallet.py
from zpywallet import wallet
# generate 12 word mnemonic seed
seed = wallet.generate_mnemonic()
# create bitcoin wallet
w = wallet.create_wallet(network="BTC", seed=seed, children=1)
print(w)
Output looks like this:
.. code:: bash
$ python create_btc_wallet.py
{
"coin": "BTC",
"seed": "guess tiny intact poet process segment pelican bright assume avocado view lazy",
"address": "1HwPm2tcdakwkTTWU286crWQqTnbEkD7av",
"xprivate_key": "xprv9s21ZrQH143K2Dizn667UCo9oYPdTPSMWq7D5t929aXf1kfnmW79CryavzBxqbWfrYzw8jbyTKvsiuFNwr1JL2qfrUy2Kbwq4WbBPfxYGbg",
"xpublic_key": "xpub661MyMwAqRbcEhoTt7d7qLjtMaE7rrACt42otGYdhv4dtYzwK3RPkfJ4nEjpFQDdT8JjT3VwQ3ZKjJaeuEdpWmyw16sY9SsoY68PoXaJvfU",
"wif": "L1EnVJviG6jR2oovFbfxZoMp1JknTACKLzsTKqDNUwATCWpY1Fp4",
"children": [{
"address": "1E3btRwsoJx2jUcMnATyx7poHhV2tomL8g",
"path": "m/0",
"xpublic_key": "xpub69Fho5TtAbdoXyWzgUV1ZYst9K4bVfoGNLZxQ9u5js4Rb1jEyUjDtoATXbWvAcV8cERCMMnH8wYRVVUsRDSfaMjLqaY3TvD7Am9ALjq5PsG",
"wif": "KysRDiwJNkS9VPzy1UH76DrCDizsWKtEooSzikich792RVzcUaJP"
}]
}
Similarly, you can do the same for an Ethereum wallet:
.. code:: python
# create_eth_wallet.py
from zpywallet import wallet
seed = wallet.generate_mnemonic()
w = wallet.create_wallet(network="ETH", seed=seed, children=1)
print(w)
Output looks like this (no WIF for Ethereum):
.. code:: bash
$ python create_eth_wallet.py
{
"coin": "ETH",
"seed": "traffic happy world clog clump cattle great toy game absurd alarm auction",
"address": "0x3b777f60eb04fcb13e6b27e468532e491409722e",
"xprivate_key": "xprv9yTuSjwb95QZznV6epMWpb4Kpc2S8ZRaQuAf5B697YXtQD2tDmmJ5KvwJWVjtbVrdJ1WBKNnuodrpTKGfHfiPSEgrAxUjL5RP1gQwwT3fFx",
"xpublic_key": "xpub6GhhMtkVjoPi5DKtqapKzMzrzdGjo1EPc7Ka6KdeoXYdCrTBH1Hu1wKysm8boWSy8VeTKVJi6gQJ2qJ4YG2ZhvFDcUUgMJrFCJWN1PGtBry",
"wif": "",
"children": [{
"address": "0x87eb82d43fa7316df0a989c0d951a9037ed02f9b",
"path": "m/0",
"xpublic_key": "xpub6LnpVXD73jNuAYXxzQCnEY6wXQspwkiAEkZWoX4BW9Tzx6KbUrMUYAU1Yvw4kebPHSPiEJPo8irHWHSwQR6WuVwUj85xURsugPWeJVH6sau",
"wif": ""
}]
}
\* Valid options for `network` are: BTC, BTG, BCH, LTC, DASH, DOGE
Create Child Wallet
-------------------
You can create child-wallets (BIP32 wallets) from the HD wallet's
**Extended Public Key** to generate new public addresses without
revealing your private key.
Example:
.. code-block:: python
# create_child_wallet.py
from zpywallet import wallet
WALLET_PUBKEY = 'YOUR WALLET XPUB'
# generate address for specific user (id = 10)
user_addr = wallet.create_address(network="BTC", xpub=WALLET_PUBKEY, child=10)
# or generate a random address, based on timestamp
rand_addr = wallet.create_address(network="BTC", xpub=WALLET_PUBKEY)
print("User Address\n", user_addr)
print("Random Address\n", rand_addr)
Output looks like this:
.. code:: bash
$ python create_child_wallet.py
User Address
{
"address": "13myudz3WhpBezoZue6cwRUoHrzWs4vCrb",
"path": "m/0/395371597"
}
Random Address
{
"address": "1KpS2wC5J8bDsGShXDHD7qdGvnic1h27Db",
"path": "m/0/394997119"
}
-----
CONTRIBUTING
============
Bugfixes and enhancements are welcome. Please read CONTRIBUTING.md for contributing instructions.
At the moment, I'm not accepting pull requests for new coins unless they are big and historic coins such as Tether (ERC20), BNB and XMR.
IMPORTANT
=========
I **highly** recommend that you familiarize yourself with the Blockchain technology and
be aware of security issues.
Reading `Mastering Bitcoin <https://github.com/bitcoinbook/bitcoinbook>`_ and going over
Steven Buss's security notes on the `Bitmerchant repository <https://github.com/sbuss/bitmerchant>`_
is a good start.
Enjoy!
| zpywallet | /zpywallet-0.4.0.tar.gz/zpywallet-0.4.0/README.rst | README.rst |
Python Utilities
================
This repo collects some small Python utilities that I have created (or collected) in practice,
and organizes them as a package named `zpz`.
These utilities are not "one-off" experiments; they are really useful code.
However, they do not form a coherent set of utilities for a particular application domain.
There is no plan to maintain this code as a coherent library.
This package is uploaded to [pypi](https://pypi.org/project/zpz/).
Consider it to be mainly for the author's personal convenience.
Do not assume the releases will be maintained in a stable, regular, and backward-compatible way.
One reasonable way to use it is to copy-paste whatever segments you find useful.
To install, do
```
pip install zpz
```
There are a few optional dependencies specified by 'avro' and 'lineprofiler'.
| zpz | /zpz-0.5.1.tar.gz/zpz-0.5.1/README.md | README.md |
<div align="center">
# zq-auth-sdk-python
**自强统一认证 Python SDK**
<!-- markdownlint-disable-next-line MD036 -->
</div>
<p align="center">
<a href="https://github.com/ZiqiangStudio/zq_auth_sdk_python/actions/workflows/test.yml">
<img src="https://github.com/ZiqiangStudio/zq_auth_sdk_python/actions/workflows/test.yml/badge.svg" alt="CI">
</a>
<a href="https://zq_auth_sdk_python.readthedocs.io/en/latest/?badge=latest">
<img src="https://readthedocs.org/projects/zq-django-util/badge/?version=latest" alt="Documentation Status" />
</a>
<a href="https://codecov.io/gh/ZiqiangStudio/zq_auth_sdk_python" >
<img src="https://codecov.io/gh/ZiqiangStudio/zq_auth_sdk_python/branch/master/graph/badge.svg" alt="cov"/>
</a>
<a href="https://pypi.org/project/zq-django-util/">
<img src="https://img.shields.io/pypi/v/zq-django-util" alt="pypi">
</a>
</p>
<!-- markdownlint-enable MD033 -->
## 简介
## 依赖需求
- Python 3.9+
## 安装
- 安装 zq-auth-sdk 包
使用 `pip` 安装:
```shell
pip install zq-auth-sdk
```
使用 `poetry` 安装:
```shell
poetry add zq-auth-sdk
```
- 使用 zq-django-util
> TODO
| zq-auth-sdk | /zq_auth_sdk-0.1.0.tar.gz/zq_auth_sdk-0.1.0/README.md | README.md |
import base64
import copy
import hashlib
import hmac
import logging
import random
import string
logger = logging.getLogger(__name__)
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access."""
def __getattr__(self, key):
if key in self:
return self[key]
return None
def __setattr__(self, key, value):
self[key] = value
class ZqAuthSigner:
"""ZqAuth data signer"""
def __init__(self, delimiter=b""):
self._data = []
self._delimiter = to_binary(delimiter)
def add_data(self, *args):
"""Add data to signer"""
for data in args:
self._data.append(to_binary(data))
@property
def signature(self):
"""Get data signature"""
self._data.sort()
str_to_sign = self._delimiter.join(self._data)
return hashlib.sha1(str_to_sign).hexdigest()
def check_signature(token, signature, timestamp, nonce):
"""Check ZqAuth callback signature, raises InvalidSignatureException
if check failed.
:param token: ZqAuth callback token
:param signature: ZqAuth callback signature sent by ZqAuth server
:param timestamp: ZqAuth callback timestamp sent by ZqAuth server
:param nonce: ZqAuth callback nonce sent by ZqAuth sever
"""
signer = ZqAuthSigner()
signer.add_data(token, timestamp, nonce)
if signer.signature != signature:
from zq_auth_sdk.exceptions import InvalidSignatureException
raise InvalidSignatureException()
def check_request_signature(session_key, raw_data, client_signature):
"""校验前端传来的rawData签名正确
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/framework/open-ability/signature.html # noqa
:param session_key: code换取的session_key
:param raw_data: 前端拿到的rawData
:param client_signature: 前端拿到的signature
:raises: InvalidSignatureException
:return: 返回数据dict
"""
str2sign = (raw_data + session_key).encode("utf-8")
signature = hashlib.sha1(str2sign).hexdigest()
if signature != client_signature:
from zq_auth_sdk.exceptions import InvalidSignatureException
raise InvalidSignatureException()
def to_text(value, encoding="utf-8"):
"""Convert value to unicode, default encoding is utf-8
:param value: Value to be converted
:param encoding: Desired encoding
"""
if not value:
return ""
if isinstance(value, str):
return value
if isinstance(value, bytes):
return value.decode(encoding)
return str(value)
def to_binary(value, encoding="utf-8"):
"""Convert value to binary string, default encoding is utf-8
:param value: Value to be converted
:param encoding: Desired encoding
"""
if not value:
return b""
if isinstance(value, bytes):
return value
if isinstance(value, str):
return value.encode(encoding)
return to_text(value).encode(encoding)
def random_string(length=16):
rule = string.ascii_letters + string.digits
rand_list = random.sample(rule, length)
return "".join(rand_list)
def now(tz=None):
"""
获取当前aware时间
:return:
"""
import datetime
if tz:
return datetime.datetime.now(tz)
return datetime.datetime.now().astimezone()
def format_url(params, api_key=None):
data = [to_binary(f"{k}={params[k]}") for k in sorted(params) if params[k]]
if api_key:
data.append(to_binary(f"key={api_key}"))
return b"&".join(data)
def calculate_signature(params, api_key):
url = format_url(params, api_key)
logger.debug("Calculate Signature URL: %s", url)
return to_text(hashlib.md5(url).hexdigest().upper())
def calculate_signature_hmac(params, api_key):
url = format_url(params, api_key)
sign = to_text(
hmac.new(api_key.encode(), msg=url, digestmod=hashlib.sha256)
.hexdigest()
.upper()
)
return sign
def _check_signature(params, api_key):
_params = copy.deepcopy(params)
sign = _params.pop("sign", "")
return sign == calculate_signature(_params, api_key)
def rsa_encrypt(data, pem, b64_encode=True):
"""
rsa 加密
:param data: 待加密字符串/binary
:param pem: RSA public key 内容/binary
:param b64_encode: 是否对输出进行 base64 encode
:return: 如果 b64_encode=True 的话,返回加密并 base64 处理后的 string;否则返回加密后的 binary
"""
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
encoded_data = to_binary(data)
pem = to_binary(pem)
public_key = serialization.load_pem_public_key(pem)
encrypted_data = public_key.encrypt(
encoded_data,
padding=padding.OAEP(
mgf=padding.MGF1(hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
)
if b64_encode:
encrypted_data = base64.b64encode(encrypted_data).decode("utf-8")
return encrypted_data
def rsa_decrypt(encrypted_data, pem, password=None):
"""
rsa 解密
:param encrypted_data: 待解密 bytes
:param pem: RSA private key 内容/binary
:param password: RSA private key pass phrase
:return: 解密后的 binary
"""
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
encrypted_data = to_binary(encrypted_data)
pem = to_binary(pem)
private_key = serialization.load_pem_private_key(pem, password)
data = private_key.decrypt(
encrypted_data,
padding=padding.OAEP(
mgf=padding.MGF1(hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
)
return data | zq-auth-sdk | /zq_auth_sdk-0.1.0.tar.gz/zq_auth_sdk-0.1.0/zq_auth_sdk/utils.py | utils.py |
import inspect
import logging
from datetime import datetime, timedelta
from typing import Callable
import requests
from zq_auth_sdk.client.api.base import BaseZqAuthAPI
from zq_auth_sdk.entities.response import ZqAuthResponse, ZqAuthResponseType
from zq_auth_sdk.entities.types import JSONVal
from zq_auth_sdk.exceptions import (
APILimitedException,
AppLoginFailedException,
ZqAuthClientException,
)
from zq_auth_sdk.storage import SessionStorage
from zq_auth_sdk.storage.memorystorage import MemoryStorage
from zq_auth_sdk.utils import now
logger = logging.getLogger(__name__)
def _is_api_endpoint(obj):
return isinstance(obj, BaseZqAuthAPI)
class BaseWeChatClient:
API_BASE_URL: str = ""
ACCESS_LIFETIME: timedelta | None = None # access token的有效期
REFRESH_LIFETIME: timedelta | None = None # refresh token的有效期
_http: requests.Session
appid: str
storage: SessionStorage
timeout: int | None
auto_retry: bool
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, api in api_endpoints:
api_cls = type(api)
api = api_cls(self)
setattr(self, name, api)
return self
def __init__(
self,
appid: str,
access_token: str | None = None,
storage: SessionStorage | None = None,
timeout: int | None = None,
auto_retry: bool = True,
):
self._http = requests.Session()
self.appid = appid
self.storage = storage or MemoryStorage()
self.timeout = timeout
self.auto_retry = auto_retry
if access_token:
self.storage.set(self.access_token_key, access_token)
if self.API_BASE_URL == "":
raise Exception("API_BASE_URL is not defined")
elif self.API_BASE_URL.endswith("/"):
self.API_BASE_URL = self.API_BASE_URL[:-1]
# region storage
# region access
@property
def access_token_key(self) -> str:
"""
access token 缓存key
"""
return f"{self.appid}_access_token"
@property
def access_token(self):
"""ZqAuth access token"""
access_token = self.storage.get(self.access_token_key)
if access_token:
if not self.expire_time:
# user provided access_token, just return it
return access_token
if self.expire_time - now() > timedelta(seconds=60):
return access_token
self.refresh_access_token()
return self.storage.get(self.access_token_key)
@access_token.setter
def access_token(self, value):
self.storage.set(self.access_token_key, value, self.ACCESS_LIFETIME)
# endregion
# region expire_time
@property
def access_token_expire_time_key(self) -> str:
"""
access token 过期时间缓存key
"""
return f"{self.appid}_access_token_expire_time"
@property
def expire_time(self) -> datetime | None:
"""
access token 过期时间
"""
iso_time = self.storage.get(self.access_token_expire_time_key, None)
return (
datetime.fromisoformat(iso_time) if iso_time is not None else None
)
@expire_time.setter
def expire_time(self, value: datetime):
self.storage.set(self.access_token_expire_time_key, value.isoformat())
# endregion
# region refresh
@property
def refresh_token_key(self) -> str:
"""
refresh token 缓存key
"""
return f"{self.appid}_refresh_token"
@property
def refresh_token(self) -> str | None:
"""ZqAuth refresh token"""
return self.storage.get(self.refresh_token_key, None)
@refresh_token.setter
def refresh_token(self, value: str | None):
if value is None:
self.storage.delete(self.refresh_token_key)
self.storage.set(self.refresh_token_key, value, self.REFRESH_LIFETIME)
# endregion
# region id
@property
def id_key(self) -> str:
"""
id 缓存key
"""
return f"{self.appid}_id"
@property
def id(self) -> int:
"""ZqAuth id"""
res = self.storage.get(self.id_key, None)
if res is None:
self._login()
return self.storage.get(self.id_key)
@id.setter
def id(self, value: int):
self.storage.set(self.id_key, value)
# endregion
# region name
@property
def name_key(self) -> str:
"""
name 缓存key
"""
return f"{self.appid}_name"
@property
def name(self) -> str:
"""ZqAuth name"""
res = self.storage.get(self.name_key, None)
if res is None:
self._login()
return self.storage.get(self.name_key)
@name.setter
def name(self, value: str):
self.storage.set(self.name_key, value)
# endregion
# region username
@property
def username_key(self) -> str:
"""
username 缓存key
"""
return f"{self.appid}_username"
@property
def username(self) -> str:
"""ZqAuth username"""
res = self.storage.get(self.username_key, None)
if res is None:
self._login()
return self.storage.get(self.username_key)
@username.setter
def username(self, value: str):
self.storage.set(self.username_key, value)
# endregion
# endregion
def _request(
self,
method: str,
url_or_endpoint: str,
auth: bool = True,
params: dict | None = None,
data: str | bytes | dict | None = None,
timeout: int | None = None,
result_processor: Callable[[JSONVal], JSONVal] = None,
auto_retry: bool | None = None,
**kwargs,
) -> JSONVal:
"""
发起请求
:param method: 请求方法
:param url_or_endpoint: 请求地址
:param auth: 是否使用token认证(默认开启)
:param params: 请求query参数
:param data: 请求体
:param timeout: 超时时长
:param result_processor: 结果处理函数
:param auto_retry: token过期是否自动重试
:param kwargs:
:return: JSON 返回
"""
if not url_or_endpoint.startswith(
("http://", "https://")
): # 传入 endpoint
api_base_url = kwargs.pop("api_base_url", self.API_BASE_URL)
if api_base_url.endswith("/"):
api_base_url = api_base_url[:-1]
url = f"{api_base_url}{url_or_endpoint}" # base url 拼接到 endpoint 前
else:
url = url_or_endpoint
if not params:
params = {}
kwargs["params"] = params
kwargs["data"] = data
kwargs["timeout"] = timeout or self.timeout
if auth:
if "headers" not in kwargs:
kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = f"Bearer {self.access_token}"
response = self._http.request(method=method, url=url, **kwargs) # 发起请求
logger.debug(f"Request: {method} {url}")
return self._handle_result(
response, method, url, result_processor, auto_retry, **kwargs
)
def _handle_result(
self,
response: requests.Response,
method: str | None = None,
url: str | None = None,
result_processor: Callable[[JSONVal], JSONVal] = None,
auto_retry: bool | None = None,
**kwargs,
) -> JSONVal:
response = ZqAuthResponse(response, self)
if auto_retry is None:
auto_retry = self.auto_retry
if response.code != ZqAuthResponseType.Success.code:
if auto_retry and response.code in [
ZqAuthResponseType.TokenInvalid.code,
]:
logger.info(
"Access token expired, fetch a new one and retry request"
)
# 刷新 access token
self.refresh_access_token()
# 重试请求
return self._request(
method=method,
url_or_endpoint=url,
result_processor=result_processor,
auto_retry=False,
**kwargs,
)
elif response.code == ZqAuthResponseType.APIThrottled.code:
# api freq out of limit
response.check_exception(APILimitedException)
else:
response.check_exception()
return (
response.data
if not result_processor
else result_processor(response.data)
)
def get(
self,
url: str,
auth: bool = True,
params: dict | None = None,
data: str | bytes | dict | None = None,
timeout: int | None = None,
result_processor: Callable[[JSONVal], JSONVal] = None,
auto_retry: bool | None = None,
**kwargs,
):
"""
GET 请求
:param url: 请求地址
:param auth: 是否使用token认证(默认开启)
:param params: 请求query参数
:param data: 请求体
:param timeout: 超时时长
:param result_processor: 结果处理函数
:param auto_retry: token过期是否自动重试
:param kwargs:
:return: JSON 返回
"""
return self._request(
method="get",
url_or_endpoint=url,
auth=auth,
params=params,
data=data,
timeout=timeout,
result_processor=result_processor,
auto_retry=auto_retry,
**kwargs,
)
def post(
self,
url: str,
auth: bool = True,
params: dict | None = None,
data: str | bytes | dict | None = None,
timeout: int | None = None,
result_processor: Callable[[JSONVal], JSONVal] = None,
auto_retry: bool | None = None,
**kwargs,
):
"""
POST 请求
:param url: 请求地址
:param auth: 是否使用token认证(默认开启)
:param params: 请求query参数
:param data: 请求体
:param timeout: 超时时长
:param result_processor: 结果处理函数
:param auto_retry: token过期是否自动重试
:param kwargs:
:return: JSON 返回
"""
return self._request(
method="post",
url_or_endpoint=url,
auth=auth,
params=params,
data=data,
timeout=timeout,
result_processor=result_processor,
auto_retry=auto_retry,
**kwargs,
)
def refresh_access_token(self):
"""fetch access token"""
logger.info("Fetching access token")
self._refresh()
def login(self) -> JSONVal:
"""
登录接口,用于获取 access_token 和 refresh_token (可选)
:return: 响应
"""
raise NotImplementedError()
def _login(self):
"""
登录,完善APP信息
:return:
"""
logger.info("login using credentials")
try:
result = self.login()
except ZqAuthClientException as e:
if e.errcode == ZqAuthResponseType.LoginFailed.code:
logger.error("App login failed, please check your credentials")
raise AppLoginFailedException(
e.errcode, e.errmsg, e.client, e.request, e.response
)
else:
raise e
self.id = result.get("id")
self.name = result.get("name")
self.username = result.get("username")
self.access_token = result.get("access")
self.refresh_token = result.get("refresh", None)
self.expire_time = datetime.fromisoformat(result.get("expire_time"))
def refresh(self) -> JSONVal:
"""
刷新 access_token
"""
raise NotImplementedError()
def _refresh(self):
logger.info("refresh access_token")
if self.refresh_token is None:
self._login()
return
try:
result = self.refresh()
self.access_token = result.get("access")
self.expire_time = datetime.fromisoformat(result.get("expire_time"))
except ZqAuthClientException as e:
if (
e.errcode == ZqAuthResponseType.RefreshTokenInvalid.code
): # refresh token 过期
self.refresh_token = None
self._login()
else:
raise e | zq-auth-sdk | /zq_auth_sdk-0.1.0.tar.gz/zq_auth_sdk-0.1.0/zq_auth_sdk/client/base.py | base.py |
import uuid
from zq_auth_sdk.client.api.base import BaseZqAuthAPI
from zq_auth_sdk.entities.response import ZqAuthResponseType
from zq_auth_sdk.exceptions import (
ThirdLoginFailedException,
UserNotFoundException,
ZqAuthClientException,
)
class ZqAuthApp(BaseZqAuthAPI):
def test(self):
"""
测试接口
https://console-docs.apipost.cn/preview/7abdc86c0ce49501/bf92b4d8832fa312?target_id=ca539c47-8e3e-4314-a560-2913a36294b0 # noqa
"""
return self._get("/")
def app_info(self):
"""
获取 app 信息
https://console-docs.apipost.cn/preview/7abdc86c0ce49501/bf92b4d8832fa312?target_id=b66a33e6-ae37-4841-a540-69c1c07c133d # noqa
"""
return self._get(f"/apps/{self.id}/")
def sso(self, code: str):
"""
sso 单点登录 获取用户 union id
https://console-docs.apipost.cn/preview/7abdc86c0ce49501/bf92b4d8832fa312?target_id=b66a33e6-ae37-4841-a540-69c1c07c133d # noqa
:param code: 临时 code
:raise ThirdLoginFailedException: code 无效
"""
try:
return self._post(url="/sso/union-id/", data={"code": code})
except ZqAuthClientException as e:
if e.errcode == ZqAuthResponseType.ResourceNotFound.code:
raise ThirdLoginFailedException(
e.errcode, e.errmsg, e.client, e.request, e.response
)
else:
raise e
def user_info(self, union_id: uuid.UUID | str, detail: bool = True):
"""
获取用户信息
https://console-docs.apipost.cn/preview/7abdc86c0ce49501/bf92b4d8832fa312?target_id=1fad0fc0-12f9-4dc2-8ed6-cdffa2b2fd4e # noqa
:param union_id: 用户 union id
:param detail: 是否返回详细信息
:raise UserNotFountException: union-id 无效 (用户解除绑定)
"""
if isinstance(union_id, uuid.UUID):
union_id = union_id.hex
try:
return self._get(f"/users/{union_id}/", params={"detail": detail})
except ZqAuthClientException as e:
if e.errcode == ZqAuthResponseType.ResourceNotFound.code:
raise UserNotFoundException(
e.errcode, e.errmsg, e.client, e.request, e.response
)
else:
raise e | zq-auth-sdk | /zq_auth_sdk-0.1.0.tar.gz/zq_auth_sdk-0.1.0/zq_auth_sdk/client/api/app.py | app.py |
from dataclasses import dataclass
from enum import Enum, unique
from typing import TYPE_CHECKING, Type
from requests import Response
from zq_auth_sdk.exceptions import ZqAuthClientException
if TYPE_CHECKING:
from zq_auth_sdk.entities.types import JSONVal
class ZqAuthResponseTypeEnum(Enum):
@property
def code(self) -> str:
"""
根据枚举名称取状态码code
:return: 状态码code
"""
return self.value[0]
@property
def detail(self) -> str:
"""
根据枚举名称取状态说明message
:return: 状态说明message
"""
return self.value[1]
@property
def status_code(self) -> int:
"""
根据枚举名称取状态码status_code
:return: 状态码status_code
"""
return self.value[2]
# region ResponseType
@unique
class ZqAuthResponseType(ZqAuthResponseTypeEnum):
"""API状态类型"""
Success = ("00000", "", 200)
ClientError = ("A0000", "用户端错误", 400)
LoginFailed = ("A0210", "用户登录失败", 400)
UsernameNotExist = ("A0211", "用户名不存在", 400)
PasswordWrong = ("A0212", "用户密码错误", 400)
LoginFailedExceed = ("A0213", "用户输入密码次数超限", 400)
PhoneNotExist = ("A0214", "手机号不存在", 400)
LoginExpired = ("A0220", "用户登录已过期", 401)
TokenInvalid = ("A0221", "token 无效或已过期", 401)
RefreshTokenInvalid = ("A0221", "refresh token 无效或已过期", 401)
ThirdLoginFailed = ("A0230", "用户第三方登录失败", 401)
ThirdLoginCaptchaError = ("A0232", "用户第三方登录验证码错误", 401)
ThirdLoginExpired = ("A0233", "用户第三方登录已过期", 401)
PermissionError = ("A0300", "用户权限异常", 403)
NotLogin = ("A0310", "用户未登录", 401)
NotActive = ("A0311", "用户未激活", 403)
PermissionDenied = ("A0312", "用户无权限", 403)
ServiceNotAvailable = ("A0313", "不在服务时段", 403)
UserBlocked = ("A0320", "黑名单用户", 403)
UserFrozen = ("A0321", "账号被冻结", 403)
IPInvalid = ("A0322", "非法 IP 地址", 401)
ParamError = ("A0400", "用户请求参数错误", 400)
JSONParseFailed = ("A0410", "请求 JSON 解析错误", 400)
ParamEmpty = ("A0420", "请求必填参数为空", 400)
ParamValidationFailed = ("A0430", "请求参数值校验失败", 400)
RequestError = ("A0500", "用户请求服务异常", 400)
APINotFound = ("A0510", "请求接口不存在", 404)
MethodNotAllowed = ("A0511", "请求方法不允许", 405)
APIThrottled = ("A0512", "请求次数超出限制", 429)
HeaderNotAcceptable = ("A0513", "请求头无法满足", 406)
ResourceNotFound = ("A0514", "请求资源不存在", 404)
UploadError = ("A0600", "用户上传文件异常", 400)
UnsupportedMediaType = ("A0610", "用户上传文件类型不支持", 400)
UnsupportedMediaSize = ("A0613", "用户上传文件大小错误", 400)
VersionError = ("A0700", "用户版本异常", 400)
AppVersionError = ("A0710", "用户应用安装版本不匹配", 400)
APIVersionError = ("A0720", "用户 API 请求版本不匹配", 400)
ServerError = ("B0000", "系统执行出错", 500)
ServerTimeout = ("B0100", "系统执行超时", 500)
ServerResourceError = ("B0200", "系统资源异常", 500)
ThirdServiceError = ("C0000", "调用第三方服务出错", 500)
MiddlewareError = ("C0100", "中间件服务出错", 500)
ThirdServiceTimeoutError = ("C0200", "第三方系统执行超时", 500)
DatabaseError = ("C0300", "数据库服务出错", 500)
CacheError = ("C0400", "缓存服务出错", 500)
NotificationError = ("C0500", "通知服务出错", 500)
# endregion
@dataclass
class ZqAuthResponse:
"""API响应数据结构"""
code: str
detail: str
msg: str
data: "JSONVal"
_response = None
_client = None
def __init__(
self,
response: Response,
client=None,
raise_exception: bool = False,
):
"""
Api 响应
:param response: 响应
:param raise_exception: 是否对非正常code报错
:param client: 当前客户端
:raise ZqAuthClientException: 非正常响应异常
"""
self._response = response
self._client = client
result = response.json()
self.code = result["code"]
self.msg = result["msg"]
self.detail = result["detail"]
self.data = result["data"]
if raise_exception:
self.check_exception()
def __str__(self) -> str:
return f"code: {self.code}, detail: {self.detail}, msg: {self.msg}, data: {self.data}"
def __repr__(self):
return self.data
def check_exception(
self, exception: Type[ZqAuthClientException] = ZqAuthClientException
):
"""
检测 code 并 raise 异常
:param exception: 产生的异常
:return:
"""
if self.code != ZqAuthResponseType.Success.code:
raise exception(
errcode=self.code,
errmsg=self.msg,
client=self._client,
request=self._response.request,
response=self._response,
) | zq-auth-sdk | /zq_auth_sdk-0.1.0.tar.gz/zq_auth_sdk-0.1.0/zq_auth_sdk/entities/response.py | response.py |
# zq-config
----
# Introduction
zq-config is wrapper library for config centers, such as nacos, etcd, apollo.
Here is a program to use `nacos` :
```python
from zq_config import ZQ_Config
SERVER_ADDRESSES = "localhost:8848"
NAMESPACE = "sho-test"
USER_NAME= "nacos"
PASSWORD= "nacos"
zq = ZQ_Config("nacos", server_addresses=SERVER_ADDRESSES, namespace=NAMESPACE, username=USER_NAME, password=PASSWORD)
config = zq.get("config-id", "dev")
print(config)
```
# TODO
- [x] nacos
- [ ] etcd
- [ ] apollo
| zq-config | /zq-config-0.1.5.tar.gz/zq-config-0.1.5/README.md | README.md |
<div align="center">
# zq-django-template
**自强 Studio Django 模板**
<!-- markdownlint-disable-next-line MD036 -->
</div>
<p align="center">
<a href="https://zq-django-util.readthedocs.io/en/latest/?badge=latest">
<img src="https://readthedocs.org/projects/zq-django-template/badge/?version=latest" alt="Documentation Status" >
</a>
<a href="https://pypi.org/project/zq-django-template/">
<img src="https://img.shields.io/pypi/v/zq-django-template" alt="pypi">
</a>
</p>
<!-- markdownlint-enable MD033 -->
[English Version](README_EN.md)
## 简介
zq-django-util 是自强 Studio 开发的 Django 模板,用于快速搭建 Django+DRF 项目。其中包括:
- 使用 [zq-django-util](https://pypi.org/project/zq-django-util/) 工具搭建的基础框架
- JWT 认证
- OSS 存储、直传示例
- 微信小程序登录示例(可选)
- Sentry 监控(可选)
- Celery 异步任务(可选)
- git 提交规范与代码检查
- Docker 配置与自动化部署
## 使用说明
详见 [使用说明文档](docs/usage.md)
## 开发
使用 poetry 安装依赖,并进行项目修改
| zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/README.md | README.md |
from loguru import logger
import socket
from server.settings.components.common import INSTALLED_APPS, MIDDLEWARE
from server.settings.components.drf import REST_FRAMEWORK
# Setting the development status:
DEBUG = True
SERVER_URL = "https://"
ALLOWED_HOSTS = ["*"]
CSRF_TRUSTED_ORIGINS = [
"http://127.0.0.1:8000",
"http://localhost:8000",
]
CSRF_TRUSTED_ORIGINS += [SERVER_URL]
# Installed apps for development only:
INSTALLED_APPS += [
# Better debug:
"debug_toolbar",
"nplusone.ext.django",
# Linting migrations:
"django_migration_linter",
# django-extra-checks:
'extra_checks',
]
# Django debug toolbar:
# https://django-debug-toolbar.readthedocs.io
MIDDLEWARE += [
"debug_toolbar.middleware.DebugToolbarMiddleware",
# https://github.com/bradmontgomery/django-querycount
# Prints how many queries were executed, useful for the APIs.
"querycount.middleware.QueryCountMiddleware",
]
# https://django-debug-toolbar.readthedocs.io/en/stable/installation.html#configure-internal-ips
try: # This might fail on some OS
INTERNAL_IPS = [
"{0}.1".format(ip[: ip.rfind(".")])
for ip in socket.gethostbyname_ex(socket.gethostname())[2]
]
except socket.error: # pragma: no cover
INTERNAL_IPS = []
INTERNAL_IPS += ["127.0.0.1"]
def _custom_show_toolbar(request):
"""Only show the debug toolbar when in the debug mode"""
return DEBUG
DEBUG_TOOLBAR_CONFIG = {
"SHOW_TOOLBAR_CALLBACK": "server.settings.environments.development._custom_show_toolbar",
}
# nplusone
# https://github.com/jmcarp/nplusone
# Should be the first in line:
MIDDLEWARE = [ # noqa: WPS440
"nplusone.ext.django.NPlusOneMiddleware",
] + MIDDLEWARE
# Logging N+1 requests:
NPLUSONE_RAISE = False # comment out if you want to allow N+1 requests
NPLUSONE_LOGGER = logger
NPLUSONE_LOG_LEVEL = logger.level("INFO")
NPLUSONE_WHITELIST = [
{"model": "admin.*"},
]
# django-test-migrations
# https://github.com/wemake-services/django-test-migrations
# Set of badly named migrations to ignore:
DTM_IGNORED_MIGRATIONS = frozenset((("axes", "*"),))
# django-extra-checks
# https://github.com/kalekseev/django-extra-checks
EXTRA_CHECKS = {
"checks": [
# 所有模型字段需要verbose_name解释:
"field-verbose-name",
# Forbid `unique_together`:
"no-unique-together",
# Require non empty `upload_to` argument:
"field-file-upload-to",
# Use the indexes option instead:
"no-index-together",
# Each model must be registered in admin:
"model-admin",
# FileField/ImageField must have non-empty `upload_to` argument:
"field-file-upload-to",
# Text fields shouldn't use `null=True`:
"field-text-null",
# Don't pass `null=False` to model fields (this is django default)
"field-null",
# ForeignKey fields must specify db_index explicitly if used in
# other indexes:
{"id": "field-foreign-key-db-index", "when": "indexes"},
# If field nullable `(null=True)`,
# then default=None argument is redundant and should be removed:
"field-default-null",
# Fields with choices must have companion CheckConstraint
# to enforce choices on database level
"field-choices-constraint",
# Related fields must specify related_name explicitly:
"field-related-name",
],
}
# DRF Browsable API Login
REST_FRAMEWORK["DEFAULT_RENDERER_CLASSES"] = REST_FRAMEWORK.get(
"DEFAULT_RENDERER_CLASSES", []
) + [
"rest_framework.renderers.BrowsableAPIRenderer"
]
REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] = REST_FRAMEWORK.get(
"DEFAULT_AUTHENTICATION_CLASSES", []
) + [
"rest_framework.authentication.SessionAuthentication",
] | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/settings/environments/development.py | development.py |
import datetime
from server.settings.util import BASE_DIR, config
SECRET_KEY = config("DJANGO_SECRET_KEY")
# region Application
DJANGO_APPS: list[str] = [
"simpleui", # admin ui(必须在第一行)
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles"
]
THIRD_PARTY_APPS: list[str] = [
"rest_framework", # DRF
"corsheaders", # CORS 跨域
"rest_framework_simplejwt", # JWT
"drf_spectacular", # api 文档
"django_filters", # 过滤器
"zq_django_util.utils.oss", # oss
"method_override", # 方法重写
"drf_standardized_errors", # drf错误初步处理
'django_extensions', # Django 扩展
{%- if cookiecutter.use_celery == 'y' %}
"django_celery_results", # celery兼容支持
"django_celery_beat", # celery定时任务
{%- endif %}
'zq_django_util.logs', # 日志记录
]
LOCAL_APPS: list[str] = [
{%- if cookiecutter.use_celery == 'y' %}
"async_tasks", # celery异步任务
{%- endif %}
"users", # 用户
"oauth", # 认证
"files", # 文件
]
INSTALLED_APPS: list[str] = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE: list[str] = [
"corsheaders.middleware.CorsMiddleware", # CORS 跨域(最外层)
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"method_override.middleware.MethodOverrideMiddleware", # 请求方法修改
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"zq_django_util.logs.middleware.APILoggerMiddleware", # 请求日志
]
ROOT_URLCONF = "server.urls"
WSGI_APPLICATION = "server.wsgi.application"
ASGI_APPLICATION = "server.asgi.application"
# endregion
# region Templates
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
BASE_DIR.joinpath("server", "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
# endregion
# region Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# endregion
# region Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "zh-hans"
TIME_ZONE = "Asia/Shanghai"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# endregion
# region CORS
CORS_ORIGIN_WHITELIST = (
"http://127.0.0.1:8000",
"http://localhost:8000",
"https://127.0.0.1:8000",
"https://localhost:8000",
)
CORS_ALLOW_CREDENTIALS = True # 允许携带cookie
# endregion
# region JWT
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": datetime.timedelta(days=1),
"REFRESH_TOKEN_LIFETIME": datetime.timedelta(days=10),
"ALGORITHM": "HS256",
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
}
# endregion
# region 用户模型
AUTH_USER_MODEL = "users.User"
USER_ID_FIELD = "id"
# endregion
RUNSERVER_PLUS_EXCLUDE_PATTERNS = [
"*\\Lib\\*",
"*/Lib/*",
] | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/settings/components/common.py | common.py |
import os
from copy import deepcopy
from string import Template
from typing import Any, Dict, Union, Optional
import dj_database_url
import django_cache_url
from server.settings.util import BASE_DIR, config
class DatabaseConfig:
url = "sqlite:///" + os.path.join(BASE_DIR, "db.sqlite3")
DATABASES = { # {数据库名: 额外配置}
"default": {
"OPTIONS": {"charset": "utf8mb4"},
}
}
@classmethod
def get(
cls, url: Optional[str] = None
) -> Dict[str, Dict[str, Union[str, int, Dict[str, str]]]]:
if url is None:
url = config("DATABASE_URL", cls.url)
res = dj_database_url.parse(url)
if url.startswith("sqlite"):
return {
k: {**res}
for k, v in cls.DATABASES.items()
}
return {
k: {**v, **res}
for k, v in cls.DATABASES.items()
}
class CacheConfig:
url: str = "locmem://"
CACHES: dict[str, int] = { # {Cache名称: 编号}
"default": 0,
"session": 1,
"wechat_session": 2, # wechat
"db_cache": 3, # 数据库缓存
"view": 4, # 视图缓存
"celery": 5, # celery缓存
}
@staticmethod
def parse_url(url: str, db: int = 0) -> Dict[str, Any]:
cache_url = url
if cache_url.endswith('/'):
cache_url += f'{db}'
else:
cache_url += f'/{db}'
return django_cache_url.parse(cache_url)
@classmethod
def get(cls, url: Optional[str] = None) -> Dict[str, Dict[str, Any]]:
if url is None:
url = config("CACHE_URL", cls.url)
return {
k: {
**(cls.parse_url(url, v))
}
for k, v in cls.CACHES.items()
}
class LogConfig:
level: str = config("LOG_LEVEL", "INFO")
logger_level: Dict[str, str] = {}
LOG_ROOT = BASE_DIR.joinpath("logs", "django")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] "
"[%(levelname)s]- %(message)s"
},
"simple": {"format": "%(levelname)s %(message)s"}, # 简单格式
},
"handlers": {
"server": {
"class": "server.utils.logging_handler.InterceptTimedRotatingFileHandler",
"filename": LOG_ROOT.joinpath("server", "server.alog"),
"when": "D",
"interval": 1,
"backupCount": 1,
"formatter": "standard",
"encoding": "utf-8",
},
"db": {
"class": "server.utils.logging_handler.InterceptTimedRotatingFileHandler",
"filename": LOG_ROOT.joinpath("db", "db.log"),
"when": "D",
"interval": 1,
"backupCount": 1,
"formatter": "standard",
"encoding": "utf-8",
"logging_levels": [
"debug"
], # 注意这里,这是自定义类多了一个参数,因为我只想让db日志有debug文件,所以我只看sql,这个可以自己设置
},
"debug": {
"class": "server.utils.logging_handler.InterceptTimedRotatingFileHandler",
"filename": LOG_ROOT.joinpath("debug", "debug.log"),
"when": "D",
"interval": 1,
"backupCount": 1,
"formatter": "standard",
"encoding": "utf-8",
},
},
"loggers": {
# Django全局绑定
"django": {
"handlers": ["server"],
"propagate": True,
"level": "${level}",
},
"celery": {
"handlers": ["server"],
"propagate": False,
"level": "${level}",
},
"django.db.backends": {
"handlers": ["db"],
"propagate": False,
"level": "${level}",
},
"django.request": {
"handlers": ["server"],
"propagate": False,
"level": "${level}",
},
# Werkzeug Debug
"werkzeug": {
"handlers": ["debug"],
"propagate": False,
"level": "${level}",
},
},
}
@classmethod
def get_config(cls) -> Dict[str, Dict[str, Any]]:
if not os.path.exists(cls.LOG_ROOT):
os.makedirs(cls.LOG_ROOT)
res = deepcopy(cls.LOGGING)
for logger in res["loggers"]:
if logger in cls.logger_level:
res["loggers"][logger]["level"] = Template(
res["loggers"][logger]["level"]
).substitute(level=cls.logger_level[logger])
else:
name = f"LOG_LEVEL_{logger}"
res["loggers"][logger]["level"] = Template(
res["loggers"][logger]["level"]
).substitute(level=config(name, cls.level))
return res | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/settings/components/configs.py | configs.py |
import logging
import os
from loguru import logger
class InterceptTimedRotatingFileHandler(logging.Handler):
"""
自定义反射时间回滚日志记录器
缺少命名空间
"""
def __init__(
self,
filename,
when="d",
interval=1,
backupCount=15,
encoding="utf-8",
delay=False,
utc=False,
atTime=None,
logging_levels="all",
):
super(InterceptTimedRotatingFileHandler, self).__init__()
self.atTime = atTime
filename = os.path.abspath(filename)
when = when.lower()
# 需要本地用不同的文件名做为不同日志的筛选器
self.logger_ = logger.bind(sime=filename)
self.filename = filename
key_map = {
"h": "hour",
"w": "week",
"s": "second",
"m": "minute",
"d": "day",
}
# 根据输入文件格式及时间回滚设立文件名称
rotation = "%d %s" % (interval, key_map[when])
retention = "%d %ss" % (backupCount, key_map[when])
time_format = "{time:%Y-%m-%d_%H-%M-%S}"
if when == "s":
time_format = "{time:%Y-%m-%d_%H-%M-%S}"
elif when == "m":
time_format = "{time:%Y-%m-%d_%H-%M}"
elif when == "h":
time_format = "{time:%Y-%m-%d_%H}"
elif when == "d":
time_format = "{time:%Y-%m-%d}"
elif when == "w":
time_format = "{time:%Y-%m-%d}"
level_keys = ["info"]
# 构建一个筛选器
levels = {
"debug": lambda x: "DEBUG" == x["level"].name.upper()
and x["extra"].get("sime") == filename,
"error": lambda x: "ERROR" == x["level"].name.upper()
and x["extra"].get("sime") == filename,
"info": lambda x: "INFO" == x["level"].name.upper()
and x["extra"].get("sime") == filename,
"warning": lambda x: "WARNING" == x["level"].name.upper()
and x["extra"].get("sime") == filename,
}
# 根据输出构建筛选器
if isinstance(logging_levels, str):
if logging_levels.lower() == "all":
level_keys = levels.keys()
elif logging_levels.lower() in levels:
level_keys = [logging_levels]
elif isinstance(logging_levels, (list, tuple)):
level_keys = logging_levels
for k, f in {_: levels[_] for _ in level_keys}.items():
# 为防止重复添加sink,而重复写入日志,需要判断是否已经装载了对应sink,防止其使用秘技:反复横跳。
filename_fmt = filename.replace(
".log", "_%s_%s.log" % (time_format, k)
)
# noinspection PyUnresolvedReferences,PyProtectedMember
file_key = {
_._name: han_id
for han_id, _ in self.logger_._core.handlers.items()
}
filename_fmt_key = "'{}'".format(filename_fmt)
if filename_fmt_key in file_key:
continue
# self.logger_.remove(file_key[filename_fmt_key])
self.logger_.add(
filename_fmt,
retention=retention,
encoding=encoding,
level=self.level,
rotation=rotation,
compression="tar.gz", # 日志归档自行压缩文件
delay=delay,
enqueue=True,
filter=f,
)
def emit(self, record):
try:
level = self.logger_.level(record.levelname).name
except ValueError:
level = record.levelno
frame, depth = logging.currentframe(), 2
# 把当前帧的栈深度回到发生异常的堆栈深度,不然就是当前帧发生异常而无法回溯
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
self.logger_.opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage()
) | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/utils/logging_handler.py | logging_handler.py |
from django.conf import settings
from django.core.cache import caches
from wechatpy import WeChatClientException
from wechatpy.client import WeChatClient
from wechatpy.session import SessionStorage
from zq_django_util.exceptions import ApiException
from zq_django_util.response import ResponseType
class WechatCache(SessionStorage):
def __init__(self, cache):
self.cache = cache
def get(self, key, default=None):
return self.cache.get(key, default)
def set(self, key, value, ttl=None):
self.cache.set(key, value, timeout=ttl)
def delete(self, key):
self.cache.delete(key)
wechat_client = WeChatClient(
settings.APPID,
settings.SECRET,
session=WechatCache(caches["wechat_session"]),
)
ENV_VERSION = "release"
if (
hasattr(settings, "SERVER_URL") and
hasattr(settings, "PRODUCTION_SERVER_LIST") and
hasattr(settings, "DEVELOPMENT_SERVER_LIST")
):
if settings.SERVER_URL in settings.PRODUCTION_SERVER_LIST:
ENV_VERSION = "release"
elif settings.SERVER_URL in settings.DEVELOPMENT_SERVER_LIST:
ENV_VERSION = "trial"
def get_openid(code: str) -> dict:
"""
获取小程序登录后 openid
:param code: 临时 code
:return: openid
"""
try:
return wechat_client.wxa.code_to_session(code)
except WeChatClientException as e:
if e.errcode == 40029:
raise ApiException(ResponseType.ThirdLoginExpired, "微信登录失败,请重新登录")
raise ApiException(
ResponseType.ThirdLoginFailed,
f"微信登录失败 [{e.errcode}] {e.errmsg}",
record=True,
)
def get_user_phone_num(code: str) -> str:
"""
获取用户手机号
:param code: 临时 code
:return: 手机号
"""
try:
result = wechat_client.wxa.getuserphonenumber(code)
if result["phone_info"]["countryCode"] != "86":
raise ApiException(ResponseType.ParamValidationFailed, "仅支持中国大陆手机号")
return result["phone_info"]["purePhoneNumber"]
except WeChatClientException as e:
raise ApiException(
ResponseType.ThirdLoginFailed, f"[{e.errcode}] {e.errmsg}"
) | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/utils/wechat.py | wechat.py |
import datetime
from django.conf import settings
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from zq_django_util.exceptions import ApiException
from zq_django_util.response import ResponseType
from zq_django_util.utils.oss.utils import split_file_name, get_token, check_callback_signature
from files.models import File
from files.serializers import FileCreateSerializer, FileSerializer
from users.models import User
from server.utils.choices import FileTypeChoice
class FileViewSet(ModelViewSet):
"""
文件视图集
"""
queryset = File.objects.all()
serializer_class = FileSerializer
permission_classes = [IsAuthenticated]
def get_serializer_class(self):
# 动态设置序列化器
if self.action == "create":
return FileCreateSerializer
else:
return FileSerializer
def get_queryset(self):
# 获取当前用户的文件
if self.action == "callback":
return self.queryset
return self.queryset.filter(user=self.request.user)
def get_permissions(self):
if self.action in ["callback"]:
return [AllowAny()]
return super(FileViewSet, self).get_permissions()
def perform_destroy(self, instance):
instance.file.delete() # 删除文件
instance.delete()
def perform_update(self, serializer):
if "file" in self.request.data: # 更新文件时删除源文件
instance = self.get_object()
instance.file.delete()
serializer.save()
else:
serializer.save()
@action(methods=["post"], detail=False)
def token(self, request):
"""
获取OSS直传token
"""
user: User = request.user
if not user.is_authenticated:
raise ApiException(ResponseType.NotLogin, "您尚未登录,请登录后重试")
upload_type = request.data.get("type")
filename = request.data.get("name")
if not upload_type:
raise ApiException(
ResponseType.ParamValidationFailed,
"请传入type参数",
record=True
)
if not filename:
raise ApiException(
ResponseType.ParamValidationFailed,
"请传入name参数",
record=True
)
info: dict | None = None
if upload_type in FileTypeChoice.choices:
name, ext = split_file_name(filename)
file = File.objects.create(user=user, name=name, ext=ext)
callback_dict = {
"callbackUrl": f"{settings.SERVER_URL}/api/files/{file.id}/callback/?type={upload_type}",
"callbackBody": "file=${object}&size=${size}",
"callbackBodyType": "application/x-www-form-urlencoded",
}
info = get_token(
f"media/files/{filename}",
callback=callback_dict,
)
info["file_id"] = file.id
if info:
return Response(info)
else:
raise ApiException(
ResponseType.PermissionDenied,
"您没有权限上传此文件",
record=True
)
@action(methods=["post"], detail=True)
def callback(self, request, pk=None):
"""
OSS回调
"""
if not check_callback_signature(request):
raise ApiException(
ResponseType.PermissionDenied,
"OSS回调签名检验失败",
record=True
)
instance = self.get_object()
upload_type = request.GET.get("type")
url = request.data.get("file")
name = url.lstrip(settings.MEDIA_URL.lstrip("/"))
if upload_type in FileTypeChoice.choices:
instance.file.name = name
instance.size = request.data.get("size")
instance.type = FileTypeChoice(upload_type)
else:
raise ApiException(
ResponseType.ParamValidationFailed,
"type参数错误",
record=True
)
instance.update_time = datetime.datetime.now()
instance.save()
return Response(FileSerializer(instance).data) | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/apps/files/views.py | views.py |
import os
import shlex
import subprocess
from pathlib import Path
from time import sleep
from django.conf import settings
from django.utils import autoreload
from loguru import logger
from django.core.management import BaseCommand, CommandError
from server.settings import config
class Command(BaseCommand):
help = 'Run celery components'
work_dir = os.getcwd()
conda_env_name = None
conda_activate_command = None
conda_deactivate_command = None
options = None
beat_thread = None
worker_thread = None
flower_thread = None
def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
super().__init__(stdout, stderr, no_color, force_color)
def add_arguments(self, parser):
parser.add_argument(
'--beat',
action='store_true',
dest='beat_enabled',
default=False,
help='Enable celery beat',
)
parser.add_argument(
'--worker',
action='store_true',
dest='worker_enabled',
default=False,
help='Enable celery worker',
)
parser.add_argument(
'--flower',
action='store_true',
dest='flower_enabled',
default=False,
help='Enable celery flower',
)
parser.add_argument(
'--loglevel',
action='store',
dest='loglevel',
default='INFO',
help='Set celery log level',
)
parser.add_argument(
'--broker',
action='store',
dest='broker',
default=config('CELERY_BROKER_URL', None),
help='Set celery broker',
)
def check_arguments(self, options):
if not options['beat_enabled'] and not options['worker_enabled'] and not options['flower_enabled']:
raise CommandError('You must enable at least one of the celery components: --beat, --worker, --flower')
if options['broker'] is None:
raise CommandError('You must set a broker for celery: --broker, or define '
'CELERY_BROKER_URL in your .env file')
self.options = options
def prepare_env(self):
# detect conda
self.conda_env_name = os.environ.get('CONDA_DEFAULT_ENV', None)
if self.conda_env_name is not None:
logger.info('Conda env name: {}', self.conda_env_name)
self.conda_activate_command = f'conda activate {self.conda_env_name}'
self.conda_deactivate_command = f'conda deactivate'
def run_command(self, command):
if self.conda_env_name is not None:
command = f'{self.conda_activate_command} && {command}'
logger.debug('Running command: {}', command)
return subprocess.Popen(
shlex.split(command),
shell=True,
cwd=self.work_dir,
env=os.environ
)
def run_celery_beat(self):
logger.info('Running celery beat...')
self.beat_thread = self.run_command(
f'celery -A server -b {self.options["broker"]} beat -l {self.options["loglevel"]}'
)
def run_celery_worker(self):
logger.info('Running celery worker...')
self.worker_thread = self.run_command(
f'celery -A server -b {self.options["broker"]} worker -l {self.options["loglevel"]} -P solo'
)
def run_celery_flower(self):
logger.info('Running celery flower...')
self.flower_thread = self.run_command(
f'celery -A server -b {self.options["broker"]} flower -l {self.options["loglevel"]}'
)
def start(self):
if self.options['beat_enabled']:
self.run_celery_beat()
if self.options['worker_enabled']:
self.run_celery_worker()
if self.options['flower_enabled']:
self.run_celery_flower()
def stop(self):
if self.beat_thread is not None:
logger.info('Stopping celery beat...')
self.beat_thread.send_signal(0)
self.beat_thread.wait()
self.beat_thread = None
if self.worker_thread is not None:
logger.info('Stopping celery worker...')
self.worker_thread.send_signal(0)
self.worker_thread.wait()
self.worker_thread = None
if self.flower_thread is not None:
logger.info('Stopping celery flower...')
self.flower_thread.send_signal(0)
self.flower_thread.wait()
self.flower_thread = None
if os.name == 'nt':
os.system('taskkill /im celery.exe /f')
else:
os.system('pkill celery')
def run(self):
self.stop()
self.start()
while True:
if self.beat_thread is not None:
self.beat_thread.wait()
if self.worker_thread is not None:
self.worker_thread.wait()
if self.flower_thread is not None:
self.flower_thread.wait()
self.start()
@staticmethod
def tasks_watchdog(sender, **kwargs):
sender.watch_dir(settings.BASE_DIR, '**/*tasks.py')
def handle(self, *args, **options):
self.check_arguments(options)
self.prepare_env()
autoreload.autoreload_started.connect(self.tasks_watchdog)
autoreload.run_with_reloader(self.run) | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/apps/async_tasks/management/commands/runcelery.py | runcelery.py |
from rest_framework import status, mixins
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework_simplejwt.exceptions import TokenError
from rest_framework_simplejwt.views import TokenObtainPairView
from zq_django_util.exceptions import ApiException
from zq_django_util.response import ResponseType
{%- if cookiecutter.use_wechat == 'y' %}
from zq_django_util.utils.auth.serializers import PasswordLoginSerializer, OpenIdLoginSerializer
{%- else %}
from zq_django_util.utils.auth.serializers import PasswordLoginSerializer
{%- endif %}
from users.models import User
{%- if cookiecutter.use_wechat == 'y' %}
from .serializers import WechatLoginSerializer, SmsMsgSerializer
{%- else %}
from .serializers import SmsMsgSerializer
{%- endif %}
{%- if cookiecutter.use_wechat == 'y' %}
class OpenIdLoginView(TokenObtainPairView):
"""
open id 登录视图(仅供测试微信登录使用)
"""
queryset = User.objects.all()
serializer_class = OpenIdLoginSerializer
def post(self, request, *args, **kwargs):
"""
增加 post 方法, 支持 open id 登录
"""
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
except TokenError:
raise ApiException(
ResponseType.ThirdLoginFailed,
msg="微信登录失败",
detail="生成token时simple jwt发生TokenError",
record=True,
)
return Response(serializer.validated_data, status=status.HTTP_200_OK)
class WechatLoginView(OpenIdLoginView):
"""
微信登录视图
"""
queryset = User.objects.all()
serializer_class = WechatLoginSerializer
{%- endif %}
class PasswordLoginView(TokenObtainPairView):
serializer_class = PasswordLoginSerializer
class SmsMsgView(mixins.ListModelMixin, mixins.CreateModelMixin, GenericViewSet):
"""
## create:
- code为空: 发送短信验证码
- code不为空: 验证短信验证码
测试 从 console 中展示结果
"""
queryset = None
serializer_class = SmsMsgSerializer
permission_classes = [AllowAny]
def list(self, request, *args, **kwargs):
# 便于 browser api 访问
# 去掉后无法通过浏览器界面访问 post 接口
raise ApiException(ResponseType.MethodNotAllowed, msg="不支持GET方法发送短信") | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/apps/oauth/views.py | views.py |
import random
from django.core.cache import cache
from loguru import logger
from zq_django_util.exceptions import ApiException
from zq_django_util.response import ResponseType
{%- if cookiecutter.use_celery == 'y' %}
from .tasks import send_sms_msg
{%- endif %}
class VerifyCodeUtil:
VERIFY_KEY = "sms:verify_code:phone_{}"
VERIFY_TIMEOUT = 3 * 60
FLAG_KEY = "sms:flag:phone_{}"
FLAG_TIMEOUT = 60
@staticmethod
def _generate_code() -> int:
code = random.randint(100000, 999999)
return code
@classmethod
def _get_cache_code(cls, phone: str) -> int | None:
key = cls.VERIFY_KEY.format(phone)
code = cache.get(key)
return code
@classmethod
def _set_cache_code(cls, phone: str, code: int | None) -> None:
key = cls.VERIFY_KEY.format(phone)
if code is not None:
# 记录验证码
cache.set(key, code, cls.VERIFY_TIMEOUT)
else:
# 删除验证码
cache.delete(key)
@classmethod
def _get_cache_flag(cls, phone: str) -> bool:
key = cls.FLAG_KEY.format(phone)
flag = cache.get(key)
return flag or False
@classmethod
def _set_cache_flag(cls, phone: str) -> None:
key = cls.FLAG_KEY.format(phone)
cache.set(key, True, cls.FLAG_TIMEOUT)
@classmethod
def send_sms_verify_code(cls, phone: str) -> None:
"""
发送短信验证码
:param phone: 手机号
"""
if cls._get_cache_flag(phone):
raise ApiException(ResponseType.APIThrottled, msg="短信发送过于频繁,请在1分钟后重试")
code = cls._generate_code()
{%- if cookiecutter.use_celery == 'y' %}
send_sms_msg.delay(phone, code)
{%- else %}
logger.info("发送短信验证码: phone={}, code={}", phone, code)
{%- endif %}
cls._set_cache_code(phone, code)
cls._set_cache_flag(phone)
@classmethod
def verify_sms_code(cls, phone: str, code: int) -> bool:
"""
验证短信验证码
:param phone: 手机号
:param code: 验证码
:return: 是否验证成功
"""
cache_code = cls._get_cache_code(phone)
if cache_code is None or cache_code != code:
return False
cls._set_cache_code(phone, None)
return True | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/apps/oauth/utils.py | utils.py |
import re
import random
from typing import Dict, Any
from rest_framework import serializers
from rest_framework_simplejwt.serializers import PasswordField
from zq_django_util.exceptions import ApiException
{%- if cookiecutter.use_wechat == 'y' %}
from zq_django_util.utils.auth.serializers import OpenIdLoginSerializer
{%- endif %}
from oauth.utils import VerifyCodeUtil
{%- if cookiecutter.use_wechat == 'y' %}
from server.utils.wechat import get_openid
{%- endif %}
{%- if cookiecutter.use_wechat == 'y' %}
class WechatLoginSerializer(OpenIdLoginSerializer):
"""
微信登录序列化器
"""
code = PasswordField(label="前端获取code") # 前端传入 code
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields.pop("openid") # 删除 openid 字段
def get_open_id(self, attrs: Dict[str, Any]) -> str:
"""
重写获取 open_id 方法
"""
return get_openid(attrs["code"])["openid"]
{%- endif %}
class SmsMsgSerializer(serializers.Serializer):
"""
发送短信验证码序列化器
"""
phone = serializers.CharField(max_length=11, min_length=11, label="手机号", write_only=True)
code = serializers.IntegerField(min_value=100000, max_value=999999, label="验证码", write_only=True, required=False)
status = serializers.CharField(max_length=2, min_length=10, label="状态", read_only=True)
def validate_phone(self, value: str) -> str:
"""
验证手机号
"""
if not re.match(r"^1[3456789]\d{9}$", value):
raise ApiException
return value
def create(self, validated_data: Dict[str, Any]) -> Dict[str, Any]:
"""
短信验证码
"""
phone = validated_data["phone"]
if not validated_data.get("code"):
# 发送短信验证码
VerifyCodeUtil.send_sms_verify_code(phone)
return {"status": "send code"}
else:
# 验证短信验证码
code = validated_data["code"]
status = VerifyCodeUtil.verify_sms_code(phone, code)
return {"status": "verify code " + str(status)} | zq-django-template | /zq_django_template-0.1.0.tar.gz/zq_django_template-0.1.0/{{cookiecutter.project_name}}/server/apps/oauth/serializers.py | serializers.py |
from dataclasses import dataclass
from enum import Enum, unique
from typing import TYPE_CHECKING, Optional
from django.http import JsonResponse
if TYPE_CHECKING:
from zq_django_util.exceptions import ApiException
from zq_django_util.response.types import JSONVal, ResponseData
class ResponseTypeEnum(Enum):
@property
def code(self) -> str:
"""
根据枚举名称取状态码code
:return: 状态码code
"""
return self.value[0]
@property
def detail(self) -> str:
"""
根据枚举名称取状态说明message
:return: 状态说明message
"""
return self.value[1]
@property
def status_code(self) -> int:
"""
根据枚举名称取状态码status_code
:return: 状态码status_code
"""
return self.value[2]
# region ResponseType
@unique
class ResponseType(ResponseTypeEnum):
"""API状态类型"""
Success = ("00000", "", 200)
ClientError = ("A0000", "用户端错误", 400)
LoginFailed = ("A0210", "用户登录失败", 400)
UsernameNotExist = ("A0211", "用户名不存在", 400)
PasswordWrong = ("A0212", "用户密码错误", 400)
LoginFailedExceed = ("A0213", "用户输入密码次数超限", 400)
PhoneNotExist = ("A0214", "手机号不存在", 400)
LoginExpired = ("A0220", "用户登录已过期", 401)
TokenInvalid = ("A0221", "token 无效或已过期", 401)
RefreshTokenInvalid = ("A0221", "refresh token 无效或已过期", 401)
ThirdLoginFailed = ("A0230", "用户第三方登录失败", 401)
ThirdLoginCaptchaError = ("A0232", "用户第三方登录验证码错误", 401)
ThirdLoginExpired = ("A0233", "用户第三方登录已过期", 401)
PermissionError = ("A0300", "用户权限异常", 403)
NotLogin = ("A0310", "用户未登录", 401)
NotActive = ("A0311", "用户未激活", 403)
PermissionDenied = ("A0312", "用户无权限", 403)
ServiceNotAvailable = ("A0313", "不在服务时段", 403)
UserBlocked = ("A0320", "黑名单用户", 403)
UserFrozen = ("A0321", "账号被冻结", 403)
IPInvalid = ("A0322", "非法 IP 地址", 401)
ParamError = ("A0400", "用户请求参数错误", 400)
JSONParseFailed = ("A0410", "请求 JSON 解析错误", 400)
ParamEmpty = ("A0420", "请求必填参数为空", 400)
ParamValidationFailed = ("A0430", "请求参数值校验失败", 400)
RequestError = ("A0500", "用户请求服务异常", 400)
APINotFound = ("A0510", "请求接口不存在", 404)
MethodNotAllowed = ("A0511", "请求方法不允许", 405)
APIThrottled = ("A0512", "请求次数超出限制", 429)
HeaderNotAcceptable = ("A0513", "请求头无法满足", 406)
ResourceNotFound = ("A0514", "请求资源不存在", 404)
UploadError = ("A0600", "用户上传文件异常", 400)
UnsupportedMediaType = ("A0610", "用户上传文件类型不支持", 400)
UnsupportedMediaSize = ("A0613", "用户上传文件大小错误", 400)
VersionError = ("A0700", "用户版本异常", 400)
AppVersionError = ("A0710", "用户应用安装版本不匹配", 400)
APIVersionError = ("A0720", "用户 API 请求版本不匹配", 400)
ServerError = ("B0000", "系统执行出错", 500)
ServerTimeout = ("B0100", "系统执行超时", 500)
ServerResourceError = ("B0200", "系统资源异常", 500)
ThirdServiceError = ("C0000", "调用第三方服务出错", 500)
MiddlewareError = ("C0100", "中间件服务出错", 500)
ThirdServiceTimeoutError = ("C0200", "第三方系统执行超时", 500)
DatabaseError = ("C0300", "数据库服务出错", 500)
CacheError = ("C0400", "缓存服务出错", 500)
NotificationError = ("C0500", "通知服务出错", 500)
# endregion
@dataclass
class ApiResponse:
"""API响应数据结构"""
status_code: int
code: str
detail: str
msg: str
data: "ResponseData"
def __init__(
self,
response_type: ResponseType = ResponseType.Success,
data: "JSONVal" = "",
msg: Optional[str] = None,
ex: Optional["ApiException"] = None,
):
"""
Api 响应
:param response_type: 响应类型
:param data: 响应数据
:param msg: 面向用户的响应消息
:param ex: Api异常(用于获取相关数据)
"""
if ex: # 优先使用异常
response_type = ex.response_type # 获取传入异常的类型
msg = ex.msg # 获取传入异常的消息
self.status_code = response_type.status_code
self.code = response_type.code
self.detail = response_type.detail
self.msg = msg if msg else self.detail
self.data = data
def __str__(self) -> str:
return f"code: {self.code}, detail: {self.detail}, msg: {self.msg}, data: {self.data}"
def __dict__(self) -> "ResponseData":
return {
"code": self.code,
"detail": self.detail,
"msg": self.msg,
"data": self.data,
}
def to_json_response(self) -> JsonResponse:
return JsonResponse(self.__dict__(), status=self.status_code) | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/response/__init__.py | __init__.py |
from typing import Optional, Union
import django.core.exceptions as django_exceptions
import rest_framework.exceptions as drf_exceptions
import rest_framework_simplejwt.exceptions as jwt_exceptions
from django.http import Http404
from drf_standardized_errors.formatter import ExceptionFormatter
from drf_standardized_errors.types import ExceptionHandlerContext
from rest_framework.response import Response
from rest_framework.views import set_rollback
from zq_django_util.exceptions import ApiException
from zq_django_util.exceptions.configs import zq_exception_settings
from zq_django_util.exceptions.types import ExtraHeaders
from zq_django_util.response import ResponseType
from zq_django_util.response.types import ApiExceptionResponse
if zq_exception_settings.SENTRY_ENABLE: # pragma: no cover
import sentry_sdk
class ApiExceptionHandler:
exc: Exception
context: ExceptionHandlerContext
def __init__(
self, exc: Exception, context: ExceptionHandlerContext
) -> None:
self.exc = exc
self.context = context
def run(self) -> Optional[ApiExceptionResponse]:
"""
处理异常
:return: 响应数据或失败为None
"""
exc = self.convert_known_exceptions(self.exc) # 将django的异常转换为drf的异常
if (
zq_exception_settings.EXCEPTION_UNKNOWN_HANDLE
): # 未知异常处理(非drf、api的异常)
exc = self.convert_unhandled_exceptions(exc) # 将未知异常转换为drf异常
exc = self.convert_drf_exceptions(exc) # 将drf异常转换为api异常
set_rollback() # 设置事务回滚
response = None
if isinstance(exc, ApiException): # 如果是api异常则进行解析
response = self.get_response(exc)
if exc.record: # 如果需要记录
if zq_exception_settings.SENTRY_ENABLE:
self._notify_sentry(exc, response)
# 将event_id写入响应数据
response.data["data"]["event_id"] = exc.event_id
# 将异常信息记录到response中,便于logger记录
response.exception_data = exc
return response
def _notify_sentry(self, exc: ApiException, response: Response) -> None:
"""
通知sentry, 可在notify_sentry中自定义其他内容
:param exc: Api异常
:param response: 响应数据
:return: None
"""
try:
self.notify_sentry(exc, response) # 调用自定义通知
except Exception:
pass
# 默认异常汇报
sentry_sdk.api.set_tag("exception_type", exc.response_type.name)
sentry_sdk.set_context(
"exp_info",
{
"eid": response.data["data"]["eid"],
"code": response.data["code"],
"detail": response.data["detail"],
"msg": response.data["msg"],
},
)
sentry_sdk.set_context("details", response.data["data"]["details"])
exc.event_id = sentry_sdk.api.capture_exception(self.exc) # 发送至sentry
def notify_sentry(self, exc: ApiException, response: Response) -> None:
"""
自定义sentry通知
:param exc: Api异常
:param response: 响应数据
:return: None
"""
user = self.context["request"].user
if user.is_authenticated:
sentry_sdk.api.set_tag("role", "user")
sentry_sdk.api.set_user(
{
"id": user.id,
"email": user.username,
"phone": user.phone if hasattr(user, "phone") else None,
}
)
else:
sentry_sdk.api.set_tag("role", "guest")
def get_response(self, exc: ApiException) -> Response:
"""
获取响应数据
:param exc: Api异常
:return:
"""
headers = self.get_headers(exc.inner)
data = exc.response_data
info = None
if exc.inner:
if isinstance(exc.inner, drf_exceptions.APIException):
info = ExceptionFormatter(
exc.inner, self.context, self.exc
).run()
else:
info = exc.inner.args[0] if exc.inner.args else None
data["data"]["details"] = info
return Response(
data,
status=exc.response_type.status_code,
headers=headers,
)
@staticmethod
def get_headers(exc: Exception) -> ExtraHeaders:
"""
获取额外响应头
:param exc: 异常
:return: headers
"""
headers = {}
if getattr(exc, "auth_header", None):
headers["WWW-Authenticate"] = exc.auth_header
if getattr(exc, "wait", None):
headers["Retry-After"] = "%d" % exc.wait
return headers
@staticmethod
def convert_known_exceptions(exc: Exception) -> Exception:
"""
By default, Django's built-in `Http404` and `PermissionDenied` are converted
to their DRF equivalent.
"""
if isinstance(exc, Http404):
return drf_exceptions.NotFound()
elif isinstance(exc, django_exceptions.PermissionDenied):
return drf_exceptions.PermissionDenied()
# jwt
elif isinstance(exc, jwt_exceptions.InvalidToken):
return ApiException(
type=ResponseType.TokenInvalid,
inner=exc,
)
elif isinstance(exc, jwt_exceptions.AuthenticationFailed):
return ApiException(
type=ResponseType.LoginFailed,
inner=exc,
)
elif isinstance(exc, jwt_exceptions.TokenError):
return ApiException(
type=ResponseType.TokenInvalid,
detail="Token解析错误",
inner=exc,
)
else:
return exc
@staticmethod
def convert_unhandled_exceptions(
exc: Exception,
) -> Union[drf_exceptions.APIException, ApiException]:
"""
Any non-DRF unhandled exception is converted to an APIException which
has a 500 status code.
"""
if not isinstance(exc, drf_exceptions.APIException) and not isinstance(
exc, ApiException
):
return drf_exceptions.APIException(detail=str(exc))
else:
return exc
@staticmethod
def convert_drf_exceptions(
exc: Union[drf_exceptions.APIException, ApiException, Exception],
) -> ApiException | Exception:
"""
转换drf异常
:param exc: drf异常
:return: ApiException
"""
if isinstance(exc, ApiException):
return exc
# 处理drf异常
record = False
response_type = ResponseType.ServerError
detail = None
msg = None
if isinstance(exc, drf_exceptions.ParseError):
response_type = ResponseType.JSONParseFailed
elif isinstance(exc, drf_exceptions.AuthenticationFailed):
response_type = ResponseType.LoginFailed
elif isinstance(exc, drf_exceptions.NotAuthenticated):
# 未登录
response_type = ResponseType.NotLogin
elif isinstance(exc, drf_exceptions.PermissionDenied):
response_type = ResponseType.PermissionDenied
elif isinstance(exc, drf_exceptions.NotFound):
response_type = ResponseType.APINotFound
elif isinstance(exc, drf_exceptions.ValidationError):
# 校验失败
record = True
response_type = ResponseType.ParamValidationFailed
elif isinstance(exc, drf_exceptions.MethodNotAllowed):
# 方法错误
record = True
response_type = ResponseType.MethodNotAllowed
detail = f"不允许{getattr(exc, 'args', None)[0]}请求"
elif isinstance(exc, drf_exceptions.NotAcceptable):
record = True
response_type = ResponseType.HeaderNotAcceptable
detail = f"不支持{getattr(exc, 'args', None)[0]}的响应格式"
elif isinstance(exc, drf_exceptions.UnsupportedMediaType):
record = True
response_type = ResponseType.UnsupportedMediaType
detail = f"不支持{getattr(exc, 'args', None)[0]}的请求格式"
msg = f"暂不支持{getattr(exc, 'args', None)[0]}文件上传,请使用支持的文件格式重试"
elif isinstance(exc, drf_exceptions.Throttled):
record = True
response_type = ResponseType.APIThrottled
detail = f"请求频率过高,请{getattr(exc, 'args', None)[0]}s后再试"
msg = f"请求太快了,请{getattr(exc, 'args', None)[0]}s后再试"
elif isinstance(exc, drf_exceptions.APIException):
record = True
response_type = ResponseType.ServerError
else:
# 不处理其他异常
return exc
return ApiException(response_type, msg, exc, record, detail)
def exception_handler(
exc: Exception, context: ExceptionHandlerContext
) -> Optional[ApiExceptionResponse]:
"""
自定义异常处理
:param exc: 异常
:param context: 上下文
:return: 处理程序
"""
handler_class = zq_exception_settings.EXCEPTION_HANDLER_CLASS
if handler_class != ApiExceptionHandler and not issubclass(
handler_class, ApiExceptionHandler
):
raise ImportError(
f"{handler_class} is not a subclass of ApiExceptionHandler"
)
return handler_class(exc, context).run() | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/exceptions/handler.py | handler.py |
import hashlib
import random
import time
import traceback
from datetime import datetime
from sys import exc_info
from typing import TYPE_CHECKING, Optional
from django.conf import settings
from django.utils.timezone import now
if TYPE_CHECKING:
from zq_django_util.exceptions.types import ExceptionData, ExceptionInfo
from zq_django_util.response import ResponseData, ResponseType
class ApiException(Exception):
"""API异常"""
DEFAULT_MSG = "服务器开小差了"
RECORD_MSG_TEMPLATE = "请向工作人员反馈以下内容:"
record: bool
response_type: "ResponseType"
eid: Optional[str]
detail: str
msg: str
event_id: Optional[str]
inner: Optional[Exception]
_exc_data: Optional["ExceptionInfo"]
time: datetime
def __init__(
self,
type: "ResponseType",
msg: Optional[str] = None,
inner: Optional[Exception] = None,
record: Optional[bool] = None,
detail: Optional[str] = None,
) -> None:
"""
API异常
:param type: 异常类型
:param msg: 异常建议(面向用户)
:param inner: 内部异常
:param record: 是否记录异常
:param detail: 异常详情(面向开发者)
"""
self.record = (
record if record is not None else type.status_code == 500
) # 是否记录异常
self.response_type: "ResponseType" = type
if self.record: # 记录异常
self.eid = self.get_exp_id() # 异常id
else:
self.eid = None
self.detail = self.get_exp_detail(detail) # 异常详情
self.msg = self.get_exp_msg(msg) # 异常用户提示
self.event_id = None
super().__init__(self.detail)
self.inner = inner
self._exc_data = None
self.time = now()
def get_exp_detail(self, detail: Optional[str]) -> str:
"""
获取异常详情(面向开发者)
:param detail: 自定义异常详情
:return: 异常详情
"""
res = detail or self.response_type.detail # 获取异常详情
if self.record: # 记录异常
res = f"{res}, {self.eid}" # 将自定义异常详情添加到末尾
return res
def get_exp_msg(self, msg: Optional[str]) -> str:
"""
获取异常用户提示(面向用户)
:param msg: 自定义异常用户提示
:return: 异常用户提示
"""
if self.record: # 记录异常
if msg: # 如果有自定义异常用户提示
res = f"{msg},{self.RECORD_MSG_TEMPLATE}{self.eid}" # 使用自定义用户提示
else:
res = f"{self.DEFAULT_MSG},{self.RECORD_MSG_TEMPLATE}{self.eid}" # 标准用户提示
else: # 不记录异常
res = msg or self.response_type.detail # 获取异常详情
return res
@property
def exc_data(self) -> "ExceptionInfo":
if self._exc_data is None:
self._exc_data = self.get_exception_info()
return self._exc_data
@exc_data.setter
def exc_data(self, value: "ExceptionInfo") -> None:
self._exc_data = value
@property
def response_data(self) -> "ResponseData":
"""
获取响应数据
:return: 响应数据
"""
return {
"code": self.response_type.code,
"detail": self.detail,
"msg": self.msg,
"data": self.exception_data,
}
@staticmethod
def get_exp_id() -> str:
"""
获取异常id
:return: 异常id
"""
sha = hashlib.sha1()
exp_id = time.strftime("%Y%m%d%H%M%S") + "_%04d" % random.randint(
0, 10000
)
sha.update(exp_id.encode("utf-8"))
return sha.hexdigest()[:6]
@staticmethod
def get_exception_info() -> "ExceptionInfo":
"""
获取异常信息
:return: 异常信息
"""
exc_type, exc_value, exc_traceback_obj = exc_info()
return {
"type": str(exc_type),
"msg": str(exc_value),
"info": traceback.format_exc(),
"stack": traceback.format_stack(),
}
@property
def exception_data(self) -> "ExceptionData":
"""
获取异常返回数据
:return: 返回数据
"""
data: "ExceptionData" = {
"eid": self.eid,
"time": self.time,
}
if settings.DEBUG:
data["exception"] = self.exc_data
return data | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/exceptions/__init__.py | __init__.py |
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Union
from django.conf import settings
from rest_framework.settings import import_from_string, perform_import
if TYPE_CHECKING:
from zq_django_util.utils.types import SimpleValue
SettingValue = Union[
None, SimpleValue, List["SettingValue"], Dict[str, "SettingValue"]
]
SettingDict = Dict[str, SettingValue]
class PackageSettings:
"""
Copy of DRF APISettings class with support for importing settings that
are dicts with value as a string representing the path to the class
to be imported.
"""
setting_name: Optional[str] = None
DEFAULTS: Optional["SettingDict"] = None
IMPORT_STRINGS: Optional[List[str]] = None
defaults: "SettingDict"
import_strings: List[str]
_cached_attrs: Set[str]
_user_settings: "SettingDict"
def __init__(
self,
defaults: Optional["SettingDict"] = None,
import_strings: Optional[List[str]] = None,
):
if self.setting_name is None:
raise NotImplementedError("setting_name must be set")
if self.DEFAULTS is None:
raise NotImplementedError("DEFAULTS must be set")
if self.IMPORT_STRINGS is None:
raise NotImplementedError("IMPORT_STRINGS must be set")
self.defaults = defaults or self.DEFAULTS
self.import_strings = import_strings or self.IMPORT_STRINGS
self._cached_attrs = set()
@property
def user_settings(self) -> "SettingDict":
if not hasattr(self, "_user_settings"):
assert self.setting_name is not None
self._user_settings = getattr(settings, self.setting_name, {})
return self._user_settings
def __getattr__(self, attr: str) -> "SettingValue":
if attr not in self.defaults:
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if attr in self.import_strings:
if isinstance(val, dict):
val = {k: import_from_string(v, attr) for k, v in val.items()}
else:
val = perform_import(val, attr)
# Cache the result
self._cached_attrs.add(attr)
setattr(self, attr, val)
return val
def reload(self) -> None:
for attr in self._cached_attrs:
delattr(self, attr)
self._cached_attrs.clear()
if hasattr(self, "_user_settings"):
delattr(self, "_user_settings")
def reload_package_settings(self, *args, **kwargs) -> None:
setting = kwargs["setting"]
if setting == self.setting_name:
self.reload() | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/utils/package_settings.py | package_settings.py |
from datetime import datetime
from typing import Any, Dict
import rest_framework_simplejwt.settings
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework_simplejwt.serializers import (
PasswordField,
TokenObtainPairSerializer,
)
from rest_framework_simplejwt.tokens import RefreshToken
from zq_django_util.exceptions import ApiException
from zq_django_util.response import ResponseType
from zq_django_util.utils.auth.backends import OpenIdBackend
from zq_django_util.utils.auth.exceptions import OpenIdNotBound
AuthUser = get_user_model()
class OpenIdLoginSerializer(serializers.Serializer):
"""
OpenID Token 获取序列化器
"""
openid_field: str = getattr(settings, "OPENID_FIELD", "openid")
backend = OpenIdBackend()
openid = PasswordField()
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@classmethod
def get_token(cls, user: AuthUser) -> RefreshToken:
"""
获取 token(access与refresh)
:param user: user对象
:return: token 对象
"""
# 调用 simple-jwt 中 token 生成方法, 需要在 settings 中指定 USER_ID_FIELD 为主键名
return RefreshToken.for_user(user)
def validate(self, attrs: Dict[str, Any]) -> dict:
"""
重写验证器
:param attrs: 序列化器中待验证的数据
:return: 已验证数据,返回前端
"""
openid = self.get_open_id(attrs)
# 验证 openid
authenticate_kwargs = {
self.openid_field: openid
} # 给 openid 验证模块准备 openid
try:
# 给 openid 验证模块准备请求数据
authenticate_kwargs["request"] = self.context["request"]
except KeyError:
pass
openid_backend = self.backend
try:
user: AuthUser = openid_backend.authenticate(
**authenticate_kwargs, raise_exception=True
) # 调用 openid 验证模块进行权限验证
except OpenIdNotBound: # openid 未绑定用户
user = self.handle_new_openid(openid) # 处理新 openid
# token 获取
user_id_field = (
rest_framework_simplejwt.settings.api_settings.USER_ID_FIELD
) # 读取 settings 中定义的 user 主键字段名
refresh = self.get_token(user) # 获取 token
return self.generate_token_result(
user,
user_id_field,
refresh.access_token.current_time + refresh.access_token.lifetime,
str(refresh.access_token),
str(refresh),
)
def get_open_id(self, attrs: Dict[str, Any]) -> str:
"""
获取 openid
"""
return attrs[self.openid_field]
def generate_token_result(
self,
user: AuthUser,
user_id_field: str,
expire_time: datetime,
access: str,
refresh: str,
) -> dict:
"""
生成 token 返回结果
:param user: 用户对象
:param user_id_field: 用户主键字段
:param expire_time: 过期时间
:param access: access token
:param refresh: refresh token
:return:
"""
return dict(
id=getattr(user, user_id_field),
username=user.username,
expire_time=expire_time,
access=access,
refresh=refresh,
)
def handle_new_openid(self, openid: str) -> AuthUser:
"""
处理新 openid
如果自动注册需要返回对象
"""
raise ApiException(
ResponseType.ThirdLoginFailed,
msg="该微信账号暂未未绑定用户",
detail="openid未绑定",
)
class AbstractWechatLoginSerializer(OpenIdLoginSerializer):
"""
微信登录序列化器
"""
code = PasswordField(label="前端获取code") # 前端传入 code
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields.pop("openid") # 删除 openid 字段
def get_open_id(self, attrs: Dict[str, Any]) -> str:
"""
重写获取 open_id 方法
"""
raise NotImplementedError("请在此返回openid")
class PasswordLoginSerializer(TokenObtainPairSerializer):
def validate(self, attrs: Dict[str, Any]) -> dict:
super(TokenObtainPairSerializer, self).validate(attrs) # 获取 self.user
user_id_field = (
rest_framework_simplejwt.settings.api_settings.USER_ID_FIELD
) # 读取 settings 中定义的 user 主键字段名
refresh = self.get_token(self.user)
return self.generate_token_result(
self.user,
user_id_field,
refresh.access_token.current_time + refresh.access_token.lifetime,
str(refresh.access_token),
str(refresh),
)
def generate_token_result(
self,
user: AuthUser,
user_id_field: str,
expire_time: datetime,
access: str,
refresh: str,
) -> dict:
"""
生成 token 返回结果
:param user: 用户对象
:param user_id_field: 用户主键字段
:param expire_time: 过期时间
:param access: access token
:param refresh: refresh token
:return:
"""
return dict(
id=getattr(user, user_id_field),
username=user.username,
expire_time=expire_time,
access=access,
refresh=refresh,
) | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/utils/auth/serializers.py | serializers.py |
import base64
import datetime
import hashlib
import hmac
import json
import random
import time
from typing import AnyStr, Dict, Optional, Tuple, TypedDict
from urllib.parse import unquote
from urllib.request import urlopen
from Crypto.Hash import MD5
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from django.core.cache import cache
from rest_framework.request import Request
from zq_django_util.utils.oss.configs import oss_settings
from zq_django_util.utils.types import JSONValue
class OSSCallbackToken(TypedDict):
OSSAccessKeyId: str
host: str
policy: str
signature: str
expire: str
key: str
callback: str
def get_iso_8601(expire: float) -> str:
gmt = datetime.datetime.utcfromtimestamp(expire).isoformat()
gmt += "Z"
return gmt
def split_file_name(file_name: str) -> Tuple[str, str]:
"""
获取文件名与扩展名
:param file_name: 文件全名
:return: 文件名,扩展名
"""
if "." in file_name: # 文件存在扩展名
ext = file_name.split(".")[-1] # 文件扩展名
name = ".".join(file_name.split(".")[0:-1])
else:
ext = ""
name = file_name
return name, ext
def get_random_name(file_name: str) -> str:
"""
获取随机文件名
:param file_name: 原文件名
:return:
"""
name, ext = split_file_name(file_name)
new_name = time.strftime("%Y%m%d%H%M%S") # 定义文件名,年月日时分秒随机数
new_name = (
new_name
+ "_%04d" % random.randint(0, 10000)
+ (("." + ext) if ext != "" else "")
)
return new_name
def get_token(
key: str,
callback: Dict[str, str],
policy: Optional[JSONValue] = None,
) -> OSSCallbackToken:
"""
获取直传签名token
"""
# 获取Policy
expire_time = datetime.datetime.now() + datetime.timedelta(
seconds=oss_settings.TOKEN_EXPIRE_SECOND
)
expire = get_iso_8601(expire_time.timestamp())
if policy is None:
policy = {
"expiration": expire, # 过期时间
"conditions": [
{"bucket": oss_settings.BUCKET_NAME},
[
"content-length-range",
0,
oss_settings.MAX_SIZE_MB * 1024 * 1024,
], # 限制上传文件大小
["eq", "$key", f"{key}"], # 限制上传文件名
],
}
policy = json.dumps(policy).strip()
policy_encode = base64.b64encode(policy.encode())
# 签名
h = hmac.new(
oss_settings.ACCESS_KEY_SECRET.encode(),
policy_encode,
hashlib.sha1,
)
sign = base64.encodebytes(h.digest()).strip()
# 回调参数
callback_param = json.dumps(callback).strip()
base64_callback_body = base64.b64encode(callback_param.encode())
return dict(
OSSAccessKeyId=oss_settings.ACCESS_KEY_ID,
host=(
f'{oss_settings.ENDPOINT.split("://")[0]}://{oss_settings.BUCKET_NAME}.'
f'{oss_settings.ENDPOINT.split("://")[1]}'
),
policy=policy_encode.decode(),
signature=sign.decode(),
expire=expire,
key=key,
callback=base64_callback_body.decode(),
)
def check_callback_signature(request: Request) -> bool:
"""
检测回调身份
"""
authorization_base64 = request.META.get(
"HTTP_AUTHORIZATION", None
) # 获取AUTHORIZATION
pub_key_url_base64 = request.META.get(
"HTTP_X_OSS_PUB_KEY_URL", None
) # 获取公钥
if authorization_base64 is None or pub_key_url_base64 is None:
return False
try:
# 对x-oss-pub-key-url做base64解码后获取到公钥
pub_key_url = base64.b64decode(pub_key_url_base64).decode()
# 为了保证该public_key是由OSS颁发的,用户需要校验x-oss-pub-key-url的开头
if not pub_key_url.startswith(
"http://gosspublic.alicdn.com/"
) and not pub_key_url.startswith("https://gosspublic.alicdn.com/"):
return False
pub_key = get_pub_key(pub_key_url)
# 获取base64解码后的签名
authorization = base64.b64decode(authorization_base64)
# 获取待签名字符串
callback_body = request.body
if request.META["QUERY_STRING"] == "":
auth_str = (
unquote(request.META["PATH_INFO"])
+ "\n"
+ callback_body.decode()
)
else:
auth_str = (
unquote(request.META["PATH_INFO"])
+ "?"
+ request.META["QUERY_STRING"]
+ "\n"
+ callback_body.decode()
)
# 验证签名
auth_md5 = MD5.new(auth_str.encode())
rsa_pub = RSA.importKey(pub_key)
verifier = PKCS1_v1_5.new(rsa_pub)
verifier.verify(auth_md5, authorization)
return True
except Exception:
return False
def _get_pub_key_online(pub_key_url: str) -> AnyStr:
"""
从网络获取pub key
"""
response = urlopen(pub_key_url)
return response.read()
def get_pub_key(pub_key_url: str) -> AnyStr:
"""
获取公钥
:param pub_key_url: url
:return:
"""
key = f"oss:pub_key:{pub_key_url}"
try:
res = cache.get(key, None)
if res is None:
res = _get_pub_key_online(pub_key_url)
cache.set(key, res)
return res
except Exception:
return _get_pub_key_online(pub_key_url) | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/utils/oss/utils.py | utils.py |
import logging
import os
import shutil
from datetime import datetime, timezone
from tempfile import SpooledTemporaryFile
from typing import BinaryIO, List, Optional, Union
from urllib.parse import urljoin
import oss2
import oss2.exceptions
import oss2.utils
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.files import File
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_str
from oss2.models import GetObjectMetaResult
from .configs import oss_settings
from .exceptions import OssError
logger = logging.getLogger("oss")
@deconstructible
class OssStorage(Storage):
"""
Aliyun OSS Storage 基本存储
"""
access_key_id: str
access_key_secret: str
end_point: str
bucket_name: str
expire_time: int
auth: oss2.Auth
service: oss2.Service
bucket: oss2.Bucket
bucket_acl: str
base_dir: str # 基本路径
def __init__(
self,
access_key_id: Optional[str] = None,
access_key_secret: Optional[str] = None,
end_point: Optional[str] = None,
bucket_name: Optional[str] = None,
expire_time: Optional[int] = None,
):
self.access_key_id = access_key_id or oss_settings.ACCESS_KEY_ID
self.access_key_secret = (
access_key_secret or oss_settings.ACCESS_KEY_SECRET
)
self.end_point = self._normalize_endpoint(
end_point or oss_settings.ENDPOINT
)
self.bucket_name = bucket_name or oss_settings.BUCKET_NAME
self.expire_time = expire_time or oss_settings.URL_EXPIRE_SECOND
self.auth = oss2.Auth(self.access_key_id, self.access_key_secret)
self.service = oss2.Service(self.auth, self.end_point)
self.bucket = oss2.Bucket(self.auth, self.end_point, self.bucket_name)
# try to get bucket acl to check bucket exist or not
try:
self.bucket_acl = self.bucket.get_bucket_acl().acl
except oss2.exceptions.NoSuchBucket:
raise SuspiciousOperation(
"Bucket '%s' does not exist." % self.bucket_name
)
@staticmethod
def _normalize_endpoint(endpoint: str) -> str:
if not endpoint.startswith("http://") and not endpoint.startswith(
"https://"
):
return "http://" + endpoint
else:
return endpoint
def _get_key_name(self, name: str) -> str:
"""
Get the object key name in OSS, e.g.,
base_dir: /media/
:param name: test.txt
:return: media/test.txt
"""
# urljoin won't work if name is absolute path
name = name.lstrip("/")
base_path = force_str(self.base_dir)
final_path = urljoin(base_path + "/", name)
name = os.path.normpath(final_path.lstrip("/"))
# Add / to the end of path since os.path.normpath will remove it
if final_path.endswith("/") and not name.endswith("/"):
name += "/"
# Store filenames with forward slashes, even on Windows.
return name.replace("\\", "/")
def _open(self, name: str, mode: str = "rb") -> "OssFile":
"""
Open a file for reading from OSS.
:param name: 文件名
:param mode: 打开模式
:return:
"""
logger.debug("name: %s, mode: %s", name, mode)
if mode != "rb":
raise ValueError("OSS files can only be opened in read-only mode")
target_name = self._get_key_name(name)
logger.debug("target name: %s", target_name)
try:
# Load the key into a temporary file
tmp_file = SpooledTemporaryFile(max_size=10 * 1024 * 1024) # 10MB
obj = self.bucket.get_object(target_name)
logger.info(
"content length: %d, requestid: %s",
obj.content_length,
obj.request_id,
)
if obj.content_length is None:
shutil.copyfileobj(obj, tmp_file)
else:
oss2.utils.copyfileobj_and_verify(
obj, tmp_file, obj.content_length, request_id=obj.request_id
)
tmp_file.seek(0)
return OssFile(tmp_file, target_name, self)
except oss2.exceptions.NoSuchKey:
raise OssError("%s does not exist" % name)
except Exception:
raise OssError("Failed to open %s" % name)
def _save(self, name: str, content: Union[File, bytes, str]) -> str:
target_name = self._get_key_name(name)
logger.debug("target name: %s", target_name)
logger.debug("content: %s", content)
self.bucket.put_object(target_name, content)
return os.path.normpath(name)
def create_dir(self, dirname: str) -> None:
"""
创建目录
:param dirname: 文件夹路径
:return:
"""
target_name = self._get_key_name(dirname)
if not target_name.endswith("/"):
target_name += "/"
self.bucket.put_object(target_name, "")
def exists(self, name: str) -> bool:
target_name = self._get_key_name(name)
logger.debug("name: %s, target name: %s", name, target_name)
return self.bucket.object_exists(target_name)
def get_file_meta(self, name: str) -> GetObjectMetaResult:
"""
获取文件信息
:param name: 文件名
:return:
"""
name = self._get_key_name(name)
return self.bucket.get_object_meta(name)
def size(self, name: str) -> int:
file_meta = self.get_file_meta(name)
return file_meta.content_length
def modified_time(self, name: str) -> datetime:
file_meta = self.get_file_meta(name)
return datetime.fromtimestamp(file_meta.last_modified)
created_time = accessed_time = modified_time
def get_modified_time(self, name: str) -> datetime:
file_meta = self.get_file_meta(name)
if settings.USE_TZ:
return datetime.utcfromtimestamp(file_meta.last_modified).replace(
tzinfo=timezone.utc
)
else:
return datetime.fromtimestamp(file_meta.last_modified)
get_created_time = get_accessed_time = get_modified_time
def content_type(self, name: str) -> str:
name = self._get_key_name(name)
file_info = self.bucket.head_object(name)
return file_info.content_type
def listdir(self, name: str) -> (List[str], List[str]):
if name == ".":
name = ""
name = self._get_key_name(name)
if not name.endswith("/"):
name += "/"
logger.debug("name: %s", name)
files: list[str] = []
dirs: list[str] = []
for obj in oss2.ObjectIterator(self.bucket, prefix=name, delimiter="/"):
if obj.is_prefix():
dirs.append(obj.key)
else:
files.append(obj.key)
logger.debug("dirs: %s", list(dirs))
logger.debug("files: %s", files)
return dirs, files
def url(self, name: str) -> str:
"""
获取文件的url(带token)
:param name: 文件名
:return: url
"""
key = self._get_key_name(name)
url = self.bucket.sign_url("GET", key, expires=self.expire_time)
if self.bucket_acl != oss2.BUCKET_ACL_PRIVATE:
idx = url.find("?")
if idx > 0:
url = url[:idx].replace("%2F", "/")
return url
def delete(self, name: str) -> None:
"""
删除文件
:param name:
:return:
"""
name = self._get_key_name(name)
logger.debug("delete name: %s", name)
self.bucket.delete_object(name)
def delete_dir(self, dirname: str) -> None:
"""
删除文件夹
:param dirname:
:return:
"""
name = self._get_key_name(dirname)
if not name.endswith("/"):
name += "/"
logger.debug("delete name: %s", name)
self.bucket.delete_object(name)
def get_object_acl(self, name: str) -> str:
"""
获取文件的访问权限
:param name: 文件名
:return:
"""
name = self._get_key_name(name)
return self.bucket.get_object_acl(name).acl
def set_object_acl(self, name: str, acl: str) -> None:
"""
设置文件的访问权限
:param name: 文件名
:param acl: 访问权限
:return:
"""
name = self._get_key_name(name)
self.bucket.put_object_acl(name, acl)
class OssMediaStorage(OssStorage):
def __init__(self):
self.base_dir = settings.MEDIA_URL
logger.debug("locatin: %s", self.base_dir)
super(OssMediaStorage, self).__init__()
class OssStaticStorage(OssStorage):
def __init__(self):
self.base_dir = settings.STATIC_URL
logger.info("locatin: %s", self.base_dir)
super(OssStaticStorage, self).__init__()
class OssFile(File):
"""
A file returned from AliCloud OSS
"""
def __init__(
self, content: SpooledTemporaryFile, name: str, storage: OssStorage
):
super(OssFile, self).__init__(content, name)
self._storage = storage
def open(self, mode: str = "rb") -> BinaryIO:
if self.closed:
self.file = self._storage.open(self.name, mode).file
return super(OssFile, self).open(mode) | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/utils/oss/backends.py | backends.py |
from datetime import datetime, timedelta
from time import sleep
from typing import Any
import isodate
from meilisearch.models.task import Task as MeiliTask
from zq_django_util.utils.meili.constant.task import TaskStatus, TaskType
from zq_django_util.utils.meili.error import Error
from zq_django_util.utils.meili.exceptions import (
MeiliSearchTaskFail,
MeiliSearchTaskTimeoutError,
)
class AsyncTask:
uid: str
index_uid: str | None
status: TaskStatus
type: TaskType
details: dict[str, Any] | None
error_dict: dict[str, str] | None
canceled_by: int | None
duration: timedelta | None
enqueued_at: datetime | None
started_at: datetime | None
finished_at: datetime | None
def __init__(self, task: MeiliTask):
self.uid = task.uid
self.index_uid = task.index_uid
self.status = TaskStatus(task.status)
self.type = TaskType(task.type)
self.details = task.details
self.error = task.error
self.canceled_by = task.canceled_by
self.duration = (
isodate.parse_duration(task.duration) if task.duration else None
)
self.enqueued_at = (
isodate.parse_datetime(task.enqueued_at)
if task.enqueued_at
else None
)
self.started_at = (
isodate.parse_datetime(task.started_at) if task.started_at else None
)
self.finished_at = (
isodate.parse_datetime(task.finished_at)
if task.finished_at
else None
)
@property
def error(self) -> Error | None:
return Error(self.error_dict) if self.error_dict else None
@error.setter
def error(self, error: dict[str, str] | None):
self.error_dict = error
@property
def is_processing(self) -> bool:
return self.status == TaskStatus.PROCESSING
@property
def is_enqueued(self) -> bool:
return self.status == TaskStatus.ENQUEUED
@property
def is_failed(self) -> bool:
return self.status == TaskStatus.FAILED
@property
def is_succeeded(self) -> bool:
return self.status == TaskStatus.SUCCEEDED
@property
def is_canceled(self) -> bool:
return self.status == TaskStatus.CANCELED
@property
def is_finished(self) -> bool:
return self.is_succeeded or self.is_failed or self.is_canceled
def wait(self, timeout: float = 5.0) -> "AsyncTask":
"""
等待任务完成
:param timeout: 超时时间
:return:
"""
start_time = datetime.now()
while not self.is_finished:
sleep(0.05)
if (datetime.now() - start_time).seconds > timeout:
raise MeiliSearchTaskTimeoutError("任务超时")
if self.is_failed:
raise MeiliSearchTaskFail(self.error)
return self | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/utils/meili/task.py | task.py |
from typing import Any, Type
import meilisearch.index
from django.db.models import Model, QuerySet
from meilisearch import Client
from rest_framework.request import Request
from rest_framework.serializers import BaseSerializer
from zq_django_util.utils.meili.response import SearchResult
PK = int | str
class BaseIndex(meilisearch.index.Index):
"""
Index class for MeiliSearch
index是多个具有同类型document的集合,每个document都有一个唯一的标识符,称为主键。
该类用于以类字段的形式定义index的设置,以及对index的增删改查操作。
:var index_uid: index的唯一标识符 (str)
:var primary_key: 主键 (str | None, 默认为 None, 即自动识别主键)
:var client: MeiliSearch client (Client | None, 默认为None, 即使用 meili_client 单例)
:var class_settings: 是否使用类属性作为index的设置 (bool, 默认为 True)
:var displayed_attributes: 要显示的字段 (list[str], 默认为 ["*"], 即全部字段)
:var searchable_attributes: 要搜索的字段 (list[str], 默认为 ["*"], 即全部字段)
:var filterable_attributes: 要对值进行筛选的字段 (list[str], 默认为 [])
:var sortable_attributes: 要排序的字段 (list[str], 默认为 [])
:var ranking_rules: 排序规则 (list[str], 默认为 ["typo", "words", "proximity", "attribute", "sort", "exactness"])
:var stop_words: 停用词 (list[str], 默认为 [])
:var synonyms: 同义词 (dict[str, list[str]], 默认为 {})
:var distinct_attribute: 去重字段 (str | None, 默认为 None)
:var max_values_per_facet: 每个facet的最大值 (int, 默认为 100)
:var max_total_hits: 搜索结果数目的最大值, 用于分页 (int, 默认为 1000)
"""
index_uid: str
primary_key: str | None = None
client: Client | None = None
class_settings: bool = True
# global
displayed_attributes: list[str] = ["*"]
searchable_attributes: list[str] = ["*"]
filterable_attributes: list[str] = []
sortable_attributes: list[str] = []
ranking_rules: list[str] = [
"typo",
"words",
"proximity",
"attribute",
"sort",
"exactness",
]
stop_words: list[str] = []
synonyms: dict[str, list[str]] = {}
distinct_attribute: str | None = None
# faceting
max_values_per_facet: int = 100
# pagination
max_total_hits: int = 1000
_instance: "BaseIndex" = None
class Meta:
abstract = True
def __init__(self) -> None:
assert (
self.index_uid is not None and self.index_uid != ""
), "index_uid is required"
if self.client is None:
import zq_django_util.utils.meili
self.client = zq_django_util.utils.meili.meili_client
super().__init__(
self.client.config, uid=self.index_uid, primary_key=self.primary_key
)
if self.class_settings:
self.update_settings(self._class_settings)
def __new__(cls, *args, **kwargs):
"""
singleton
"""
if cls._instance is None:
cls._instance = super(BaseIndex, cls).__new__(cls)
return cls._instance
@staticmethod
def _validate_string_list(
name: str, value: list[str], allow_empty: bool = True
):
assert isinstance(value, list), f"{name} must be a list"
assert allow_empty or len(value) > 0, f"{name} must not be empty"
assert all(
isinstance(item, str) for item in value
), f"{name} must be a list of string"
def _validate_class_settings(self):
self._validate_string_list(
"displayedAttributes", self.displayed_attributes, allow_empty=False
)
self._validate_string_list(
"searchableAttributes",
self.searchable_attributes,
allow_empty=False,
)
self._validate_string_list(
"filterableAttributes", self.filterable_attributes
)
self._validate_string_list(
"sortableAttributes", self.sortable_attributes
)
self._validate_string_list(
"rankingRules", self.ranking_rules, allow_empty=False
)
self._validate_string_list("stopWords", self.stop_words)
assert isinstance(self.synonyms, dict), "synonyms must be a dict"
assert all(
isinstance(key, str) for key in self.synonyms.keys()
), "synonyms keys must be a string"
assert all(
isinstance(value, list) for value in self.synonyms.values()
), "synonyms values must be a list of string"
assert all(
isinstance(item, str)
for value in self.synonyms.values()
for item in value
), "synonyms values must be a list of string"
assert self.distinct_attribute is None or isinstance(
self.distinct_attribute, str
), "distinctAttribute must be a string or None"
@property
def _class_settings(self) -> dict[str, Any]:
self._validate_class_settings()
return {
"displayedAttributes": self.displayed_attributes,
"searchableAttributes": self.searchable_attributes,
"filterableAttributes": self.filterable_attributes,
"sortableAttributes": self.sortable_attributes,
"rankingRules": self.ranking_rules,
"stopWords": self.stop_words,
"synonyms": self.synonyms,
"distinctAttribute": self.distinct_attribute,
"faceting": {"maxValuesPerFacet": self.max_values_per_facet},
"pagination": {"maxTotalHits": self.max_total_hits},
}
class BaseIndexHelper:
"""
Index辅助类
配合BaseIndex使用,结合drf的序列化器操作index内的document
:var queryset: QuerySet
:var serializer_class: 序列化器
:var index_class: Index类
"""
queryset: QuerySet
serializer_class: Type[BaseSerializer]
index_class: Type[BaseIndex]
class Meta:
abstract = True
def __init__(self, auto_update=True):
"""
:param auto_update: 根据model信号自动更新索引,默认为True
"""
self.model = self.queryset.model
self.index = self.index_class()
if auto_update:
from django.db.models.signals import post_delete, post_save
post_save.connect(self.model_save_receiver, sender=self.model)
post_delete.connect(self.model_delete_receiver, sender=self.model)
def _to_queryset(
self,
objs: Model | PK | list[Model | PK] | QuerySet,
ignore_default_query_set: bool = False,
) -> QuerySet | None:
"""
将对象、id、id列表、对象列表、QuerySet转换为QuerySet
"""
if not isinstance(objs, list) and not isinstance(objs, QuerySet):
# 将单个对象转换为列表
objs = [objs]
if isinstance(objs, QuerySet) and not ignore_default_query_set:
# 限制QuerySet
objs = objs & self.queryset
if len(objs) > 0 and isinstance(objs[0], PK):
if ignore_default_query_set:
objs = self.model.objects.filter(pk__in=objs)
else:
objs = self.queryset.filter(pk__in=objs)
if len(objs) == 0:
return None
return objs
def upsert_index(
self,
objs: Model | PK | list[Model | PK] | QuerySet,
ignore_default_query_set: bool = False,
):
"""
创建或更新索引
https://docs.meilisearch.com/reference/api/documents.html#add-or-replace-documents
:param objs: id、对象、id列表、对象列表、QuerySet
:param ignore_default_query_set: 忽略默认的query_set限制,默认为False
:return:
"""
objs = self._to_queryset(objs, ignore_default_query_set)
if objs is None:
return
serializer = self.serializer_class(objs, many=True)
data = serializer.data
self.index.add_documents(data, primary_key=self.index.primary_key)
def delete_index(
self,
objs: Model | PK | list[Model | PK] | QuerySet,
ignore_default_query_set: bool = True,
):
"""
删除索引
https://docs.meilisearch.com/reference/api/documents.html#delete-documents-by-batch
:param objs: id、对象、id列表、对象列表、QuerySet
:param ignore_default_query_set: 忽略默认的query_set限制,默认为True
:return:
"""
objs = self._to_queryset(objs, ignore_default_query_set)
if objs is None:
return
self.index.delete_documents([obj.pk for obj in objs])
def rebuild_index(self):
"""
重建索引
https://docs.meilisearch.com/reference/api/documents.html#delete-all-documents
:return:
"""
self.index.delete_all_documents()
self.index.add_documents(
self.serializer_class(
self.queryset,
many=True,
).data,
primary_key=self.index.primary_key,
)
def search(
self,
query: str,
page: int = 1,
hits_per_page: int = 20,
filter: str | list | None = None,
facets: list[str] | None = None,
attributes_to_retrieve: list[str] | None = None,
attributes_to_crop: list[str] | None = None,
crop_length: int | None = None,
crop_marker: str | None = None,
attributes_to_highlight: list[str] | None = None,
highlight_pre_tag: str | None = None,
highlight_post_tag: str | None = None,
show_match_positions: bool | None = None,
sort: list[str] | None = None,
matching_strategy: str | None = None,
) -> SearchResult:
"""
搜索
https://docs.meilisearch.com/reference/api/search.html
:param filter:
:param query: 关键词
:param page:
:param hits_per_page:
:param filter:
:param facets:
:param attributes_to_retrieve:
:param attributes_to_crop:
:param crop_length:
:param crop_marker:
:param attributes_to_highlight:
:param highlight_pre_tag:
:param highlight_post_tag:
:param show_match_positions:
:param sort:
:param matching_strategy:
:return:
"""
query_params = {
"page": page,
"hitsPerPage": hits_per_page,
}
filter and query_params.update({"filter": filter})
facets and query_params.update({"facetsDistribution": facets})
attributes_to_retrieve and query_params.update(
{"attributesToRetrieve": attributes_to_retrieve}
)
attributes_to_crop and query_params.update(
{"attributesToCrop": attributes_to_crop}
)
crop_length and query_params.update({"cropLength": crop_length})
crop_marker and query_params.update({"cropMarker": crop_marker})
attributes_to_highlight and query_params.update(
{"attributesToHighlight": attributes_to_highlight}
)
highlight_pre_tag and query_params.update(
{"highlightPreTag": highlight_pre_tag}
)
highlight_post_tag and query_params.update(
{"highlightPostTag": highlight_post_tag}
)
show_match_positions and query_params.update(
{"showMatchPositions": show_match_positions}
)
sort and query_params.update({"sort": sort})
matching_strategy and query_params.update(
{"matchingStrategy": matching_strategy}
)
return SearchResult(self.index.search(query, query_params))
def search_with_request(
self, query: str, request: Request, **kwargs
) -> SearchResult:
page = int(request.query_params.get("page", 1))
page_size = int(request.query_params.get("page_size", 20))
return self.search(query, page=page, hits_per_page=page_size, **kwargs)
def model_save_receiver(self, instance, **kwargs):
self.upsert_index(instance)
def model_delete_receiver(self, instance, **kwargs):
self.delete_index(instance) | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/utils/meili/index.py | index.py |
import base64
import json
import time
from logging import getLogger
from queue import Queue
from threading import Thread
from typing import Dict, List, Optional
from django.core.files.uploadedfile import UploadedFile
from django.db.utils import OperationalError
from django.urls import resolve
from django.utils import timezone
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework_simplejwt.settings import api_settings
import zq_django_util
from zq_django_util.exceptions import ApiException
from zq_django_util.logs.configs import drf_logger_settings
from zq_django_util.logs.models import ExceptionLog, RequestLog
from zq_django_util.logs.types import (
ExceptionLogDict,
FileDataDict,
RequestLogDict,
)
from zq_django_util.logs.utils import (
close_old_database_connections,
get_client_ip,
get_headers,
mask_sensitive_data,
)
from zq_django_util.response.types import ApiExceptionResponse, JSONVal
logger = getLogger("drf_logger")
class HandleLogAsync(Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.flag = True
self._queue: Queue[(Request, Response, float, float)] = Queue(
maxsize=drf_logger_settings.QUEUE_MAX_SIZE
)
def __enter__(self):
self.daemon = True
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self) -> None:
"""
线程开始
:return:
"""
self.flag = True
self.start_queue_process()
def stop(self) -> None:
"""
强制结束线程
:return:
"""
self.flag = False
self.join()
def prepare_request_log(
self,
request: Request,
response: ApiExceptionResponse,
start_time: float,
end_time: float | None,
) -> Optional[RequestLogDict]:
"""
处理请求日志
:param request:
:param response:
:param start_time:
:param end_time:
:return:
"""
# region 检查是否需要记录日志
if not drf_logger_settings.DATABASE and not drf_logger_settings.SIGNAL:
return
url_name = resolve(request.path_info).url_name
namespace = resolve(request.path_info).namespace
# Always skip Admin panel
if (
namespace == "admin"
or namespace == "__debug__"
or url_name in drf_logger_settings.SKIP_URL_NAME
or namespace in drf_logger_settings.SKIP_NAMESPACE
):
return
# Only log required status codes if matching
if (
drf_logger_settings.STATUS_CODES is not None
and response.status_code not in drf_logger_settings.STATUS_CODES
):
return
# Log only registered methods if available.
if (
drf_logger_settings.METHODS is not None
and request.method not in drf_logger_settings.METHODS
):
return
# endregion
data = self.get_request_log_data(
request, response, start_time, end_time
) # 解析数据
if drf_logger_settings.SIGNAL: # 需要发送信号
# TODO 使用django信号发送日志
pass
if drf_logger_settings.DATABASE: # 需要写入数据库
return data # 返回数据
def put_log_data(
self,
request: Request,
response: Response,
start_time: float,
end_time: float | None = None,
) -> None:
"""
将日志数据放入队列
:param request:
:param response:
:param start_time:
:param end_time:
:return:
"""
self._queue.put((request, response, start_time, end_time))
if self._queue.qsize() >= drf_logger_settings.QUEUE_MAX_SIZE:
# 队列已满,开始处理日志
self._start_log_parse()
def start_queue_process(self):
"""
持续处理日志
:return:
"""
while self.flag:
time.sleep(drf_logger_settings.INTERVAL) # 睡眠
if self.flag:
self._start_log_parse()
def _start_log_parse(self) -> None:
"""
开始处理日志
:return:
"""
request_items: List[RequestLog] = [] # 请求日志
exception_items: List[ExceptionLog] = [] # 异常日志
while not self._queue.empty():
try:
request: Request
response: Response | ApiExceptionResponse
start_time: float
end_time: float | None
request, response, start_time, end_time = self._queue.get()
# 存在异常信息,则为异常日志
if getattr(response, "exception_data", False):
res = self.prepare_exception_log(
request, response, start_time, end_time
)
if res: # 解析后需要插入数据库
exception_items.append(ExceptionLog(**res))
else: # 否则只记录请求日志
res = self.prepare_request_log(
request, response, start_time, end_time
)
if res: # 解析后需要插入数据库
request_items.append(RequestLog(**res))
except Exception:
pass
if request_items or exception_items: # 有日志需要写入数据库
self._insert_into_database(request_items, exception_items)
@staticmethod
@close_old_database_connections
def _insert_into_database(
request_items: List[RequestLog],
exception_items: List[ExceptionLog],
) -> None:
"""
写入数据库
:param request_items: 请求日志列表
:param exception_items: 异常日志列表
:return:
"""
try:
if request_items: # 有请求日志
zq_django_util.logs.models.RequestLog.objects.using(
drf_logger_settings.DEFAULT_DATABASE
).bulk_create(
request_items
) # 批量插入
logger.debug(
f"insert {len(request_items)} request log into database"
)
if exception_items: # 有异常日志
# TODO 无法直接使用bulk_create: Can't bulk create a multi-table inherited model
for item in exception_items: # 逐条插入
item.save(using=drf_logger_settings.DEFAULT_DATABASE)
logger.debug(
f"insert {len(exception_items)} exception log into database"
)
except OperationalError: # 没有相关数据库表
raise Exception(
"""
DRF API LOGGER EXCEPTION
Model does not exists.
Did you forget to migrate?
"""
)
except Exception as e: # 其他异常
logger.error(f"DRF API LOGGER EXCEPTION: {e}")
@classmethod
def prepare_exception_log(
cls,
request: Request,
response: ApiExceptionResponse,
start_time: float,
end_time: float | None,
) -> ExceptionLogDict:
"""
解析异常记录
:param request: 请求
:param response: 响应
:param start_time: 开始时间
:param end_time: 结束时间
:return: 异常数据
"""
data: RequestLogDict = cls.get_request_log_data(
request, response, start_time, end_time
) # 获取请求日志数据
exception_data: ApiException = response.exception_data
data.update(
dict(
exp_id=exception_data.eid or "",
event_id=exception_data.event_id or "",
exception_type=exception_data.exc_data["type"],
exception_msg=exception_data.exc_data["msg"],
exception_info=exception_data.exc_data["info"],
stack_info=exception_data.exc_data["stack"],
)
)
return data
@staticmethod
def get_request_log_data(
request: Request,
response: ApiExceptionResponse,
start_time: float,
end_time: float | None,
) -> RequestLogDict:
"""
解析请求记录
:param request: 请求
:param response: 响应
:param start_time: 开始时间
:param end_time: 结束时间
:return:
"""
# region 获取用户
jwt = request.headers.get("authorization")
try:
if jwt: # 有jwt,解析
payload = jwt.split(" ")[1].split(".")[1]
payload = json.loads(
base64.b64decode(
payload + "=" * (-len(payload) % 4)
).decode()
)
user_id = payload.get(api_settings.USER_ID_CLAIM, None)
else: # 无jwt,使用request内的用户
user_id = (
request.user.id if request.user.is_authenticated else None
)
except Exception:
user_id = None
# endregion
# region 记录请求参数
request_param = request.GET.dict()
request_data: Dict[str, JSONVal] = {}
file_data: Dict[str, FileDataDict] = {}
try:
for key, value in response.api_request_data.items():
if isinstance(value, UploadedFile): # 文件
file_data[key]: FileDataDict = {
"name": value.name,
"size": value.size,
"content_type": value.content_type,
"content_type_extra": value.content_type_extra,
}
else: # 文本数据
request_data[key] = value
except Exception:
pass
# endregion
# region 记录响应数据
response_body = {}
try:
if response.get("content-type") in [
"application/json",
"application/vnd.api+json",
]: # 只记录json格式的响应
if getattr(response, "streaming", False): # 流式响应
response_body = {"__content__": "streaming"}
else: # 文本响应
if type(response.content) == bytes: # bytes类型
response_body = json.loads(response.content.decode())
else: # str类型
response_body = json.loads(response.content)
elif "gzip" in response.get("content-type"):
response_body = {"__content__": "gzip file"}
except Exception:
response_body = {"__content__": "parse error"}
# endregion
# region 记录url
if drf_logger_settings.PATH_TYPE == "ABSOLUTE":
url = request.build_absolute_uri()
elif drf_logger_settings.PATH_TYPE == "FULL_PATH":
url = request.get_full_path()
else:
url = request.build_absolute_uri()
# endregion
headers = get_headers(request=request)
method = request.method
return dict(
user=user_id,
ip=get_client_ip(request),
method=method,
url=url,
headers=mask_sensitive_data(headers),
content_type=request.content_type,
query_param=mask_sensitive_data(request_param),
request_body=mask_sensitive_data(request_data),
file_data=file_data,
response=mask_sensitive_data(response_body),
status_code=response.status_code,
execution_time=end_time - start_time
if start_time and end_time
else None,
create_time=timezone.now(),
) | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/logs/handler.py | handler.py |
import csv
from datetime import timedelta
from typing import TYPE_CHECKING
from django.contrib import admin
from django.db.models import Count
from django.http import HttpResponse
from django.utils.translation import gettext_lazy as _
from zq_django_util.logs import models
from zq_django_util.logs.configs import drf_logger_settings
if TYPE_CHECKING:
from zq_django_util.logs.models import RequestLog
@admin.register(models.ExceptionLog)
class ExceptionLogAdmin(admin.ModelAdmin):
list_per_page = 20 # 每页显示条数
list_display = [
"exp_id",
"exception_type",
"method",
"url",
"ip",
"user",
"create_time",
]
list_display_links = ["exp_id", "exception_type"]
search_fields = ["exp_id", "exception_type", "ip", "url", "user"]
list_filter = ["exception_type", "method", "url", "user"]
ordering = ["-id"]
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
class ExportCsvMixin:
def export_as_csv(self, request, queryset):
meta = self.model._meta
field_names = [field.name for field in meta.fields]
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = "attachment; filename={}.csv".format(
meta
)
writer = csv.writer(response)
writer.writerow(field_names)
for obj in queryset:
writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export Selected"
class SlowAPIsFilter(admin.SimpleListFilter):
title = _("API Performance")
# Parameter for the filter that will be used in the URL query.
parameter_name = "api_performance"
def __init__(self, request, params, model, model_admin):
super().__init__(request, params, model, model_admin)
if (
type(drf_logger_settings.ADMIN_SLOW_API_ABOVE) == int
): # Making sure for integer value.
# Converting to seconds.
self._DRF_API_LOGGER_SLOW_API_ABOVE = (
drf_logger_settings.ADMIN_SLOW_API_ABOVE / 1000
)
else:
raise ValueError(
"DRF_LOGGER__ADMIN_SLOW_API_ABOVE must be an integer."
)
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
slow = "Slow, >={}ms".format(drf_logger_settings.ADMIN_SLOW_API_ABOVE)
fast = "Fast, <{}ms".format(drf_logger_settings.ADMIN_SLOW_API_ABOVE)
return (
("slow", _(slow)),
("fast", _(fast)),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
# to decide how to filter the queryset.
if self.value() == "slow":
return queryset.filter(
execution_time__gte=self._DRF_API_LOGGER_SLOW_API_ABOVE
)
if self.value() == "fast":
return queryset.filter(
execution_time__lt=self._DRF_API_LOGGER_SLOW_API_ABOVE
)
return queryset
@admin.register(models.RequestLog)
class RequestLogAdmin(admin.ModelAdmin, ExportCsvMixin):
actions = ["export_as_csv"]
def __init__(self, model, admin_site):
super().__init__(model, admin_site)
self._DRF_API_LOGGER_TIMEDELTA = 0
self.list_filter += (SlowAPIsFilter,)
if (
type(drf_logger_settings.ADMIN_TIMEDELTA) == int
): # Making sure for integer value.
self._DRF_API_LOGGER_TIMEDELTA = drf_logger_settings.ADMIN_TIMEDELTA
def added_on_time(self, obj: "RequestLog") -> str:
return (
obj.create_time + timedelta(minutes=self._DRF_API_LOGGER_TIMEDELTA)
).strftime("%d %b %Y %H:%M:%S")
added_on_time.admin_order_field = "create_time"
added_on_time.short_description = "Create at"
list_per_page = 20
list_display = ["method", "url", "status_code", "ip", "user", "create_time"]
list_display_links = ["method", "url"]
search_fields = ["ip", "url", "user"]
list_filter = ["method", "url", "status_code", "user"]
ordering = ["-id"]
change_list_template = "charts_change_list.html"
change_form_template = "change_form.html"
date_hierarchy = "create_time"
def changelist_view(self, request, extra_context=None):
response = super(RequestLogAdmin, self).changelist_view(
request, extra_context
)
try:
filtered_query_set = response.context_data["cl"].queryset
except Exception:
return response
analytics_model = (
filtered_query_set.values("create_time__date")
.annotate(total=Count("id"))
.order_by("total")
)
status_code_count_mode = (
filtered_query_set.values("id")
.values("status_code")
.annotate(total=Count("id"))
.order_by("status_code")
)
status_code_count_keys = list()
status_code_count_values = list()
for item in status_code_count_mode:
status_code_count_keys.append(item.get("status_code"))
status_code_count_values.append(item.get("total"))
extra_context = dict(
analytics=analytics_model,
status_code_count_keys=status_code_count_keys,
status_code_count_values=status_code_count_values,
)
response.context_data.update(extra_context)
return response
def get_queryset(self, request):
return (
super(RequestLogAdmin, self)
.get_queryset(request)
.using(drf_logger_settings.DEFAULT_DATABASE)
)
def changeform_view(
self, request, object_id=None, form_url="", extra_context=None
):
if request.GET.get("export", False):
export_queryset = (
self.get_queryset(request)
.filter(pk=object_id)
.using(drf_logger_settings.DEFAULT_DATABASE)
)
return self.export_as_csv(request, export_queryset)
return super(RequestLogAdmin, self).changeform_view(
request, object_id, form_url, extra_context
)
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/logs/admin.py | admin.py |
import re
from typing import Dict
from django.db import close_old_connections
from rest_framework.request import Request
from zq_django_util.logs.configs import drf_logger_settings
from zq_django_util.response.types import JSONVal
def get_headers(request: Request = None) -> Dict[str, str]:
"""
Function: get_headers(self, request)
Description: To get all the headers from request
"""
regex = re.compile("^HTTP_")
return dict(
(regex.sub("", header), value)
for (header, value) in request.META.items()
if header.startswith("HTTP_")
)
def get_client_ip(request: Request) -> str:
try:
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forwarded_for:
ip = x_forwarded_for.split(",")[0]
else:
ip = request.META.get("REMOTE_ADDR")
return ip
except Exception:
return ""
def is_api_logger_enabled() -> bool:
return drf_logger_settings.DATABASE or drf_logger_settings.SIGNAL
def database_log_enabled() -> bool:
return drf_logger_settings.DATABASE
def mask_sensitive_data(data: JSONVal) -> JSONVal:
"""
Hides sensitive keys specified in sensitive_keys settings.
Loops recursively over nested dictionaries.
"""
if type(data) != dict:
return data
for key, value in data.items():
if key in drf_logger_settings.SENSITIVE_KEYS:
length = len(data[key])
data[key] = f"***FILTERED*** (len: {length})"
if type(value) == dict:
data[key] = mask_sensitive_data(data[key])
if type(value) == list:
data[key] = [mask_sensitive_data(item) for item in data[key]]
return data
def close_old_database_connections(func):
"""
自定义decorator,用来装饰使用数据库操作函数
https://stackoverflow.com/questions/59773675/why-am-i-getting-the-mysql-server-has-gone-away-exception-in-django
"""
def wrapper(*args, **kwargs):
close_old_connections()
return func(*args, **kwargs)
return wrapper | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/logs/utils.py | utils.py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="RequestLog",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"user",
models.IntegerField(
blank=True, null=True, verbose_name="用户ID"
),
),
("ip", models.CharField(max_length=32, verbose_name="用户IP")),
(
"method",
models.CharField(max_length=32, verbose_name="请求方法"),
),
("url", models.TextField(verbose_name="请求URL")),
("headers", models.JSONField(verbose_name="请求头")),
(
"content_type",
models.CharField(max_length=32, verbose_name="请求类型"),
),
("query_param", models.JSONField(verbose_name="请求参数")),
("request_body", models.JSONField(verbose_name="请求数据")),
("file_data", models.JSONField(verbose_name="文件数据")),
("response", models.JSONField(verbose_name="响应数据")),
(
"status_code",
models.PositiveSmallIntegerField(
db_index=True, verbose_name="响应状态码"
),
),
(
"execution_time",
models.DecimalField(
decimal_places=5,
max_digits=8,
null=True,
verbose_name="执行时间",
),
),
(
"create_time",
models.DateTimeField(
auto_now_add=True, verbose_name="请求时间"
),
),
],
options={
"verbose_name": "请求日志",
"verbose_name_plural": "请求日志",
"db_table": "log_request",
"ordering": ["-create_time"],
},
),
migrations.CreateModel(
name="ExceptionLog",
fields=[
(
"requestlog_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="logs.requestlog",
),
),
(
"exp_id",
models.CharField(max_length=32, verbose_name="异常ID"),
),
(
"exception_type",
models.CharField(max_length=128, verbose_name="异常类型"),
),
(
"event_id",
models.CharField(max_length=32, verbose_name="Sentry事件ID"),
),
("exception_msg", models.TextField(verbose_name="异常信息")),
("exception_info", models.TextField(verbose_name="异常详情")),
("stack_info", models.JSONField(verbose_name="异常栈")),
],
options={
"verbose_name": "异常日志",
"verbose_name_plural": "异常日志",
"db_table": "log_exception",
"ordering": ["-create_time"],
},
bases=("logs.requestlog",),
),
] | zq-django-util | /zq_django_util-0.2.2-py3-none-any.whl/zq_django_util/logs/migrations/0001_initial.py | 0001_initial.py |
import logging
import colorlog
console_color_config = {
'DEBUG': 'white', # cyan white
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bold_red',
}
fmt = '[%(asctime)s.%(msecs)03d] %(filename)s line:%(lineno)d [%(levelname)s] : %(message)s'
color_fmt = f'%(log_color)s{fmt}'
date_fmt = "%Y-%m-%d %H:%M:%S"
def get_file_handler(log_path = "./log.txt")->logging.FileHandler :
file_formatter = logging.Formatter(
fmt = fmt,
datefmt = date_fmt
)
handler = logging.FileHandler(log_path)
handler.setLevel(logging.DEBUG)
handler.setFormatter(file_formatter)
return handler
def get_stream_handler()->logging.StreamHandler:
console_formatter = colorlog.ColoredFormatter(
fmt = color_fmt,
datefmt = date_fmt,
log_colors = console_color_config
)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
handler.setFormatter(console_formatter)
return handler
def get_logger(logger_name="zq_logger",
log_path = "./log.txt",
enable_file = True,
enable_console = True):
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
if not logger.handlers:
if enable_file:
logger.addHandler(get_file_handler())
if enable_console:
logger.addHandler(get_stream_handler())
return logger
if __name__ == '__main__':
# show colors
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) # 第一层过滤
logger.addHandler(get_file_handler())
stream_handler = get_stream_handler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
logger.debug("debug")
logger.info("info")
logger.warning("warning")
logger.error("error")
logger.critical("critical")
print()
# test functions
logger = get_logger()
logger.debug("debug")
logger.info("info")
logger.warning("warning")
logger.error("error")
logger.critical("critical") | zq-logger | /zq_logger-0.4.tar.gz/zq_logger-0.4/zq_logger/__init__.py | __init__.py |
import configparser
import re
import csv
import os
import json
import codecs
import numpy as np
import datetime
from tqdm import tqdm
import collections
import xlrd
import xlsxwriter
def read_config(conf_path):
"""
根据配置文件的路径读取配置文件,并返回配置文件内容组成的dict
配置文件格式:
[conn_config]
# sql连接配置
host=172.19.50.66
port=5432
user=fpcdpc
password=PASSWORD
database=dpc_db
:param conf_path: 配置文件路径
:return: 配置文件组成的dict
"""
conf_dict = {}
cf = configparser.ConfigParser()
cf.read(conf_path, encoding='utf-8')
secs = cf.sections()
for s in secs:
items = cf.items(s)
for i in items:
conf_dict[i[0]] = i[1]
return conf_dict
def check_and_creat_dir(file_url):
'''
判断文件目录是否存在,文件目录不存在则创建目录
:param file_url: 文件路径,包含文件名
:return:不存在则返回False, 存在True
'''
file_gang_list = file_url.split('/')
if len(file_gang_list) > 1:
[fname, fename] = os.path.split(file_url)
print(fname, fename)
if not os.path.exists(fname):
os.makedirs(fname)
return False
else:
return True
# 还可以直接创建空文件
else:
return True
def getPolygonArea(points):
"""
计算多边形面积
:param points: [[x1, y1], [x2, y2], [x3, y3], [x4, y4], ...]
:return: 面积
"""
sizep = len(points)
if sizep<3:
return 0.0
area = points[-1][0] * points[0][1] - points[0][0] * points[-1][1]
for i in range(1, sizep):
v = i - 1
area += (points[v][0] * points[i][1])
area -= (points[i][0] * points[v][1])
return abs(0.5 * area)
def get_bracketed_content(text):
"""
获取文本中所有小括号中的内容组成的list
如:
香港特(别行)政区(北京点)
return:
['别行', '北京点']
:param text: 文本
:return: 括号中内容组成的list
"""
res = re.findall(r'[((](.*?)[))]', text)
return res
def rm_bracketed(text):
"""
去除文本中的括号,包括括号中的内容,返回去括号后的文本
如:
香港特(别行)政区(北京点)
return:
香港特政区
:param text:文本
:return:去括号后的文本
"""
res = re.sub(u"[((](.*?)[))]|\{.*?\}|\[.*?\]|\<.*?\>", "", text)
return res
def rm_symbol(text):
"""
去除文本中的所有符号,返回去符号后的文本
如:
香港特(别·行)政,区(北京-点)
return:
香港特别行政区北京点
:param text:
:return:
"""
res = re.sub(
"[\s+\.\!\/_, $%^*(+\"\')]|[ \t\r\n\\\\+—-\-()?【】“”!,。?::、~@#¥%……&*()\|「」▏·`▪•۰・●⁺°~’\[\➕;〔〕《–‖﹢〖〗‘》[]◆❤×『\]』。×\\\️=;²∙﹙′★◎〉─③ⅳ―☆㎡〇ⅲ⊂♡⑧℃⑤︱╮₂ⅴⅱ³»①〈╭✘ ※❥・﹚、ⅰ<>›ܶ│丨‧丶]",
"", text)
return res
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, bytes):
return str(obj, encoding='utf-8')
elif isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
else:
return super(MyEncoder, self).default(obj)
def write_json(filename, data, isIndent=False, isLine=False):
"""
将字典或者list数据保存为json
:param filename: 保存的文件名
:param data: 要保存的数据
:param isIndent: 是否按照漂亮的格式保存
:param isLine: 是否按行保存
:return:
"""
if not isLine and (isinstance(data, dict) or isinstance(data, list)):
if isIndent:
json_str = json.dumps(data, ensure_ascii=False, indent=4, cls=MyEncoder)
with open(filename, 'w', encoding='utf-8') as json_file:
json_file.write(json_str)
else:
json_str = json.dumps(data, ensure_ascii=False, cls=MyEncoder)
with open(filename, 'w', encoding='utf-8') as json_file:
json_file.write(json_str)
else:
with codecs.open(filename, 'w', 'utf-8') as f:
for formatted_instance in data:
json_str = json.dumps(formatted_instance, ensure_ascii=False, cls=MyEncoder)
f.write(json_str)
f.write('\n')
f.close()
def read_csv(file_path, isdict=False):
"""
读取csv数据
返回数据格式如:
isdict=False(默认):
list,且每个元素也是list,每个元素表示每行数据
[[line1_cell1, line1_cell2], [line2_cell1, line2_cell2], ...]
例如:
[
['class','name','sex','height','year'],
[1,'xiaoming','male',168,23],
[2,'xiaohong','female',162,22],
[3,'xiaozhang','female',163,21],
[4,'xiaoli','male',158,21],
...
]
isdict=True:
list,每个元素是dict,每个元素表示每行数据
[{key1: line1_cell1, key2, line1_cell2, ...}, {key1: line2_cell1, key2, line2_cell2, ...}, ...]
例如:
[
{'class': '1', 'name': 'xiaoming', 'sex': 'male', 'height': '168', year: '23'},
{'class': '2', 'name': 'xiaohong', 'sex': 'female', 'height': '162', year: '22'},
{'class': '3', 'name': 'xiaozhang', 'sex': 'female', 'height': '163', year: '21'},
{'class': '4', 'name': 'xiaoli', 'sex': 'male', 'height': '158', year: '21'},
...
]
:param file_path:csv文件路径
:param isdict: 返回数据格式,默认False, 返回的每行作为一个list, 如果设为True,则每行作为一个dict
:return: list, 根据isdict觉得每个元素的格式
"""
res = []
with open(file_path, encoding='utf-8') as f:
reader = csv.reader(f)
try:
for l in reader:
if len(l) == 0:
continue
res.append(l)
except Exception as e:
print("\033[1;31m 警告:读取csv时发生错误,已经读取到:{} 条数据, 错误内容: {}\033[0m".format(len(res), e))
if isdict:
if len(res) < 1:
return []
keys = []
keys_dict = collections.defaultdict(int)
for k in res[0]:
k = str(k)
keys_dict[k] += 1
if k not in keys:
keys.append(k)
else:
k_new = k + '_' + str(keys_dict[k])
keys.append(k_new)
res_dict = []
for d in res[1:]:
linei = {}
for di, ki in zip(d, keys):
linei[ki] = di
res_dict.append(linei)
return res_dict
return res
def write_csv(filename, data):
"""
将数据写到csv中
支持两种数据格式:
(1) list,且每个元素也是list,每个元素表示每行数据
[[line1_cell1, line1_cell2], [line2_cell1, line2_cell2], ...]
例如:
[
['class','name','sex','height','year'],
[1,'xiaoming','male',168,23],
[2,'xiaohong','female',162,22],
[3,'xiaozhang','female',163,21],
[4,'xiaoli','male',158,21],
...
]
(2) list,每个元素是dict,每个元素表示每行数据
[{key1: line1_cell1, key2, line1_cell2, ...}, {key1: line2_cell1, key2, line2_cell2, ...}, ...]
例如:
[
{'class': '1', 'name': 'xiaoming', 'sex': 'male', 'height': '168', year: '23'},
{'class': '2', 'name': 'xiaohong', 'sex': 'female', 'height': '162', year: '22'},
{'class': '3', 'name': 'xiaozhang', 'sex': 'female', 'height': '163', year: '21'},
{'class': '4', 'name': 'xiaoli', 'sex': 'male', 'height': '158', year: '21'},
...
]
:param filename: 需要写入的csv文件路径
:param data: 需要写入的数据
"""
isdict = False
if len(data) > 0:
if type(data[0]) == dict:
isdict = True
f = open(filename, 'w', encoding='utf-8', newline='')
write_data = []
keys = []
if isdict:
for d in data:
keysi = list(d.keys())
for ki in keysi:
if ki not in keys:
keys.append(ki)
write_data.append(keys)
for d in data:
di = []
for k in keys:
if k in d:
di.append(d[k])
else:
di.append('')
write_data.append(di)
else:
write_data = data
writer = csv.writer(f)
for i in write_data:
writer.writerow(i)
f.close()
def read_excel(filename, sheet_name='', isdict=False):
"""
读取excel数据, 默认读取第一个sheet,可以通过sheet_name决定读取的sheet
返回数据格式如:
isdict=False(默认):
list,且每个元素也是list,每个元素表示每行数据
[[line1_cell1, line1_cell2], [line2_cell1, line2_cell2], ...]
例如:
[
['class','name','sex','height','year'],
[1,'xiaoming','male',168,23],
[2,'xiaohong','female',162,22],
[3,'xiaozhang','female',163,21],
[4,'xiaoli','male',158,21],
...
]
isdict=True:
list,每个元素是dict,每个元素表示每行数据
[{key1: line1_cell1, key2, line1_cell2, ...}, {key1: line2_cell1, key2, line2_cell2, ...}, ...]
例如:
[
{'class': '1', 'name': 'xiaoming', 'sex': 'male', 'height': '168', year: '23'},
{'class': '2', 'name': 'xiaohong', 'sex': 'female', 'height': '162', year: '22'},
{'class': '3', 'name': 'xiaozhang', 'sex': 'female', 'height': '163', year: '21'},
{'class': '4', 'name': 'xiaoli', 'sex': 'male', 'height': '158', year: '21'},
...
]
:param filename: excel文件路径
:param sheet_name: 需要读取的excel中sheet的名称, 默认读取第一个sheet
:param isdict: 返回数据格式,默认False, 返回的每行作为一个list, 如果设为True,则每行作为一个dict
:return: list, 根据isdict觉得每个元素的格式
"""
res = []
data = xlrd.open_workbook(filename)
if sheet_name != "":
table = data.sheet_by_name(sheet_name)
else:
table = data.sheets()[0]
rowNum = table.nrows
colNum = table.ncols
for i in range(rowNum):
row_data = []
for j in range(colNum):
cell_ij = table.cell(i, j)
value = table.cell(i, j).value
if cell_ij.ctype == 4:
if value == 1:
value = True
else:
value = False
if value == 'null':
value = ''
row_data.append(value)
res.append(row_data)
if isdict:
if len(res) < 1:
return None
keys = []
keys_dict = collections.defaultdict(int)
for k in res[0]:
k = str(k)
keys_dict[k] += 1
if k not in keys:
keys.append(k)
else:
k_new = k + '_' + str(keys_dict[k])
keys.append(k_new)
res_dict = []
for d in res[1:]:
linei = {}
for di, ki in zip(d, keys):
linei[ki] = di
res_dict.append(linei)
return res_dict
return res
def write_excel(filename, data, sheet_name='Sheet1'):
"""
将数据写到excel中, 默认sheet_name='Sheet1', 可以自行设置
支持两种数据格式:
(1) list,且每个元素也是list,每个元素表示每行数据
[[line1_cell1, line1_cell2], [line2_cell1, line2_cell2], ...]
例如:
[
['class','name','sex','height','year'],
[1,'xiaoming','male',168,23],
[2,'xiaohong','female',162,22],
[3,'xiaozhang','female',163,21],
[4,'xiaoli','male',158,21],
...
]
(2) list,每个元素是dict,每个元素表示每行数据
[{key1: line1_cell1, key2, line1_cell2, ...}, {key1: line2_cell1, key2, line2_cell2, ...}, ...]
例如:
[
{'class': '1', 'name': 'xiaoming', 'sex': 'male', 'height': '168', year: '23'},
{'class': '2', 'name': 'xiaohong', 'sex': 'female', 'height': '162', year: '22'},
{'class': '3', 'name': 'xiaozhang', 'sex': 'female', 'height': '163', year: '21'},
{'class': '4', 'name': 'xiaoli', 'sex': 'male', 'height': '158', year: '21'},
...
]
:param filename: 需要写入的excel文件路径, 如'a.xlsx'
:param data: 需要写入的数据
:param sheet_name: sheet的名称, 默认为:Sheet1
"""
isdict = False
if len(data) > 0:
if type(data[0]) == dict:
isdict = True
workbook = xlsxwriter.Workbook(filename) # 创建一个excel文件
worksheet = workbook.add_worksheet(sheet_name) # 在文件中创建一个名为TEST的sheet,不加名字默认为sheet1
write_data = []
keys = []
if isdict:
for d in data:
keysi = list(d.keys())
for ki in keysi:
if ki not in keys:
keys.append(ki)
write_data.append(keys)
for d in data:
di = []
for k in keys:
if k in d:
di.append(d[k])
else:
di.append('')
write_data.append(di)
else:
write_data = data
if len(write_data) > 0:
for i in range(len(write_data)):
for j in range(len(write_data[i])):
worksheet.write(i, j, write_data[i][j])
workbook.close()
def read_txt(fileName):
"""
读取txt文件, 返回list, 每行内容为元素
"""
res = []
with open(fileName, "r" ,encoding='utf8') as f:
data = f.readlines()
for d in tqdm(data):
res.append(d.strip('\n'))
return res
def compute_vecsimilar_one_2_one(veca, vecb):
"""
计算一个向量和另一个向量之间的相似度
:param veca: 第一个向量
:param vecb: 第二个向量
:return: 两个向量的相似度
"""
veca = np.array(veca)
vecb = np.array(vecb)
veca = veca / (veca ** 2).sum() ** 0.5
vecb = vecb / (vecb ** 2).sum() ** 0.5
sim = (veca * vecb).sum()
return sim
def compute_vecsimilar__one_2_many(veca, vecs):
"""
计算一条数据和多个向量之间的相似度
分别返回:
最大相似度下标: maxarg
最大相似度对应相似度:maxsim
:param veca: vec
:param vecs:[vec1, vec2, ...]
:return:maxarg, maxsim
"""
veca = np.array(veca)
vecs = np.array(vecs)
veca = veca / (veca ** 2).sum() ** 0.5
vecs = vecs / (vecs ** 2).sum(axis=1, keepdims=True) ** 0.5
sims = np.dot(veca, vecs.T)
maxarg = sims.argmax()
maxsim = max(sims)
return maxarg, maxsim
def compute_vecsimilar__many_2_many(vecsa, vecsb):
"""
计算多个向量和多个向量之间的相似度
分别返回:
maxarg: vecsa中的每条和vecsb中相似度最高的下标
maxsim: vecsa中的每条和vecsb中相似度最高的相似度
:param vecsa:[vec1, vec2, ...]
:param vecsb:[vec1, vec2, ...]
:return:maxargs, maxsims
"""
veca = np.array(vecsa)
vecs = np.array(vecsb)
veca = veca / (veca ** 2).sum(axis=1, keepdims=True) ** 0.5
vecs = vecs / (vecs ** 2).sum(axis=1, keepdims=True) ** 0.5
sims = np.dot(veca, vecs.T)
maxargs = sims.argmax(1)
maxsims = sims.max(1)
return maxargs, maxsims
def LCS(x, y):
c = np.zeros((len(x)+1, len(y)+1))
b = np.zeros((len(x)+1, len(y)+1))
for i in range(1, len(x)+1):
for j in range(1, len(y)+1):
if x[i-1] == y[j-1]:
c[i, j] = c[i-1, j-1]+1
b[i, j] = 2
else:
if c[i-1, j] >= c[i, j-1]:
c[i, j] = c[i-1, j]
b[i, j] = 1
else:
c[i, j] = c[i, j-1]
b[i, j] = 3
return c, b
def getLCS(texta, textb):
"""
获取最长公共序列
如:
x = '荣丰控股集团股份有限公司'
y = '荣丰(天津)医疗器械有限公司'
return:
荣丰有限公司
:param x:
:param y:
:return:
"""
c, b = LCS(texta, textb)
i = len(texta)
j = len(textb)
lcs = ''
while i > 0 and j > 0:
if b[i][j] == 2:
lcs = texta[i-1]+lcs
i -= 1
j -= 1
if b[i][j] == 1:
i -= 1
if b[i][j] == 3:
j -= 1
if b[i][j] == 0:
break
return lcs | zq-pt-albert | /zq_pt_albert-0.1.2-py3-none-any.whl/tool/utils.py | utils.py |
import logging
import colorful as cf
import os
__all__ = ["get_logger", "default_logger"]
allocated_loggers = {}
class ZQ_Logger(logging.Logger):
DEBUG=logging.DEBUG
INFO=logging.INFO
WARN=logging.WARN
WARNING=logging.WARNING
FATAL=logging.FATAL
CRITICAL=logging.CRITICAL
PRANK = 999
def default_color(self, x): return x
color_to_rank = [
cf.italic_red,
cf.italic_yellow,
cf.italic_cyan,
cf.italic_orange,
cf.italic_blue,
cf.italic_magenta,
cf.italic_green,
cf.italic_purple,
]
def __init__(self, name):
super(ZQ_Logger, self).__init__(name)
self.tag = ""
self.print_thread = False
self.print_level = True
self.rank = 0
self.name2handler = dict()
logging.Logger.setLevel(self, logging.DEBUG)
def add_log_file(self, log_file:str, name:str=""):
if not name: name = log_file
if name in self.name2handler: return
handler = logging.FileHandler(log_file)
self.name2handler[name] = handler
self.addHandler(handler)
self.reset_format()
def set_level_for_handler(self, name:str, level:int):
if name not in self.name2handler: return
handler: logging.Handler = self.name2handler[name]
handler.setLevel(level)
def set_level_for_all(self, level:int):
for name in self.name2handler:
handler: logging.Handler = self.name2handler[name]
handler.setLevel(level)
def setLevel(self, *args, **kwargs):
print(f"Warn: `setLevel` is not supported, use `set_level_for_all` instead")
def generate_fmt(self)->logging.StreamHandler:
thread_fmt = "" if not self.print_thread else "[%(threadName)s] "
level_fmt = "" if not self.print_level else " [%(levelname)s]"
basic_fmt = f'[%(asctime)s.%(msecs)03d] {thread_fmt}"%(pathname)s", line %(lineno)d{level_fmt}:{self.tag} %(message)s'
date_fmt = "%Y-%m-%d %H:%M:%S"
fmt = logging.Formatter(
fmt = basic_fmt,
datefmt = date_fmt
)
return fmt
def set_rank(self, rank:int):
self.rank = rank
self._set_tag(f"[Rank {rank}]")
self.default_color = self.color_to_rank[rank%len(self.color_to_rank)]
return self
def reset_format(self):
formatter = self.generate_fmt()
for handler in self.handlers:
handler.setFormatter(formatter)
return self
def _set_tag(self, tag:str):
self.tag = tag
self.reset_format()
return self
def set_print_thread(self, print_thread:bool=True):
self.print_thread = print_thread
self.reset_format()
return self
def prank(self, msg:str, color:str='',*args, **kwargs):
'''print with rank. If color is not specified, use the color format corresponding to the rank'''
if not self.isEnabledFor(self.PRANK): return
color = getattr(cf, color) if color else self.default_color
self._log(self.PRANK, color(msg), args, **kwargs)
def debug(self, msg:str, color:str='',*args, **kwargs):
'''print with rank. If color is not specified, use the color format corresponding to the rank'''
if not self.isEnabledFor(self.DEBUG): return
color = getattr(cf, color) if color else self.default_color
self._log(self.DEBUG, color(msg), args, **kwargs)
def info(self, msg:str, *args, **kwargs):
if self.isEnabledFor(logging.INFO): self._log(logging.INFO, cf.green(msg), args, kwargs)
def warn(self, msg:str, *args, **kwargs):
if self.isEnabledFor(logging.WARN): self._log(logging.WARN, cf.yellow(msg), args, kwargs)
def error(self, msg:str, *args, **kwargs):
if self.isEnabledFor(logging.ERROR): self._log(logging.ERROR, cf.red(msg), args, kwargs)
def fatal(self, msg:str, *args, **kwargs):
if self.isEnabledFor(logging.FATAL): self._log(logging.FATAL, cf.bold_red(msg), args, kwargs)
def prank_root(self, msg:str, color:str='', root=0, *args, **kwargs):
'''print with rank. If color is not specified, use the color format corresponding to the rank'''
if self.rank != root: return
if not self.isEnabledFor(self.PRANK): return
color = getattr(cf, color) if color else self.default_color
self._log(self.PRANK, color(msg), args, **kwargs)
def debug_root(self, msg:str, color:str='', root=0, *args, **kwargs):
'''print with rank. If color is not specified, use the color format corresponding to the rank'''
if self.rank != root: return
if not self.isEnabledFor(self.DEBUG): return
color = getattr(cf, color) if color else self.default_color
self._log(self.DEBUG, color(msg), args, **kwargs)
def info_root(self, msg:str, root=0, *args, **kwargs):
if self.rank != root: return
if self.isEnabledFor(logging.INFO): self._log(logging.INFO, cf.green(msg), args, kwargs)
def warn_root(self, msg:str, root=0, *args, **kwargs):
if self.rank != root: return
if self.isEnabledFor(logging.WARN): self._log(logging.WARN, cf.yellow(msg), args, kwargs)
def error_root(self, msg:str, root=0, *args, **kwargs):
if self.rank != root: return
if self.isEnabledFor(logging.ERROR): self._log(logging.ERROR, cf.red(msg), args, kwargs)
def fatal_root(self, msg:str, root=0, *args, **kwargs):
if self.rank != root: return
if self.isEnabledFor(logging.FATAL): self._log(logging.FATAL, cf.bold_red(msg), args, kwargs)
warning = warn
critical = fatal
warning_root = warn_root
critical_root = fatal_root
def get_level_from_env(logger_name:str, default_level="info"):
level = default_level if logger_name not in os.environ else os.environ[logger_name]
level = level.lower()
level2num = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warn": logging.WARN,
"warning": logging.WARN,
"error": logging.ERROR,
"fatal": logging.FATAL,
"critical": logging.FATAL,
}
if level in level2num: return level2num[level]
print(f"Unknown level {level} for logger {logger_name}, use default level {default_level}")
return level2num[default_level]
def get_logger(logger_name="Z_LEVEL",
enable_console = True)->ZQ_Logger:
if logger_name in allocated_loggers: return allocated_loggers[logger_name]
# why need to call `setLoggerClass` twice? refer to the issue: https://bugs.python.org/issue37258
logging.setLoggerClass(ZQ_Logger)
logger:ZQ_Logger = logging.getLogger(logger_name)
logging.setLoggerClass(logging.Logger)
# Initilize level from environment. If not specified, use INFO
if enable_console:
streamHandler = logging.StreamHandler()
name = logger_name
logger.name2handler[name] = streamHandler
streamHandler.setLevel(get_level_from_env(logger_name))
logger.addHandler(streamHandler)
logger.reset_format()
allocated_loggers[logger_name] = logger
return logger
default_logger = get_logger()
if __name__ == '__main__':
def test_environ():
print(f'{"="*20} test environ {"="*20}')
logger1 = get_logger("logger1")
logger1.debug("this message should not be printed due to default initilizing level is INFO")
logger1.set_level_for_all(logger1.DEBUG)
logger1.debug("this message should be printed due to call `set_level_for_all`")
os.environ['logger2'] = "debug"
logger2 = get_logger("logger2")
logger2.debug("this message should be printed due to env `logger` is set to `debug`")
def test_log_file():
print(f'{"="*20} test log file {"="*20}')
# recommend to use `ANSI Color` extention to view log file in VSCode
logger = get_logger("test_log_file")
logger.add_log_file("demo.log")
logger.info(f"this message should be printed to both console and file {logger.name2handler.keys()}")
def test_ranks():
print(f'{"="*20} test ranks {"="*20}')
logger=get_logger("test_ranks")
logger.set_level_for_handler("test_ranks", logger.DEBUG)
logger.debug_root("printed due to default rank is 0 and default style is plain")
logger.debug_root("NOT printed due to default rank is 0", root=2)
for rank in range(8):
logger.set_rank(rank)
logger.debug(f"style of rank {rank}")
def test_styles():
print(f'{"="*20} test styles {"="*20}')
logger=get_logger("test_styles")
logger.info("style of info msg (green)")
logger.warn("style of warn msg (yellow)")
logger.error("style of error msg (red) ")
logger.fatal("style of fatal msg (bold red)")
def test_threads():
print(f'{"="*20} test threads {"="*20}')
logger = get_logger("test_threads")
logger.set_print_thread(print_thread=True)
logger.info(f"this message should be printed with thread id")
test_environ()
test_log_file()
test_ranks()
test_styles()
test_threads() | zq-tools | /zq_tools-1.0.2-py3-none-any.whl/zq_tools/zq_logger.py | zq_logger.py |
import time
import sys
import functools
import inspect
from typing import Any, Callable, TypeVar, cast
from zq_tools.zq_logger import default_logger as logger
# Used for annotating the decorator usage of 'no_grad' and 'enable_grad'.
# See https://mypy.readthedocs.io/en/latest/generics.html#declaring-decorators
FuncType = Callable[..., Any]
F = TypeVar('F', bound=FuncType)
class _DecoratorContextManager:
"""Allow a context manager to be used as a decorator"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, func: F) -> F:
if inspect.isgeneratorfunction(func):
return self._wrap_generator(func)
@functools.wraps(func)
def decorate_context(*args, **kwargs):
with self.clone():
return func(*args, **kwargs)
return cast(F, decorate_context)
def _wrap_generator(self, func):
"""Wrap each generator invocation with the context manager"""
@functools.wraps(func)
def generator_context(*args, **kwargs):
gen = func(*args, **kwargs)
# Generators are suspended and unsuspended at `yield`, hence we
# make sure the grad mode is properly set every time the execution
# flow returns into the wrapped generator and restored when it
# returns through our `yield` to our caller (see PR #49017).
try:
# Issuing `None` to a generator fires it up
with self.clone():
response = gen.send(None)
while True:
try:
# Forward the response to our caller and get its next request
request = yield response
except GeneratorExit:
# Inform the still active generator about its imminent closure
with self.clone():
gen.close()
raise
except BaseException:
# Propagate the exception thrown at us by the caller
with self.clone():
response = gen.throw(*sys.exc_info())
else:
# Pass the last request to the generator and get its response
with self.clone():
response = gen.send(request)
# We let the exceptions raised above by the generator's `.throw` or
# `.send` methods bubble up to our caller, except for StopIteration
except StopIteration as e:
# The generator informed us that it is done: take whatever its
# returned value (if any) was and indicate that we're done too
# by returning it (see docs for python's return-statement).
return e.value
return generator_context
def __enter__(self) -> None:
raise NotImplementedError
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
raise NotImplementedError
def clone(self):
# override this method if your children class takes __init__ parameters
return self.__class__(*self.args, **self.kwargs)
def do_nothing(*args, **kwargs):
pass
def pass_it(func): # decorator
return do_nothing
class time_it(_DecoratorContextManager):
def __init__(self,
keyword="",
print_it=print,
sync_func=do_nothing):
super().__init__(keyword=keyword, print_it=print_it, sync_func=sync_func)
self.print_it = print_it
self.sync_func = sync_func
self.keyword = keyword if keyword=="" else f"[{keyword}] "
def __enter__(self):
self.sync_func()
self.start_time = time.time()
def __exit__(self, exc_type: Any, exc_value: Any, traceback:Any):
self.sync_func()
self.stop_time = time.time()
self.print_it(f"{self.keyword}time_it: {self.stop_time-self.start_time}s")
if __name__ == '__main__':
@pass_it
def test():
print("hello in test")
test()
@time_it(keyword="time it as wrapper")
def test():
print("hello in decorator")
test()
with time_it(keyword="time it as context manager"):
print("hello in `with`")
from zq_tools.zq_logger import default_logger as logger
with time_it(keyword="time it as context manager", print_it=logger.info):
print("hello in `with`, print with logger") | zq-tools | /zq_tools-1.0.2-py3-none-any.whl/zq_tools/zq_decorator.py | zq_decorator.py |
import json
import time
import os
import threading
from typing import Union
# json format: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/
__all__ = [
"get_pid", "get_tid",
"record_timestamp",
"record_begin",
"record_end",
"record_duration",
"record_thread_name",
"record_process_name",
"record_process_sort_index",
"record_thread_sort_index",
"record_dump",
"record_init",
"record_append",
"record_begin_async",
"record_end_async",
"set_start_timestamp",
"enable_trace",
"disable_trace"
]
contents = []
start_timestamp = 0
tracing_switch=True
def enable_trace():
global tracing_switch
tracing_switch=True
def disable_trace():
global tracing_switch
tracing_switch=False
def should_trace(func):
def inner(*args, **kwargs):
global tracing_switch
if tracing_switch:
return func(*args, **kwargs)
else:
return
return inner
def set_start_timestamp():
global start_timestamp
start_timestamp = time.time()
def get_pid():
return os.getpid()
def get_tid():
tid = threading.get_ident()
if not tid: tid = 0
return tid
@should_trace
def record_timestamp(name:str,
cat:str,
tid:int,
pid:int,
**kwargs) -> None:
global start_timestamp
j = {
"name":name,
"cat":cat,
"ts": (time.time()-start_timestamp)*1000000,
"pid": get_pid() if pid<0 else pid,
"tid": get_tid() if tid<0 else tid
}
if kwargs:
j["args"] = kwargs
return j
@should_trace
def record_begin(name:str,
cat:str="",
tid=-1,
pid=-1,
**kwargs):
j = record_timestamp(name, cat, tid, pid, **kwargs)
j['ph'] = "B"
contents.append(json.dumps(j))
@should_trace
def record_end(name:str,
cat:str="",
tid=-1,
pid=-1,
**kwargs):
j = record_timestamp(name, cat, tid, pid, **kwargs)
j['ph'] = "E"
contents.append(json.dumps(j))
@should_trace
def record_begin_async(name:str,
id:Union[str,int],
cat:str="",
tid=-1,
pid=-1,
**kwargs
):
j = record_timestamp(name, cat, tid, pid, **kwargs)
j['ph'] = 'b'
j['id'] = id
contents.append(json.dumps(j))
@should_trace
def record_end_async(name:str,
id:Union[str,int],
cat:str="",
tid=-1,
pid=-1,
**kwargs
):
j = record_timestamp(name, cat, tid, pid, **kwargs)
j['ph'] = 'e'
j['id'] = id
contents.append(json.dumps(j))
@should_trace
def record_duration(name:str,
cat:str="",
tid=-1,
pid=-1,
dur:float=0,
**kwargs):
j = record_timestamp(name, cat, tid, pid, **kwargs)
j['ph'] = "X"
j['dur'] = dur
contents.append(json.dumps(j))
@should_trace
def record_thread_name(name:str, tid=-1, pid=-1, **kwargs):
j = {
"name": "thread_name",
"ph": "M",
"pid": get_pid() if pid<0 else pid,
"tid": get_tid() if tid<0 else tid,
}
kwargs['name'] = name
j['args'] = kwargs
contents.append(json.dumps(j))
@should_trace
def record_process_name(name:str, pid=-1, **kwargs):
j = {
"name": "process_name",
'ph': 'M',
'pid': get_pid() if pid<0 else pid,
}
kwargs['name'] = name
j['args'] = kwargs
contents.append(json.dumps(j))
@should_trace
def record_process_sort_index(index:int, pid=-1, **kwargs):
j = {
'name': 'process_sort_index',
'ph': 'M',
'pid': get_pid() if pid<0 else pid,
}
kwargs['sort_index'] = index
j['args'] = kwargs
contents.append(json.dumps(j))
@should_trace
def record_thread_sort_index(index:int, tid=-1, pid=-1, **kwargs):
j = {
'name': 'thread_sort_index',
'ph': 'M',
'pid': get_pid() if pid<0 else pid,
'tid': get_tid() if tid<0 else tid,
}
kwargs['sort_index'] = index
j['args'] = kwargs
contents.append(json.dumps(j))
@should_trace
def record_dump(filename:str):
with open(filename, 'w') as f:
f.write("[\n")
f.write(",\n".join(contents))
f.write("\n]\n")
@should_trace
def record_init(filename:str):
with open(filename, 'w') as f:
f.write("[\n")
@should_trace
def record_append(filename:str):
with open(filename, 'a') as f:
for content in contents:
f.write(f"{content},\n")
if __name__ == '__main__':
# disable_trace()
for i in range(2):
print(f"{i}")
record_begin(name=f"name_{i}", cat="")
time.sleep(1)
record_end(name=f"name_{i}", cat="")
time.sleep(0.5)
record_thread_name(name="thread")
record_process_name(name="process")
record_dump("./test.json") | zq-tools | /zq_tools-1.0.2-py3-none-any.whl/zq_tools/zq_tracing.py | zq_tracing.py |
__author__ = 'Zhang Fan'
from threading import Lock
from zblocker import BLock_more
class queue_close():
pass
class Queue():
'''先进先出队列'''
def __init__(self, maxsize=None):
'''
初始化一个队列
:param maxsize:最大允许放入数量
'''
self.__maxsize = maxsize # 设置后不允许修改,否则可能会出现无法预知的错误
self.__obj_list = [] # 保存数据的对象
self.__obj_count = 0 # 对象数量
self.__close = False # 是否关闭了队列
self.__put_lock = Lock() # put锁
self.__get_lock = Lock() # get锁
self.__join_lock = BLock_more() # 等待锁
self.__get_lock.acquire() # 锁定get
def put(self, obj, show_close_err=True):
'''
放入一个数据, 如果队列已满则阻塞
:param obj: 要放入的数据
:param show_close_err: show_close_err为True时, 如果队列已关闭会报错
:return: 正常放入数据返回True, 队列关闭时返回False或报错
'''
assert not self.__close, '队列已关闭'
self.__put_lock.acquire() # 获取写入权限
if self.__close:
self._unlock_putlock() # 释放写入权限
if not show_close_err:
return False
raise AssertionError('队列已关闭')
self._put(obj)
if self.__obj_count == 1: # 第一次产生队列
self._unlock_getlock() # 释放读取权限
if self.__maxsize and self.__obj_count == self.__maxsize: # 存在最大值并且达到最大值
return True # 不释放写入权限
self._unlock_putlock() # 释放写入权限
return True
def get(self, show_close_err=True):
'''
取出一个数据, 如果没有数据则阻塞
:param show_close_err: show_close_err为True时, 如果队列已关闭会报错
:return: 正常获取数据返回该数据, 队列关闭时返回zqueue.queue_close类或报错
'''
'''获取一个数据,如果队列已关闭会报错'''
self.__get_lock.acquire() # 获取读取权限
if self.__close:
self._unlock_getlock() # 释放读取权限
if not show_close_err:
return queue_close
raise AssertionError('队列已关闭')
obj = self._get()
if self.__maxsize and self.__obj_count == self.__maxsize - 1: # 存在最大值并且刚从最大值降下来
self._unlock_putlock() # 释放写入权限
if self.__obj_count == 0: # 无队列
self.__join_lock.unlock() # 释放等待锁
return obj # 不释放读取权限
self._unlock_getlock() # 释放读取权限
return obj
def qsize(self):
return self.__obj_count
@property
def count(self):
'''获取队列数量'''
return self.__obj_count
def empty(self):
return self.__obj_count == 0
def is_empty(self):
'''如果队列为空返回True,不为空返回False'''
return self.__obj_count == 0
def full(self):
return not self.__maxsize or self.__obj_count == self.__maxsize
def is_full(self):
'''判断队列是否已满'''
return not self.__maxsize or self.__obj_count == self.__maxsize
@property
def is_close(self):
return self.__close
def join(self):
# 等待队列关闭或所有数据被取出
if not self.__close and self.__obj_count > 0:
self.__join_lock.lock()
def close(self):
# 关闭队列
if not self.__close:
self.__close = True
self.__join_lock.unlock()
self._unlock_putlock()
self._unlock_getlock()
def _put(self, obj):
self.__obj_list.append(obj) # 放到最后
self.__obj_count += 1
def _get(self):
return self._get_fifo()
def _get_fifo(self):
obj = self.__obj_list.pop(0) # 取出最前
self.__obj_count -= 1
return obj
def _get_lifo(self):
obj = self.__obj_list.pop() # 取出最后
self.__obj_count -= 1
return obj
def _unlock_putlock(self):
if self.__put_lock.locked():
self.__put_lock.release()
def _unlock_getlock(self):
if self.__get_lock.locked():
self.__get_lock.release()
class LifoQueue(Queue):
'''后进先出队列'''
def _get(self):
return self._get_lifo() # 取出最后
if __name__ == '__main__':
def fun_put():
for i in range(10):
print('put_start')
q.put(i)
print('put', i, '\n')
def fun_get():
while True:
if q.is_empty():
return
print(' get_start')
v = q.get()
print(' get', v)
time.sleep(0.3)
import time
import threading
q = Queue(3)
t1 = threading.Thread(target=fun_put)
t2 = threading.Thread(target=fun_get)
t1.start()
time.sleep(0.5)
t2.start()
q.join()
print('结束') | zqueue | /zqueue-1.2.0-py3-none-any.whl/zqueue.py | zqueue.py |
import os.path as osp
from collections import Iterable
import numpy as np
META_TOKEN = "__virtual_meta"
def valid_tile_size(shape, tile_size):
if isinstance(tile_size, Iterable):
assert len(tile_size) == len(shape), \
"tile_size and shape must have same dimension"
tile_size = np.array(tile_size).astype(np.int)
assert np.all(tile_size > 0), \
f"tile_size must be non-negative {tile_size}"
elif isinstance(tile_size, int):
assert tile_size > 0, f"tile_size must be non-negative {tile_size}"
tile_size = np.array([tile_size] * len(shape), dtype=np.int)
else:
raise ValueError(f"invalid tile_size {tile_size}")
return tile_size
def get_indexing_mat(mat):
return np.arange(mat.size, dtype=np.int).reshape(mat.shape)
def norm_index(nd_index, tile_size):
"""
"""
normed_index = [i for i in np.index_exp[nd_index] if i is not None]
tiled_index = []
current_index = 0
def norm(_index, is_stop=False):
if _index is None:
return _index
i = _index // tile
if is_stop:
# fast way to increase end by 1 to cover the range
i += 1
if i == 0:
i = None
return i
for ind in normed_index:
if np.array_equal(ind, []):
continue
if ind is Ellipsis:
current_index += len(tile_size) - len(normed_index) + 1
tiled_index.append(...)
continue
tile = tile_size[current_index]
if isinstance(ind, int):
tiled_index.append(norm(ind))
elif isinstance(ind, np.ndarray) and np.issubdtype(ind.dtype, np.signedinteger):
tiled_index.append(ind//tile)
elif isinstance(ind, slice):
if ind.step is None or ind.step > 0:
start = norm(ind.start)
stop = norm(ind.stop, is_stop=True)
else:
start = norm(ind.stop)
stop = norm(ind.start, is_stop=True)
tiled_index.append(slice(start, stop))
else:
raise NotImplementedError(
f"unknown index {ind}, {normed_index}, {nd_index}")
current_index += 1
return tuple(tiled_index)
def index_to_slice(nd_index, tile_size, shape=None):
if shape is None:
shape = tile_size
slices = [slice(i * t, i * t + s)
for i, t, s in zip(nd_index, tile_size, shape)]
return tuple(slices)
class VirtualData():
def __init__(self, shape, tile_size, data_handler, name):
self.shape = shape
self.tile_size = valid_tile_size(shape, tile_size)
self.data_handler = data_handler
assert not name.startswith("__"), "'__' prefix is prohibited"
self.name = name
self.tile_num = np.ceil(shape / self.tile_size).astype(np.int)
self.empty_tiles = np.ones(self.tile_num, dtype=np.bool)
self.tile_index = get_indexing_mat(self.empty_tiles)
self._data = None
@property
def data(self):
return self._data
@staticmethod
def indexed_name(name, index):
return f"__{name}_{index}"
@staticmethod
def make_tile(data, tile_size, name=None):
tile_size = valid_tile_size(data.shape, tile_size)
tile_num = tuple(np.ceil(data.shape / tile_size).astype(np.int))
result = {} if name else []
for index, nd_index in enumerate(np.ndindex(tile_num)):
indexing = index_to_slice(nd_index, tile_size)
v = data[indexing]
if name:
indexed_name = VirtualData.indexed_name(name, index)
result[indexed_name] = v
else:
result.append(v)
return result
def __getitem__(self, inds):
t_inds = norm_index(inds, self.tile_size)
load_inds = self.tile_index[t_inds][self.empty_tiles[t_inds]]
for ind in np.unique(load_inds):
self.load(ind)
if self._data is None:
return None
return self._data[inds]
def load(self, index):
load_name = self.indexed_name(self.name, index)
assert load_name in self.data_handler.files, \
f"{load_name} does not exist"
v = self.data_handler.get(load_name)
if self._data is None:
self._data = np.zeros((self.shape), dtype=v.dtype)
nd_index = tuple([i[0]
for i in np.where(self.tile_index == index)])
if not self.empty_tiles[nd_index]:
print(f"{index}, {nd_index} was loaded")
slices = index_to_slice(nd_index, self.tile_size, v.shape)
self._data[slices] = v
self.empty_tiles[nd_index] = False
def load_virtual_data(path, variable_list=None):
success = True
data_dict = {}
if not osp.exists(path):
return False, {}
try:
handler = np.load(path, allow_pickle=True)
if META_TOKEN in handler:
virtual_meta = handler[META_TOKEN].item()
else:
virtual_meta = []
if variable_list is None:
# loading all variables
variable_list = [k for k in handler.files
if not k.startswith("__")]
variable_list.extend(virtual_meta.keys())
for key in variable_list:
if key in virtual_meta:
# virtual data
meta = virtual_meta[key]
shape = meta["shape"]
tile_size = meta["tile_size"]
obj = VirtualData(shape, tile_size, handler, key)
elif key in handler:
# regular data
obj = handler[key]
if obj.shape == ():
obj = obj.item()
else:
# no data
success = False
continue
data_dict[key] = obj
except Exception:
success = False
return success, data_dict
def save_virtual_data(data_dict, path, virtual_meta_dict=(), compress=False):
new_data_dict = {}
new_meta_dict = {}
for key, data in data_dict.items():
if key in virtual_meta_dict:
meta = virtual_meta_dict[key]
tile_size = meta["tile_size"]
tiled_data_dict = VirtualData.make_tile(data, tile_size, key)
new_data_dict.update(tiled_data_dict)
meta["shape"] = data.shape
new_meta_dict[key] = meta
else:
new_data_dict[key] = data
new_data_dict[META_TOKEN] = new_meta_dict
if compress:
np.savez_compressed(path, **new_data_dict)
else:
np.savez(path, **new_data_dict)
return new_data_dict
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/virtual_data.py | virtual_data.py |
import time
from collections import OrderedDict
from contextlib import contextmanager
import numpy as np
def sync_cuda():
try:
import torch
for idx in range(torch.cuda.device_count()):
torch.cuda.synchronize(idx)
except Exception:
pass
def search_dir(module, key=""):
"""
the one-liner dir
"""
return [i for i in dir(module) if key in i.lower()]
class TimeCounter(object):
def __init__(self, sync=False, verbose=False):
self.clocks = OrderedDict()
self.sync = sync
self.verbose = verbose
self._last_name = None
def tic(self, name):
if self.sync:
sync_cuda()
if name not in self.clocks:
self.clocks[name] = {"times": [], "last_clock": 0}
self.clocks[name]["last_clock"] = time.time()
self._last_name = name
def toc(self, name=None):
name = self._last_name if name is None else name
if self.sync:
sync_cuda()
if name in self.clocks:
time_spend = time.time() - self.clocks[name]["last_clock"]
if self.verbose:
print(f"[{name}] {time_spend:.3f}s")
self.clocks[name]["times"].append(time_spend)
def toctic(self, name):
if self._last_name is None:
# no entry yet
self.tic("all")
else:
self.toc()
self.tic(name)
@contextmanager
def timeit(self, name):
self.tic(name)
yield
self.toc(name)
def get_time(self, name, mode="mean"):
if name not in self.clocks:
return -1
times = self.clocks[name]["times"]
if len(times) == 0:
return -1
if mode == "mean":
return np.float32(times).mean()
elif mode == "last":
return times[-1]
elif mode == "sum":
return np.float32(times).sum()
elif mode == "raw":
return times
elif mode == "count":
return len(times)
else:
return ValueError(f"unknwon mode {mode}")
def get_keys(self):
return self.clocks.keys()
def get_str(self, mode="mean", deliminator="\n", with_runs=True):
def _str(name):
times = self.get_time(name, mode=mode)
if with_runs:
count = self.get_time(name, mode="count")
return f"[{name}] {times:.3f}s/{count}r"
else:
return f"[{name}] {times:.3f}s"
return deliminator.join([_str(name) for name in self.clocks])
def __repr__(self):
for name, info in self.clocks.items():
if len(info["times"]) == 0:
if self.verbose:
print(f"toc on [{name}] for closure")
self.toc(name)
return self.get_str(mode="mean", deliminator=" | ", with_runs=True)
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/debug.py | debug.py |
import numpy as np
import os
import subprocess
import sys
from collections import defaultdict
import PIL
__all__ = ["collect_env_info"]
def collect_torch_env():
try:
import torch.__config__
return torch.__config__.show()
except ImportError:
# compatible with older versions of pytorch
from torch.utils.collect_env import get_pretty_env_info
return get_pretty_env_info()
def collect_env_info():
import torch
data = []
data.append(("sys.platform", sys.platform))
data.append(("Python", sys.version.replace("\n", "")))
data.append(("Numpy", np.__version__))
# data.append(get_env_module())
data.append(("PyTorch", torch.__version__))
data.append(("PyTorch Debug Build", torch.version.debug))
try:
import torchvision
data.append(("torchvision", torchvision.__version__))
except Exception:
data.append(("torchvision", "unknown"))
has_cuda = torch.cuda.is_available()
data.append(("CUDA available", has_cuda))
if has_cuda:
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, devids in devices.items():
data.append(("GPU " + ",".join(devids), name))
from torch.utils.cpp_extension import CUDA_HOME
data.append(("CUDA_HOME", str(CUDA_HOME)))
if CUDA_HOME is not None and os.path.isdir(CUDA_HOME):
try:
nvcc = os.path.join(CUDA_HOME, "bin", "nvcc")
nvcc = subprocess.check_output(
"'{}' -V | tail -n1".format(nvcc), shell=True)
nvcc = nvcc.decode("utf-8").strip()
except subprocess.SubprocessError:
nvcc = "Not Available"
data.append(("NVCC", nvcc))
cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
if cuda_arch_list:
data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list))
data.append(("Pillow", PIL.__version__))
try:
import cv2
data.append(("cv2", cv2.__version__))
except ImportError:
pass
try:
import tabulate
env_str = tabulate(data) + "\n"
except ImportError:
pass
env_str = ""
env_str += collect_torch_env()
return env_str
if __name__ == "__main__":
print(collect_env_info()) | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/collect_env_info.py | collect_env_info.py |
import cv2
import numpy as np
DEFAULT_PALETTE = [3**7 - 1, 2**7 - 1, 5**9 - 1]
def get_colors(labels, palette=DEFAULT_PALETTE):
"""
Simple function convert label to color
Args:
labels (int or list[int])
palette ([R, G, B])
Return:
return (2d np.array): N x 3, with dtype=uint8
"""
colors = np.array(labels).reshape(-1, 1) * palette
colors = (colors % 255).astype("uint8")
return colors
def get_color_bar(colors, size, is_vertical):
num = len(colors)
# get appropriate patch size for each cell
h, w = size
if is_vertical:
w *= num
else:
h *= num
hs = [h//num] * num
ws = [w//num] * num
# hanlde the residual
hs[0] += h - sum(hs)
ws[0] += w - sum(ws)
patch_list = []
for color, h, w in zip(colors, hs, ws):
if isinstance(colors, dict):
label = color
color = colors[label]
p = np.ones((h, w, 3))*color
p = cv2.putText(p, str(label), (w//4, h*3//4),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
else:
p = np.ones((h, w, 3))*color
patch_list.append(p)
if is_vertical:
patch = np.vstack(patch_list)
else:
patch = np.hstack(patch_list)
return patch.astype(np.uint8)
def test_get_color_bar():
import boxx
colors = get_colors([1, 2, 3, 4, 5])
size = (40, 200)
is_vertical = False
img = get_color_bar(colors, size, is_vertical)
boxx.show(img)
def img_to_uint8(img, img_max=None, img_min=None):
img_float = img.astype(np.float32)
if img_max is None:
img_max = img_float.max()
if img_min is None:
img_min = img_float.min()
img_float = (img_float - img_min) / (img_max - img_min) * 255.0
return img_float.clip(0, 255).astype(np.uint8)
def overlay_bboxes(image, boxes, labels=None, colors=None,
line_thickness=5, colorbar=0):
image = np.array(image)
if image.ndim == 2:
image = image[..., None]
assert image.ndim == 3, f"unknown image shape {image.shape}"
if image.shape[0] in (1, 3):
# chw -> hwc
image = image.transpose(1, 2, 0).copy()
if image.shape[-1] == 1:
image = image.repeat(3, -1)
# image should be h * w * 3 now
if colors is None:
if labels is None:
colors = [None] * len(boxes)
else:
assert len(labels) == len(boxes), "box label size mismatch"
colors = get_colors(labels)
boxes = np.array(boxes)
if image.dtype != np.uint8:
image = img_to_uint8(image)
image = np.ascontiguousarray(image)
for box, color in zip(boxes, colors):
box = box.round()
if color is None:
color = (255, 0, 0) # default is red
else:
color = tuple(map(int, color))
image = cv2.rectangle(image,
(int(box[0]), int(box[1])),
(int(box[2]), int(box[3])),
color, line_thickness)
if colorbar > 0:
h, w = image.shape[:2]
if labels is not None:
colors = dict(zip(labels, colors))
if colorbar == 1:
bar = get_color_bar(colors, (h, 40), is_vertical=True)
image = np.hstack([image, bar])
else:
bar = get_color_bar(colors, (40, w), is_vertical=False)
image = np.vstack([image, bar])
return image
def test_overlay_bboxes():
import boxx
img = np.random.random((200, 200))
bbox = [[10, 10, 40, 40], [20, 100, 50, 150]]
labels = [1, 3]
img = overlay_bboxes(img, bbox, labels=labels, colorbar=2)
boxx.show(img)
def get_img_rois(img,
boxes,
masks=None,
texts=None,
padding=100,
line_thickness=1,
font_size=0.5,
color=(255, 255, 255),
alpha=1.0):
if not isinstance(img, np.ndarray):
img = np.array(img)
if len(img.shape) == 2:
img = img[..., None]
if img.shape[-1] == 1:
img = img.repeat(3, -1)
rois = np.array(boxes).round().astype(int)
h, w = img.shape[:2]
imgs_list = []
if masks is None:
masks = [None] * len(rois)
if texts is None:
texts = [""] * len(rois)
img_info = np.iinfo(img.dtype)
for roi, mask, text in zip(rois, masks, texts):
x0, y0, x1, y1 = roi
x0, x1 = np.clip([x0 - padding, x1 + padding], 0, w)
y0, y1 = np.clip([y0 - padding, y1 + padding], 0, h)
new_img = img[y0:y1, x0:x1, :].copy()
if mask is not None:
new_img = new_img.astype(float)
mask = np.array(mask).squeeze()
if mask.shape == img.shape[:2]:
mask = mask[y0:y1, x0:x1, None] * color
new_img = (1.0-alpha) * new_img + alpha * mask
else:
mask = cv2.resize(mask,
(roi[2] + 1 - roi[0], roi[3] + 1 - roi[1]))
mask = mask[..., None] * color
mx0, mx1 = roi[0] - x0, roi[2] + 1 - x0
ix0, ix1 = np.clip([mx0, mx1], 0, x1 - x0)
my0, my1 = roi[1] - y0, roi[3] + 1 - y0
iy0, iy1 = np.clip([my0, my1], 0, y1 - y0)
new_img[iy0:iy1, ix0:ix1] *= (1.0-alpha)
new_img[iy0:iy1, ix0:ix1] += alpha * \
mask[(iy0 - my0):(iy1 - my0), (ix0 - mx0):(ix1 - mx0)]
new_img = new_img.clip(img_info.min,
img_info.max).astype(img.dtype)
if text:
cv2.putText(new_img, str(text), (padding, padding),
cv2.FONT_HERSHEY_SIMPLEX, font_size, color,
line_thickness)
imgs_list.append(new_img)
return imgs_list
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/plot.py | plot.py |
from typing import Dict, Optional, Tuple
class Registry(object):
"""
The registry that provides name -> object mapping, to support third-party
users' custom modules.
To create a registry (e.g. a backbone registry):
.. code-block:: python
BACKBONE_REGISTRY = Registry('BACKBONE')
To register an object:
.. code-block:: python
@BACKBONE_REGISTRY.register()
class MyBackbone():
...
Or:
.. code-block:: python
BACKBONE_REGISTRY.register(MyBackbone)
"""
def __init__(self, name: str) -> None:
"""
Args:
name (str): the name of this registry
"""
self._name: str = name
self._obj_map: Dict[str, object] = {}
def _do_register(self, name: str, obj: object) -> None:
assert (
name not in self._obj_map
), "An object named '{}' was already registered in '{}' registry!".format(
name, self._name
)
self._obj_map[name] = obj
def register(self, obj: object = None) -> Optional[object]:
"""
Register the given object under the the name `obj.__name__`.
Can be used as either a decorator or not. See docstring of this class for usage.
"""
if obj is None:
# used as a decorator
def deco(func_or_class: object) -> object:
name = func_or_class.__name__ # pyre-ignore
self._do_register(name, func_or_class)
return func_or_class
return deco
# used as a function call
name = obj.__name__ # pyre-ignore
self._do_register(name, obj)
def get(self, name: str) -> object:
ret = self._obj_map.get(name)
if ret is None:
raise KeyError(
"No object named '{}' found in '{}' registry!".format(
name, self._name
)
)
return ret
__getitem__ = get
def has(self, name: str) -> bool:
"""
less forceful way of checking if something is already registered
"""
return name in self._obj_map
def keys(self) -> Tuple[str]:
return tuple(self._obj_map.keys())
def values(self) -> Tuple[object]:
return tuple(self._obj_map.values())
def __repr__(self) -> str:
return f"<{self._name} Registry with {len(self._obj_map)} items>"
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/registry.py | registry.py |
def get_value_from_dict_safe(d, key, default=None):
"""
get the value from dict
args:
d: {dict}
key: {a hashable key, or a list of hashable key}
if key is a list, then it can be assumed the d is a nested dict
default: return value if the key is not reachable, default is None
return:
value
"""
assert isinstance(d, dict), f"only supports dict input, {type(d)} is given"
if isinstance(key, (list, tuple)):
for _k in key[:-1]:
if _k in d and isinstance(d[_k], dict):
d = d[_k]
else:
return default
key = key[-1]
return d.get(key, default)
def set_value_to_dict_safe(d, key, value, append=False):
"""
set the value to dict
args:
d: {dict}
key: {a hashable key, or a list of hashable key}
if key is a list, then it can be assumed the d is a nested dict
value: value to be set
append: if the value is appended to the list, default is False
return:
bool: if the value is succesfully set
"""
assert isinstance(d, dict), f"only supports dict input, {type(d)} is given"
if isinstance(key, (list, tuple)):
for _k in key[:-1]:
if _k in d:
if isinstance(d[_k], dict):
d = d[_k]
else:
return False
else:
d[_k] = dict()
d = d[_k]
key = key[-1]
if append:
if key not in d:
d[key] = [value]
elif isinstance(d[key], list):
d[key].append(value)
else:
return False
else:
d[key] = value
return True
def visualize_dict(d, indent="--", level=0):
"""
print out the the dict strctures, unwraping list by 1-depth
"""
prefix = indent * level
if prefix:
prefix = " |" + prefix
if isinstance(d, (list, tuple)):
print(prefix, f"[{type(d[0])} * {len(d)}]")
return
elif not isinstance(d, dict):
print(prefix, f"{type(d)}")
return
for k, v in d.items():
if isinstance(v, (dict, )):
print(prefix, k)
visualize_dict(v, indent=indent, level=level + 1)
elif isinstance(v, (list, tuple)):
print(prefix, k)
if isinstance(v[0], (dict, )):
for i in v:
visualize_dict(i, indent=indent, level=level + 1)
else:
visualize_dict(v, indent=indent, level=level + 1)
else:
print(prefix, k, v)
def recursive_update(default, custom):
"""
https://github.com/Maples7/dict-recursive-update/blob/master/dict_recursive_update/__init__.py
"""
if not isinstance(default, dict) or not isinstance(custom, dict):
raise TypeError('Params of recursive_update should be dicts')
for key in custom:
if isinstance(custom[key], dict) and isinstance(
default.get(key), dict):
default[key] = recursive_update(default[key], custom[key])
else:
default[key] = custom[key]
return default
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/dict.py | dict.py |
import os
import os.path as osp
import shutil
import numpy as np
from psutil import disk_partitions, disk_usage
from joblib import Parallel, delayed
from zqy_utils import filesize_to_str, make_dir
"""
utility functions to copy from remote disk to multiple local disks
"""
def get_avaliable_disks(min_size=1024, ignored_disks=("/", "/boot")):
disks_list = []
for disk in disk_partitions():
path = disk.mountpoint
if path in ignored_disks:
continue
if disk_usage(path).free > min_size:
disks_list.append(path)
return disks_list
def get_ready_disks(disks_list, total=1024):
for disk in disks_list[:]:
if not os.access(disk, os.W_OK):
print(f"cant make dir in {disk}, maybe you dont have right to ")
disks_list.remove(disk)
size_list = sorted([disk_usage(path).free for path in disks_list])
disks_list = sorted(disks_list, key=lambda path: disk_usage(path).free)
for i, size in enumerate(size_list):
if size > total/(len(size_list) - 1) + 1:
break
else:
return []
return disks_list[i:]
def copy_and_link(src_root, relative_path, files_list, target_root, overwrite=True):
target_dir = make_dir(target_root, relative_path)
username = os.environ["USER"]
for path in files_list:
filename = osp.basename(path)
src_dir = make_dir(src_root, username, relative_path)
src_file = osp.join(src_dir, filename)
dst_file = osp.join(target_dir, filename)
if osp.exists(dst_file):
if overwrite:
os.remove(dst_file)
else:
raise ValueError(f"{dst_file} already exist")
shutil.copy2(path, src_file)
os.symlink(src_file, dst_file)
def make_copies(src_root_list, target_root, relative_path,
disks_list=None,
random=False):
"""
if relative_path is Noner
final_root = {target_root}/{relative_path}
"""
final_root = make_dir(target_root, relative_path)
if isinstance(src_root_list, str):
src_root_list = [src_root_list]
file_list = []
size_list = []
for src_root in src_root_list:
if osp.isfile(src_root):
file_list.append(src_root)
size_list.append(osp.getsize(src_root))
elif osp.isdir(src_root):
for root, dirs, files in os.walk(src_root):
for f in files:
path = osp.join(root, f)
file_list.append(path)
size_list.append(osp.getsize(path))
else:
raise ValueError(f"unsupported type: {src_root}")
total_size = sum(size_list)
total_size_str = filesize_to_str(total_size)
print(f"copying {len(file_list)}({total_size_str}) to {final_root}")
if disks_list is None:
disks_list = get_avaliable_disks()
# from small to large
disks_list = get_ready_disks(disks_list)
# todo: make balanced loaders
indices = range(len(file_list))
if random:
indices = np.random.permutation(indices)
size = 0
files = []
thresh = total_size/len(disks_list)
mapping_dict = {}
for index in indices:
files.append(file_list[index])
size += size_list[index]
if size > thresh:
for disk in disks_list[:]:
if disk_usage(disk).free > size:
mapping_dict[disk] = files
print(
f"will copy {len(files)}({filesize_to_str(size)}) files to {disk}")
files = []
size = 0
disks_list.remove(disk)
break
else:
raise ValueError(f"no disk larger than {size}")
disk = disks_list[0]
assert len(disks_list) == 1 and disk_usage(
disk).free > size, "soemthing wrong"
mapping_dict[disk] = files
print(f"will copy {len(files)}({filesize_to_str(size)}) files to {disk}")
param_list = [[disk, relative_path, file_list, target_root]
for disk, file_list in mapping_dict.items()]
Parallel(n_jobs=len(mapping_dict))(delayed(copy_and_link)(*param)
for param in param_list)
return mapping_dict
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/dist_copy.py | dist_copy.py |
import os.path as osp
import numpy as np
import SimpleITK as sitk
DEFAULT_DICOM_TAG = {
"patientID": "0010|0020",
"studyUID": "0020|000d",
"seriesUID": "0020|000e",
"customUID": "0008|103e",
"image_pixel_spacing": "0018|1164",
"instance_number": "0020|0013",
"manufacturer": "0008|0070",
"body_part": "0018|0015",
"body_part_thickness": "0018|11a0",
"primary_angle": "0018|1510",
"view": "0018|5101",
"laterality": "0020|0062",
"window_center": "0028|1050",
"window_width": "0028|1051",
"rescale_intercept": "0028|1052",
"rescale_slope": "0028|1053",
"patient_orientation": "0020|0020",
"PresentationLUTShape": "2050|0020",
"sop_instance_uid": "0008|0018"
}
def is_valid_file(filename, verbose=True):
"""
given filename, check if it's valid
"""
if not isinstance(filename, str):
if verbose:
print(f"{filename} is not string")
return False
if not osp.exists(filename):
if verbose:
print(f"{filename} does not exist")
return False
if not osp.isfile(filename):
if verbose:
print(f"{filename} exists, but is not file")
return False
return True
def sitk_read_image(img_path, as_np=False):
if not is_valid_file(img_path):
return None
try:
img = sitk.ReadImage(img_path)
if as_np:
img = sitk.GetArrayFromImage(img)
except Exception:
print(f"[Error] unable to load img_path {img_path}, "
"perhaps its not standard format")
return None
return img
def get_image_info_from_image(img_itk, info=None):
"""
read dicom tags and return their values as dict
args:
img_itk (sitk.Image): the itk image
info (dict{tag_name->tag_position})
return:
info_dict: the dicom tag values, default is 'None'
"""
parsing_tags = DEFAULT_DICOM_TAG.copy()
if info is not None:
parsing_tags.update(info)
info_dict = {tag: None for tag in parsing_tags}
assert isinstance(img_itk, sitk.Image), "only supports itk image as input"
for tag, meta_key in parsing_tags.items():
try:
info_dict[tag] = img_itk.GetMetaData(meta_key).strip(" \n")
except Exception:
info_dict[tag] = None
return info_dict
def get_image_info(img_path, info=None):
if isinstance(img_path, sitk.Image):
return get_image_info_from_image(img_path, info)
parsing_tags = DEFAULT_DICOM_TAG.copy()
if info is not None:
parsing_tags.update(info)
info_dict = {tag: None for tag in parsing_tags}
if not is_valid_file(img_path):
return info_dict
reader = sitk.ImageFileReader()
reader.SetFileName(img_path)
reader.LoadPrivateTagsOn()
reader.ReadImageInformation()
all_keys = reader.GetMetaDataKeys()
for tag, meta_key in parsing_tags.items():
if meta_key in all_keys:
info_dict[tag] = reader.GetMetaData(meta_key).strip(" \n")
else:
info_dict[tag] = None
return info_dict
def sitk_read_image_series(image_series, uid=None, verbose=False):
"""
reading image series into a 3d image stack
"""
reader = sitk.ImageSeriesReader()
if isinstance(image_series, (list, set, tuple)):
if not np.all([osp.exists(path) for path in image_series]):
print(
"[WARNING] some images are missing"
)
elif isinstance(image_series, str):
if not osp.isdir(image_series):
print("[ERROR] specified directory is not existed")
return
else:
if uid is None:
image_series = reader.GetGDCMSeriesFileNames(
image_series, loadSequences=True)
else:
image_series = reader.GetGDCMSeriesFileNames(
image_series, uid, loadSequences=True)
try:
if verbose:
print(image_series)
reader.SetFileNames(image_series)
img_itk = reader.Execute()
except Exception:
img_itk = None
return img_itk
def update_tags(img_path, update_dict):
"""
update tags
Args:
img_path(str): path
update_dict(dict{tag_key: value})
"""
img = sitk.ReadImage(img_path)
for key, value in update_dict.items():
if key in DEFAULT_DICOM_TAG:
key = DEFAULT_DICOM_TAG[key]
img.SetMetaData(key, value)
sitk.WriteImage(img, img_path)
_SITK_INTERPOLATOR_DICT = {
'nearest': sitk.sitkNearestNeighbor,
'linear': sitk.sitkLinear,
'gaussian': sitk.sitkGaussian,
'label_gaussian': sitk.sitkLabelGaussian,
'bspline': sitk.sitkBSpline,
'hamming_sinc': sitk.sitkHammingWindowedSinc,
'cosine_windowed_sinc': sitk.sitkCosineWindowedSinc,
'welch_windowed_sinc': sitk.sitkWelchWindowedSinc,
'lanczos_windowed_sinc': sitk.sitkLanczosWindowedSinc
}
def resample_sitk_image(sitk_image, spacing=None, interpolator=None,
fill_value=0):
# https://github.com/jonasteuwen/SimpleITK-examples/blob/master/examples/resample_isotropically.py
"""Resamples an ITK image to a new grid. If no spacing is given,
the resampling is done isotropically to the smallest value in the current
spacing. This is usually the in-plane resolution. If not given, the
interpolation is derived from the input data type. Binary input
(e.g., masks) are resampled with nearest neighbors, otherwise linear
interpolation is chosen.
Parameters
----------
sitk_image : SimpleITK image or str
Either a SimpleITK image or a path to a SimpleITK readable file.
spacing : tuple
Tuple of integers
interpolator : str
Either `nearest`, `linear` or None.
fill_value : int
Returns
-------
SimpleITK image.
"""
if isinstance(sitk_image, str):
sitk_image = sitk.ReadImage(sitk_image)
num_dim = sitk_image.GetDimension()
if not interpolator:
interpolator = 'linear'
pixelid = sitk_image.GetPixelIDValue()
if pixelid not in [1, 2, 4]:
raise NotImplementedError(
'Set `interpolator` manually, '
'can only infer for 8-bit unsigned or 16, 32-bit signed integers')
if pixelid == 1: # 8-bit unsigned int
interpolator = 'nearest'
orig_pixelid = sitk_image.GetPixelIDValue()
orig_origin = sitk_image.GetOrigin()
orig_direction = sitk_image.GetDirection()
orig_spacing = np.array(sitk_image.GetSpacing())
orig_size = np.array(sitk_image.GetSize(), dtype=np.int)
if not spacing:
min_spacing = orig_spacing.min()
new_spacing = [min_spacing]*num_dim
else:
new_spacing = [float(s) for s in spacing]
assert interpolator in _SITK_INTERPOLATOR_DICT.keys(),\
'`interpolator` should be one of {}'.format(
_SITK_INTERPOLATOR_DICT.keys())
sitk_interpolator = _SITK_INTERPOLATOR_DICT[interpolator]
new_size = orig_size*(orig_spacing/new_spacing)
# Image dimensions are in integers
new_size = np.ceil(new_size).astype(np.int)
# SimpleITK expects lists, not ndarrays
new_size = [int(s) for s in new_size]
resample_filter = sitk.ResampleImageFilter()
resampled_sitk_image = resample_filter.Execute(sitk_image,
new_size,
sitk.Transform(),
sitk_interpolator,
orig_origin,
new_spacing,
orig_direction,
fill_value,
orig_pixelid)
return resampled_sitk_image
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/dicom.py | dicom.py |
import numbers
from scipy.spatial.transform import Rotation as R
import numpy as np
def norm(vec):
"""
normalize the last dimension of a vector to unit 1.
"""
v = np.linalg.norm(vec, axis=-1, keepdims=True)
v[v < 1e-9] = 1.0 # guard against 0 division
return vec / v
def get_rotation_matrix(rot_axis, angle):
"""
get the rotation matrix for given axis + angle
"""
rot_axis = norm(rot_axis)
rad = np.deg2rad(angle)
r = R.from_rotvec(rad * rot_axis)
return r.as_dcm()
def get_range(pts, vec, size):
"""
given a series of points and a vector, finds its range such
all valid info are covered by pts + range* vec
"""
vec[abs(vec) < 1e-9] = 1e-9
pts = np.array(pts)
intercepts = np.hstack([-pts / vec, (size - pts) / vec])
intercepts.sort(-1)
return min(intercepts[:, 2]), max(intercepts[:, 3])
def project_to_plane(pts, plane_norm):
"""
given a plane_norm, return porjected points to that plane
Args:
pts (np.ndarray): Nx3 points
plane_norm (3d vector): the normal vector
Return:
projected_pts (np.ndarray): Nx3 projected points
distance (np.ndarray): N distance
"""
plane_norm = norm(plane_norm)
distance = pts.dot(plane_norm)
projected_pts = pts - distance[:, None]*plane_norm
return projected_pts, distance
def get_rotated_vec(rot_axis, angle, pivot_axis=(1, 0, 0)):
"""
given a rotation axis, angle and pivot_axis, return the rotated vector
"""
rot_axis = norm(rot_axis)
rot_mat = get_rotation_matrix(rot_axis, angle)
pivot_axis = norm(pivot_axis)
vec = norm(np.cross(pivot_axis, rot_axis))
rotated_vec = vec @ rot_mat
return rotated_vec
def get_consistent_normals(pts, angle=0.0, pivot_axis=(1, 0, 0),
return_binormals=False, repeat_last=True):
"""
get a series of normals (and binormals) from a series of points
Args:
pts (np.ndarray): Nx3 points
angle (float): Default = 0.0. rotated angle around anchor vector,
which is normal to rotation axis and pivot axis
pivot_axis (3d vector): Default = (1, 0, 0)
return_binormals (bool): Default = False
repeat_last (bool): Default = True. Repeat last vector so normals
(and binormals) have the same length as input points
"""
tangents = norm(pts[1:] - pts[:-1])
rot_axis = tangents[0]
n0 = get_rotated_vec(rot_axis, angle, norm(pivot_axis))
norm_list = [n0]
"""
for t in tangents[1:]:
tmp = np.cross(n0, t)
n0 = np.cross(t, tmp)
norm_list.append(n0)
"""
def calc_norm(t):
n0 = norm_list[-1]
tmp = np.cross(n0, t)
n0 = np.cross(t, tmp)
norm_list.append(n0)
# apparently, using np.vectorize is slightly faster?
np_calc_norm = np.vectorize(calc_norm, signature='(n)->()')
np_calc_norm(t=tangents[1:])
norms = norm(norm_list)
if repeat_last:
norms = np.vstack([norms, norms[-1]])
tangents = np.vstack([tangents, tangents[-1]])
if not return_binormals:
return norms
binorms = np.cross(norms, tangents)
return norms, binorms
def grids_to_torch(grids, size, dim=5):
"""
convert np grids to torch.Tensor
"""
import torch
grids = torch.Tensor(grids / size * 2.0 - 1.0)
while grids.dim() < dim:
# 3d sampler is 5d, namely NCWHD
grids = grids[None]
return grids
def get_straight_cpr_grids(cl, angle, size=None, pivot_axis=(1, 0, 0),
voxel_spacing=None, sample_spacing=0.0,
width=40, as_torch=True):
"""
get the sampling_grids for straight cpr
Args:
cl (np.ndarray): Nx3 centerline points
angle (float or [float]): rotated angle around anchor vector,
which is normal to rotation axis and pivot axis
size ([w, h, d]): size of image
pivot_axis (3d vector): Default = (1, 0, 0)
voxel_spacing (3d vector): spacing of image
sample_spacing (float): spacing for pixel width, as height
spacing is predetermined by centerline spacing.
Default = 0.0, which is sampled with unit vector
width (int): half width around centerline. Default = 40
as_torch (bool): Default = True. grid is scaled to [-1, 1],
thus can be applied to torch.nn.functional.grid_sample
Return:
grids (np.ndarry or torch.Tensor)
Note:
0. If sample_spacing is given, voxel_spacing must be set
1. If as_torch = True, size must be given.
"""
assert (not as_torch) or (size is not None), \
"have to set size when return as torch grid"
assert (sample_spacing == 0.0) or (voxel_spacing is not None), \
"have to set voxel_spacing when sample_spacing > 0.0"
if isinstance(angle, numbers.Number):
normals = get_consistent_normals(cl,
angle=angle,
pivot_axis=pivot_axis,
return_binormals=False)
sum_rep = "w,hk->hwk"
else:
# assuming its a series of angles
n, bn = get_consistent_normals(cl, angle=0, return_binormals=True)
normals = [np.cos(np.deg2rad(a)) * n +
np.sin(np.deg2rad(a)) * bn for a in angle]
sum_rep = "w,chk->chwk"
if sample_spacing > 0.0:
norm = np.linalg.norm(normals * np.array(voxel_spacing), axis=-1)
normals *= sample_spacing / norm[..., None]
grids = np.einsum(sum_rep, np.arange(-width, width + 1),
normals) + cl[:, None]
if as_torch:
grids = grids_to_torch(grids, size)
return grids
def get_stretched_cpr_grids(cl, angle, size, rot_axis=None,
pivot_axis=(1, 0, 0), voxel_spacing=None,
sample_spacing=0.0, as_torch=True,
return_pts2d=False):
"""
get the sampling_grids for stretched cpr
Args:
cl (np.ndarray): Nx3 centerline points
angle (float): rotated angle around anchor vector,
which is normal to rotation axis and pivot axis
size ([w, h, d]): size of image
rot_axis (3d vector or None): Default = None.
If not given, set to the end points of centerline
pivot_axis (3d vector): Default = (1, 0, 0)
voxel_spacing (3d vector): spacing of image
sample_spacing (float): spacing for pixel width, as height
spacing is predetermined by centerline spacing.
Default = 0.0, which is sampled with unit vector
as_torch (bool): Default = True. grid is scaled to [-1, 1],
thus can be applied to torch.nn.functional.grid_sample
return_pts2d (bool): Default = False.
Return the centerline position in 2d
Return:
grids (np.ndarry or torch.Tensor)
pts2d (np.ndarry): Nx2 centerline points
"""
assert (sample_spacing == 0.0) or (voxel_spacing is not None), \
"have to set voxel_spacing when sample_spacing > 0.0"
assert isinstance(angle, numbers.Number), "angle has to be float"
if rot_axis is None:
rot_axis = cl[-1] - cl[0]
vec = get_rotated_vec(rot_axis, angle, pivot_axis)
cl, distance = project_to_plane(cl, vec)
r = get_range(cl, vec, size)
end_pts = [cl + r[0] * vec, cl + r[1] * vec]
if sample_spacing > 0.0:
space_ratio = np.linalg.norm(vec * voxel_spacing) / sample_spacing
else:
space_ratio = 1.0
width = int((r[1] - r[0])*space_ratio) + 1
grids = np.linspace(*end_pts, width)
if as_torch:
grids = grids_to_torch(grids, size)
if not return_pts2d:
return grids
h = (distance - r[0])*space_ratio
pts2d = np.vstack([np.arange(len(h)), h]).T
return grids, pts2d
def double_reflection_method(pts, r0=(1, 0, 0)):
"""
approximation of Rotation Minimizing Frames
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/12/Computation-of-rotation-minimizing-frames.pdf
"""
pts = np.array(pts)
r0 = norm(np.array(r0))
vecs = pts[1:] - pts[:-1]
t0 = vecs[0]
normals = [r0]
for index, v1 in enumerate(vecs[:-1]):
c1 = v1.dot(v1)
rL = r0 - (2/c1) * (v1.dot(r0)) * v1
tL = t0 - (2/c1) * (v1.dot(t0)) * v1
t1 = vecs[index+1]
v2 = t1 - tL
c2 = v2.dot(v2)
r1 = rL - (2/c2) * (v2.dot(rL)) * v2
normals.append(r1)
t0 = t1
r0 = r1
normals = norm(normals)
binormals = np.cross(vecs, normals)
return normals, binormals
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/cpr.py | cpr.py |
import lazy_import
import numpy as np
vtk = lazy_import.lazy_module("vtk")
numpy_to_vtk = lazy_import.lazy_callable("vtk.util.numpy_support.numpy_to_vtk")
vtk_to_numpy = lazy_import.lazy_callable("vtk.util.numpy_support.vtk_to_numpy")
def np_to_polydata(pts, cells=None, poly_type="Polys"):
"""
convert np.points (+ faces) to vtk.polydata
"""
polyData = vtk.vtkPolyData()
numberOfPoints = len(pts)
points = vtk.vtkPoints()
for x, y, z in pts:
points.InsertNextPoint(x, y, z)
polyData.SetPoints(points)
if cells is None:
# assuming it is a line
lines = vtk.vtkCellArray()
lines.InsertNextCell(numberOfPoints)
for i in range(numberOfPoints):
lines.InsertCellPoint(i)
polyData.SetLines(lines)
else:
polys = vtk.vtkCellArray()
for indices in cells:
polys.InsertNextCell(len(indices))
for ind in indices:
polys.InsertCellPoint(ind)
setter = getattr(polyData, f"Set{poly_type}")
setter(polys)
return polyData
def endpts_to_polyline(start, end, sampling_rate=1):
"""
convert np.points to bunch of vtk.lines
"""
if sampling_rate > 1:
start = start[::sampling_rate]
end = end[::sampling_rate]
size = len(start)
poly_pts = np.vstack([start, end])
indices = np.vstack([np.arange(size), size+np.arange(size)]).T
poly = np_to_polydata(poly_pts, indices, "Lines")
return poly
def np_to_points(np_mat):
"""
convert np.points to vtk.vtkPoints
"""
pts = vtk.vtkPoints()
pts.SetData(numpy_to_vtk(np_mat))
return pts
def get_equal_length_pts(pts, sample_spacing, method="cardinal"):
"""
given a series of points, return equal spacing sampled points
using vtk spline to approximate the parametric curve
"""
polyData = np_to_polydata(pts)
spline = vtk.vtkSplineFilter()
spline.SetInputDataObject(polyData)
if method == "cardinal":
spline.SetSpline(vtk.vtkCardinalSpline())
elif method == "kochanek":
spline.SetSpline(vtk.vtkKochanekSpline())
else:
pass
spline.SetSubdivideToLength()
spline.SetLength(sample_spacing)
spline.Update()
equal_length_pts = vtk_to_numpy(spline.GetOutput().GetPoints().GetData())
return equal_length_pts
def get_parametric_pts(pts, linear=False, num_pts=-1, as_np=True):
vtkpts = np_to_points(pts)
spline = vtk.vtkParametricSpline()
if linear:
spline.SetXSpline(vtk.vtkSCurveSpline())
spline.SetYSpline(vtk.vtkSCurveSpline())
spline.SetZSpline(vtk.vtkSCurveSpline())
spline.SetPoints(vtkpts)
ret = vtk.vtkParametricFunctionSource()
ret.SetParametricFunction(spline)
if num_pts > 0:
ret.SetUResolution(num_pts)
ret.Update()
if as_np:
return vtk_to_numpy(ret.GetOutput().GetPoints().GetData())
return ret
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/vtk.py | vtk.py |
import itertools
import numpy as np
def flat_nested_list(nested_list):
return list(itertools.chain(*nested_list))
def get_bounding_box(edge_list, dim=2, verbose=False):
"""
given a (nested) list of points, return its bounding box
args:
edge_list (list[points])
dim (int): point dimension, default is 2
if the list is empty, all values are defaulted to -1
return:
bounding_box: (2d np.array with shape 2xdim): [top_left, bot_right]
"""
edge_list_np = np.array(flat_nested_list(edge_list))
try:
edge_list_np = edge_list_np.reshape(-1, dim)
min_point = edge_list_np.min(axis=0)
max_point = edge_list_np.max(axis=0)
bounding_box = np.vstack((min_point, max_point))
except Exception:
dtype = edge_list_np.dtype
if verbose:
print(edge_list_np, dtype)
bounding_box = np.ones((2, dim), dtype=dtype) * -1
return bounding_box
def psf(pts, kernel=0, size=None, as_tuple=True):
"""
point spread function
Args:
pts (list[float]): N points with K dim
kenerl (int) : 0 = center, 1=8 points, 2=27 points
size (img.size): K int
as_tuple (bool): if output as tuple
Return:
pts_list:
if as_tuple=True: (array, ) x K
if as_tuple=False: N x K array
Note: the kernel points count is using 3-d input as reference.
"""
if kernel == 1:
pts = np.array(pts).astype(int)
else:
pts = np.array(pts).round().astype(int)
if pts.size == 0:
return pts
if len(pts.shape) == 1:
# dim -> 1 x dim
pts = pts[None]
if kernel > 0:
dim = pts.shape[-1]
if kernel == 1:
neighbor_pts = np.stack(np.meshgrid(*[(0, 1)] * dim))
elif kernel == 2:
neighbor_pts = np.stack(np.meshgrid(*[(-1, 0, 1)] * dim))
# N x dim x 1 + dim x 27 -> N x dim x 27
pts = pts[..., None] + neighbor_pts.reshape(dim, -1)
# N x dim x 27 -> N*27 x dim
pts = pts.transpose(0, 2, 1).reshape(-1, dim)
size = None if size is None else np.array(size) - 1
pts = pts.clip(0, size)
if as_tuple:
pts = tuple(pts.T)
return pts
def _test_psf():
import boxx
s = 10
size = (s, s, s)
pts_list = [(i, i, i) for i in range(s)]
for kernel in range(3):
img = np.zeros(size)
pts = psf(pts_list, kernel, size=size, as_tuple=True)
img[pts] = 1
boxx.show(img)
def union_merge(merge_mat):
"""
return group_indices based on merge_mat
Args:
merge_mat (NxN np.array or torch.Tensor): merging criteria
Return:
group_indices (list[indices])
"""
N = len(merge_mat)
if N == 0:
return []
else:
item_id = np.arange(N)
group_indices = [[i] for i in range(N)]
for id1 in range(N):
for id2 in range(N):
if not merge_mat[id1, id2]:
continue
min_id = min(item_id[id1], item_id[id2])
for cur_id in (item_id[id1], item_id[id2]):
if cur_id == min_id:
continue
group_indices[min_id].extend(group_indices[cur_id])
group_indices[cur_id] = []
item_id[group_indices[min_id]] = min_id
group_indices = [i for i in group_indices if i != []]
return group_indices
def get_num_union(pts1, pts2):
pts_all = np.concatenate([pts1, pts2])
num_union = len(np.unique(pts_all, axis=0))
return num_union
def get_pts_merge_mat(pts_list, pts_list2=None, ratio=0.25, criteria="min"):
"""
get the merge_mat for points list
Args:
pts_list (list[np.ndarray]):
each item in pts_list is an array of points
pts_list2 (list[np.ndarray]):
secondary list, if is not given, assuming it's self merge
Return:
merge_mat (N x N np.ndarray): binary mat
"""
if pts_list2 is None:
replicate = True
pts_list2 = pts_list
else:
replicate = False
M = len(pts_list)
N = len(pts_list2)
merge_mat = np.ones((M, N))
for i in range(M):
for j in range(N):
if i >= j and replicate:
merge_mat[i, j] = merge_mat[j, i]
continue
pts1, pts2 = pts_list[i], pts_list2[j]
num1, num2 = len(pts1), len(pts2)
# get shared pts
num_union = get_num_union(pts1, pts2)
divident = num1 + num2 - num_union
if criteria == "min":
divisor = min(num1, num2)
elif criteria == "iou":
divisor = num_union
elif criteria == "self":
divisor = num1
elif criteria == "ref":
divisor = num2
else:
raise NotImplementedError(f"unkown criteria {criteria}")
merge_mat[i, j] = divident * 1.0 / divisor
return merge_mat > ratio
def get_rounded_pts(pts_list, index_range=(0, None), stride=1.0,
as_unique=False):
"""
given a list of points, cast them to int
"""
start, end = index_range
if end is None:
end = len(pts_list) - 1
assert end >= start, f"invalid index_range {index_range}"
pts = np.array(pts_list)
pts = (pts[start:end + 1] / stride).round() * stride
pts = pts.astype(int)
if as_unique:
return np.unique(pts, axis=0)
else:
return pts
def _test_union_merge():
def has_match(pts):
return (merge_fn(pts) - np.eye(len(pts))).max(0)
pts = np.random.rand(10)
def merge_fn(pts):
return abs(pts[None] - pts[:, None]) < 0.1
merge_mat = merge_fn(pts)
indices_group = union_merge(merge_mat)
valid = True
for indices in indices_group:
if len(indices) == 1:
continue
if not has_match(pts[indices]).all():
print("pts within group is not connected to others")
print(merge_mat, indices_group, indices)
valid = False
break
for _ in range(10):
indices = [np.random.choice(i) for i in indices_group]
if has_match(pts[indices]).any():
print("pts between groups is connected")
print(merge_mat, indices_group, indices)
valid = False
break
return valid
def get_intersection_pts(pts1, pts2):
"""
this is numpy version of [i for i in pts1 if i in pts2]
it does 'vector-wise' comparison
"""
pts1 = np.array(pts1)
pts2 = np.array(pts2)
original = pts1
assert pts1.shape[1:] == pts2.shape[1:], "shape mismatch between inputs"
pts1 = np.ascontiguousarray(pts1.reshape(pts1.shape[0], -1))
dtype = [('f{i}'.format(i=i), pts1.dtype) for i in range(pts1.shape[1])]
c1 = pts1.view(dtype) # c as consolidated
c2 = np.ascontiguousarray(pts2.reshape(pts2.shape[0], -1)).view(dtype)
indices = np.isin(c1, c2)
ret = original[indices.squeeze()]
return ret
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/point.py | point.py |
import gzip
import json
import os
import os.path as osp
import pickle
import shutil
import tarfile
import time
from collections import Callable
import numpy as np
from PIL import Image
from .dicom import sitk, sitk_read_image, is_valid_file
PARSER_EXT_DICT = {"txt": "txt",
"pickle": "pkl",
"json": "json",
"torch": "pth",
"sitk": ["dicom", "dcm", "nii", "nii.gz"],
"image": ["png", "jpg", "jpeg", "bmp"],
"numpy": ["npy", "npz"]}
def _inverse_dict(d):
inv_d = {}
for k, v in d.items():
if isinstance(v, list):
inv_d.update({_v: k for _v in v})
else:
inv_d[v] = k
return inv_d
EXT_TO_PARSER_DICT = _inverse_dict(PARSER_EXT_DICT)
def make_dir(*args):
"""
the one-liner directory creator
"""
path = osp.join(*[arg.strip(" ") for arg in args])
if not osp.isdir(path):
from random import random
time.sleep(random() * 0.001)
if not osp.isdir(path):
os.makedirs(path)
return path
def read_str(string, default_out=None):
"""
the one-liner string parser
"""
def invalid_entry(value):
return value is None or value == ""
def invalid_type(value):
raise ValueError()
if invalid_entry(string):
return default_out
if isinstance(string, str):
parser = json.loads
elif isinstance(string, bytes):
parser = pickle.loads
else:
parser = invalid_type
try:
out = parser(string)
except Exception:
out = default_out
print(string)
return out
def load(filename, file_type="auto", **kwargs):
"""
the one-liner loader
Args:
filename (str)
file_type (str): support types: PARSER_EXT_DICT
"""
if not is_valid_file(filename):
return None
if file_type == "auto":
# check ext reversely
# eg: a.b.c.d -> ["d", "c.d", "b.c.d", "a.b.c.d"]
ext = ""
for token in filename.lower().split(".")[::-1]:
ext = f"{token}.{ext}".strip(".")
if ext in EXT_TO_PARSER_DICT:
file_type = EXT_TO_PARSER_DICT[ext]
break
else:
file_type = "unknown"
if file_type == "txt":
with open(filename, "r") as f:
result = f.readlines()
elif file_type == "json":
with open(filename, "r") as f:
result = json.load(f, **kwargs)
elif file_type == "pickle":
with open(filename, "rb") as f:
result = pickle.load(f, **kwargs)
elif file_type == "torch":
import torch
result = torch.load(filename, map_location=torch.device("cpu"))
elif file_type == "sitk":
result = sitk_read_image(filename, **kwargs)
elif file_type == "image":
result = Image.open(filename)
as_np = kwargs.get("as_np", False)
if as_np:
result = np.array(result)
elif file_type == "numpy":
o = np.load(filename, allow_pickle=kwargs.get("allow_pickle", False))
if isinstance(o, np.ndarray):
return o
if kwargs.get("lazy", False):
# if its lazy loading, simply return the object
return o
if len(o.files) == 1:
# if only 1 array, return as it is
return o.get(o.files[0])
else:
# asuming is multiple array and load sequentially
result = {}
for k in o.files:
v = o.get(k)
if v.dtype == "O":
v = v.item()
result[k] = v
return result
elif file_type == "unknown":
raise ValueError(f"Unknown ext {filename}")
else:
raise NotImplementedError(f"Unknown file_type {file_type}")
return result
def save(to_be_saved, filename, file_type="auto", **kwargs):
"""
the one-liner saver
Args:
to_be_saved (any obj)
filename (str)
file_type (str): support types: PARSER_EXT_DICT
"""
if not isinstance(filename, str):
return None
if file_type == "auto":
ext = filename.rpartition(".")[-1]
file_type = EXT_TO_PARSER_DICT.get(ext, "unknown")
if file_type == "txt":
with open(filename, "w") as f:
f.write(to_be_saved)
elif file_type == "json":
with open(filename, "w") as f:
json.dump(to_be_saved, f)
elif file_type == "pickle":
with open(filename, "wb") as f:
pickle.dump(to_be_saved, f)
elif file_type == "torch":
import torch
torch.save(to_be_saved, filename)
elif file_type == "sitk":
sitk.WriteImage(to_be_saved, filename)
elif file_type == "image":
if isinstance(to_be_saved, np.ndarray):
to_be_saved = Image.fromarray(to_be_saved)
assert isinstance(to_be_saved, Image.Image)
to_be_saved.save(filename)
elif file_type == "numpy":
saver = np.savez_compressed if kwargs.get(
"compressed", False) else np.savez
if isinstance(to_be_saved, dict):
saver(filename, **to_be_saved)
else:
saver(filename, to_be_saved)
elif file_type == "unknown":
raise ValueError(f"Unknown ext {filename}")
else:
raise NotImplementedError(f"Unknown file_type {file_type}")
def recursive_copy(src, dst, softlink=False, overwrite=True, filter_fn=None):
"""
recursively update dst root files with src root files
Args:
src (str): source root path
dst (str): destination root path
softlink (bool): Default False, if True, using os.symlink instead of copy
overwrite (bool): Default True, if overwrite when file already exists
filter_fn (function): given basename of src path, return True/False
"""
src_file_list = []
for root, dirs, files in os.walk(src):
for filename in files:
if isinstance(filter_fn, Callable) and not filter_fn(filename):
continue
relative_root = root.rpartition(src)[-1].lstrip("/ ")
src_file_list.append((relative_root, filename))
make_dir(dst)
dst_file_list = []
for root, dirs, files in os.walk(dst):
for filename in files:
relative_root = root.rpartition(src)[-1].lstrip("/ ")
dst_file_list.append((relative_root, filename))
for f in src_file_list:
if f in dst_file_list:
continue
relative_root = f[0]
make_dir(dst, relative_root)
src_path = osp.join(src, *f)
dst_path = osp.join(dst, *f)
if osp.exists(dst_path):
if overwrite:
if osp.islink(dst_path):
if osp.isdir(dst_path):
os.rmdir(dst_path)
else:
os.unlink(dst_path)
else:
os.remove(dst_path)
print(f"{dst_path} exists, overwrite")
else:
print(f"{dst_path} exists, skip")
continue
if softlink:
os.symlink(src_path, dst_path)
else:
shutil.copyfile(src_path, dst_path)
def unzip(src_path, dst_path):
"""
the one-liner unzip function, currently support "gz" and "tgz"
"""
if osp.isdir(dst_path):
filename = osp.basename(src_path).rpartition(".")[0]
dst_path = osp.join(dst_path, filename)
if src_path.endswith(".gz"):
with gzip.open(src_path, 'rb') as f_in:
with open(dst_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
elif src_path.endswith(".tgz"):
with tarfile.open(src_path, "r:gz") as tar:
tar.extractall(path=dst_path)
else:
raise NotImplementedError(f"unrecognized zip format {src_path}")
return dst_path
def get_folder_size(path):
"""
credit to:
https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python/1392549#1392549
"""
nbytes = sum(osp.getsize(f)
for f in os.listdir(path) if osp.isfile(f))
return nbytes
__all__ = [k for k in globals().keys() if not k.startswith("_")] | zqy-utils | /zqy_utils-0.2.0-py3-none-any.whl/zqy_utils/io.py | io.py |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from collections import OrderedDict
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.jar_import_products import JarImportProducts
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.fs.archive import ZIP
from trueaccord.pants.scalapb.targets.scalapb_library import ScalaPBLibrary
class ScalaPBGen(SimpleCodegenTask, NailgunTask):
def __init__(self, *args, **kwargs):
super(ScalaPBGen, self).__init__(*args, **kwargs)
@classmethod
def register_options(cls, register):
super(ScalaPBGen, cls).register_options(register)
register('--protoc-version', fingerprint=True,
help='Set a specific protoc version to use.', default='300')
cls.register_jvm_tool(register, 'scalapbc')
@classmethod
def product_types(cls):
return ['java', 'scala']
def synthetic_target_type(self, target):
return ScalaLibrary
def is_gentarget(self, target):
return isinstance(target, ScalaPBLibrary)
def execute_codegen(self, target, target_workdir):
sources = target.sources_relative_to_buildroot
source_roots = self._calculate_source_roots(target)
source_roots.update(self._proto_path_imports([target]))
scalapb_options = []
if target.payload.java_conversions:
scalapb_options.append('java_conversions')
if target.payload.grpc:
scalapb_options.append('grpc')
if target.payload.flat_package:
scalapb_options.append('flat_package')
if target.payload.single_line_to_string:
scalapb_options.append('single_line_to_string')
gen_scala = '--scala_out={0}:{1}'.format(','.join(scalapb_options), target_workdir)
args = ['-v%s' % self.get_options().protoc_version, gen_scala]
if target.payload.java_conversions:
args.append('--java_out={0}'.format(target_workdir))
for source_root in source_roots:
args.append('--proto_path={0}'.format(source_root))
classpath = self.tool_classpath('scalapbc')
args.extend(sources)
main = 'com.trueaccord.scalapb.ScalaPBC'
result = self.runjava(classpath=classpath, main=main, args=args, workunit_name='scalapb-gen')
if result != 0:
raise TaskError('scalapb-gen ... exited non-zero ({})'.format(result))
def _calculate_source_roots(self, target):
source_roots = OrderedSet()
def add_to_source_roots(target):
if self.is_gentarget(target):
source_roots.add(target.source_root)
self.context.build_graph.walk_transitive_dependency_graph(
[target.address],
add_to_source_roots,
postorder=True)
return source_roots
def _jars_to_directories(self, target):
"""Extracts and maps jars to directories containing their contents.
:returns: a set of filepaths to directories containing the contents of jar.
"""
files = set()
jar_import_products = self.context.products.get_data(JarImportProducts)
imports = jar_import_products.imports(target)
for coordinate, jar in imports:
files.add(self._extract_jar(coordinate, jar))
return files
def _extract_jar(self, coordinate, jar_path):
"""Extracts the jar to a subfolder of workdir/extracted and returns the path to it."""
with open(jar_path, 'rb') as f:
outdir = os.path.join(self.workdir, 'extracted', sha1(f.read()).hexdigest())
if not os.path.exists(outdir):
ZIP.extract(jar_path, outdir)
self.context.log.debug('Extracting jar {jar} at {jar_path}.'
.format(jar=coordinate, jar_path=jar_path))
else:
self.context.log.debug('Jar {jar} already extracted at {jar_path}.'
.format(jar=coordinate, jar_path=jar_path))
return outdir
def _proto_path_imports(self, proto_targets):
for target in proto_targets:
for path in self._jars_to_directories(target):
yield os.path.relpath(path, get_buildroot())
def _copy_target_attributes(self):
"""Propagate the provides attribute to the synthetic java_library() target for publishing."""
return ['provides'] | zr.scalapb.pants | /zr.scalapb.pants-0.1.6.tar.gz/zr.scalapb.pants-0.1.6/src/trueaccord/pants/scalapb/tasks/scalapb_gen.py | scalapb_gen.py |
import numpy as np
from scipy.ndimage import gaussian_filter
from skimage.morphology import remove_small_objects
def nan_gaussian(image, sigma):
"""Apply a gaussian filter to an array with nans.
Parameters
----------
image : array
an array with nans
sigma : float
σ is the standard deviation of the Gaussian distribution
Returns
-------
gauss : array, same shape as `image`
The Gaussian-filtered input image, with nan entries ignored.
"""
nan_msk = np.isnan(image)
loss = np.zeros(image.shape)
loss[nan_msk] = 1
loss = gaussian_filter(loss, sigma=sigma, mode='constant', cval=1)
gauss = image.copy()
gauss[nan_msk] = 0
gauss = gaussian_filter(gauss, sigma=sigma, mode='constant', cval=0)
gauss[nan_msk] = np.nan
gauss += loss * image
return gauss
def minimize_grain_contrast(image, sigma):
"""Minimise grain contrast or uneven lighting.
This is accomplished by dividing the original image
by an image with a gaussian blur applied.
Parameters
----------
image : array
Image to minimise grain contrast.
sigma : float
Sigma value for gaussian blur.
Returns
-------
removed_grains : array, same shape as image
Output image.
"""
gaussian_blur = nan_gaussian(image, sigma=sigma)
removed_grains = image / gaussian_blur
return(removed_grains)
def simple_threshold(image,scale_um, crop_threshold, threshold, small_obj=None):
"""Threshold the image, accounting for crop and small features.
Hydrides are assumed to be dark (value below the threshold) in the input
image, but are returned as bright (1.0) features in the output, and vice-
-versa for the matrix.
Parameters
----------
removed_grains : array
image to threshold.
crop_threshold : array of bool
Thresholding is only performed within regions labeled False in this
array. Values labeled True will be set to np.nan in the output
scale_um: int
Scale bar value in microns
theshold : float
threshold level.
small_obj : int, optional
size of features to be removed and not thresholded in microns
Returns
-------
thres_disp : array of float
The thresholded image, with 1.0 in foreground pixels, 0.0 in
background pixels, and np.nan in cropped pixels.
"""
thres = image < threshold
if small_obj is not None:
thres = remove_small_objects(thres, min_size=small_obj/scale_um)
thres_disp = thres.astype(float) # this will copy and set True to 1.0
thres_disp[crop_threshold] = np.nan
return thres_disp | zrHAPPY | /zrHAPPY-0.3-py3-none-any.whl/HAPPY/image_processing.py | image_processing.py |
import numpy as np
from matplotlib_scalebar.scalebar import ScaleBar
from matplotlib import pyplot as plt
def addScaleBar(ax, scale, location='upper right'):
"""Add a scale bar to an axes.
Parameters
----------
ax : matplotlib.axes.Axes
Matplotlib axis on which to plot.
"""
if scale:
scalebar = ScaleBar(scale, location=location)
ax.add_artist(scalebar)
plt.show()
def addArrows(ax, c='r', lenx=0.04, leny=0.06, flip=False):
"""
Add coordinate definition arrows (radial and circumferential) to an axes.
Parameters
----------
ax : matplotlib.axes.Axes
Matplotlib axis to plot on
"""
startcoord = (0.02, 0.02)
ax.annotate(
"",
xy=(startcoord[0]-0.002, startcoord[1]),
xytext=(startcoord[0]+lenx, startcoord[1]),
xycoords='axes fraction',
c=c,
arrowprops=dict(arrowstyle="<-", color=c, lw=2),
)
ax.annotate(
"",
xy=(startcoord[0], startcoord[1]-0.002),
xytext=(startcoord[0], startcoord[1]+leny),
xycoords='axes fraction',
c=c,
arrowprops=dict(arrowstyle="<-", color=c, lw=2),
)
positions = [(0.011, startcoord[1]+leny), (startcoord[0]+lenx, 0.01)]
if flip:
labels = 'CR'
else:
labels = 'RC'
for label, position in zip(labels, positions):
ax.annotate(
label,
xy=position,
xycoords='axes fraction',
fontsize=14,
fontweight='bold',
c=c,
)
def plot(img, title, scale=None, location=None, ax=None):
"""Plotting an imge.
Parameters
----------
img : array
Image data to be plotted
title : str
Title of plot.
scale : float
Scale in meters per pixel.
location : str
Location of scale bar i.e. 'lower right', 'upper left'
"""
if ax is None:
_, ax = plt.subplots(figsize=(10, 6))
ax.imshow(img, cmap='gray')
ax.set_title(title, fontsize=14)
ax.set_axis_off()
if scale is not None:
addScaleBar(ax, scale=scale, location=location)
addArrows(ax)
def plot_comparison(img1, title1, img2, title2, scale=None, location=None):
"""Plotting two images next to each other.
Parameters
----------
img1 : array
Image data to be plotted on left.
img2 : array
Image data to be plotted on right.
title1 : str
Title of left-hand plot.
title2 : str
Title of right-hand plot.
scale : float
Scale in meters per pixel.
location : str
Location of scale bar i.e. 'lower right', 'upper left'
"""
fig, (ax_a, ax_b) = plt.subplots(
ncols=2, figsize=(14, 7), sharex=True, sharey=True
)
plot(img1, title1, ax=ax_a)
plot(img2, title2, scale=scale, location=location, ax=ax_b)
fig.tight_layout()
def plot_hist(arr):
_, ax = plt.subplots(figsize=(8, 5))
histogram = ax.hist(arr[~np.isnan(arr)].flatten(), bins=60, range=(0, 2))
ax.set_title('Image Histogram', fontsize=14)
ax.set_xlabel('Gray value', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
return histogram | zrHAPPY | /zrHAPPY-0.3-py3-none-any.whl/HAPPY/plot_functions.py | plot_functions.py |
import numpy as np
from scipy import ndimage
from skimage.graph import route_through_array, MCP_Flexible, MCP_Geometric
from skimage.transform import rescale
from scipy.ndimage.morphology import binary_dilation
from skimage.graph import MCP_Flexible
#so here we are creating a new class that is based off MCP flex allowing us to change the functions in it
class My_MCP(MCP_Flexible): #has a set of functions and variables
def __init__(self, costs, offsets=None, fully_connected=True, distance_weight=0):
self.distance_weight = distance_weight
super().__init__(
costs, offsets=offsets, fully_connected=fully_connected
) # Based on the skimage.graph MCP_Flexible class
def travel_cost(self, old_cost, new_cost, offset_length):
my_cost = new_cost + (self.distance_weight*offset_length)
return my_cost
def det_crack_path(
thres,
crop_threshold,
num_runs,
kernel_size,
distance_weight=0,
):
"""Determine possible crack paths in the micrograph.
Parameters
----------
thres: array
thesholded image to look at
crop_threshold: array
calculated during thresholding, array of true and false values
num_runs: int
number of crack paths to determine
kernel_size: int
Once a crack is found, all future paths must be at least kernel_size
away from it.
distance_weight : float
Crack paths should follow hydrides but only up to a point: they should
also be short. This weight determines how much the "shortness"
consideration should count compared to the hydride. Higher weight =>
shorter paths.
Returns
-------
edist: array
min euclidean distace from the hydride to the matrix
path_list:
list of possible crack paths
cost_list: list
list of cost values for each crack path
"""
# Use distance away from a hydride as a path to route through
edist = ndimage.morphology.distance_transform_edt(thres == 0)
edist = rescale(edist, (1, 1))
# Add a row of zeros on the top and bottom and set cost=0 outside tube
edist[0, :] = 0
edist[-1, :] = 0
edist = np.where(crop_threshold, 0, edist)
# Make a empty list to store paths and costs
path_list = []
cost_list = []
nrows, ncols = edist.shape
for _ in np.arange(num_runs):
# Coordinates and cost corresponding to path
m = My_MCP(edist, fully_connected=True, distance_weight=distance_weight)
# if distance_weight==0, this should behave the same as
# m = MCP_Geometric(edist, fully_connected=True)
# every coordinate on the top row is a possible start point
starts = np.stack((np.zeros(ncols), np.arange(ncols)), axis=1)
# every coordinate on tho bottom row is a possible end point
ends = np.stack((np.full(ncols, nrows - 1), np.arange(ncols)), axis=1)
costs, traceback_array = m.find_costs(starts, ends, find_all_ends=False)
end_column = np.argmin(costs[-1, :])
path = np.array(m.traceback((-1, end_column)))
cost = costs[-1, end_column]
# old code that we imitated manually with classes above:
# path, cost = route_through_array(edist, [0, 0], [-1, -1])
# Boolean array based on coordinates, True is on path
path_array = np.zeros(np.shape(edist), dtype=bool)
for coord in path:
path_array[coord[0], coord[1]] = True
# Take away points outside of crop, make coord array and append to list
path_array_cropped = np.logical_and(path_array, ~crop_threshold)
path_coords = np.transpose(np.nonzero(path_array_cropped))
path_list.append(np.array(path_coords))
cost_list.append(cost)
# Filtering path based on kernel size, so that the next run will take
# a different path
filter_array = binary_dilation(
path_array_cropped, iterations=kernel_size
)
edist = np.where(filter_array, np.inf, edist)
edist = np.where(crop_threshold, 0, edist)
edist = ndimage.morphology.distance_transform_edt(thres == 0)
edist = rescale(edist, (1, 1))
return edist, path_list, cost_list | zrHAPPY | /zrHAPPY-0.3-py3-none-any.whl/HAPPY/crack_path.py | crack_path.py |
import numpy as np
from scipy import ndimage
from skimage.transform import hough_line, hough_line_peaks
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from .plot_functions import addScaleBar, addArrows
def hough_rad(
image,
num_peaks,
min_distance=5,
min_angle=5,
val=0.25,
scale=None,
location=None,
):
"""Perform Hough Line Transfom to determine radial hydride fraction.
Parameters
----------
image : array
input thresholded hydride image
num_peaks : float
number of measured peaks in one rectangle.
min_distance : float
minimum distance separating two parallel lines. It seems that a value
of 5 is good
min_angle : float
minimum angle separating two lines it seems that a value of 5 is good
val : float
val is a number < 1 where, only hydrides that are at least val times
the length of the longest hydride are measured. This helps to reduce
noise becuase only hydrides that are significant in size are included
in the calculation. The default value for this is 0.25, if you have
much smaller hydride branches that you want to pick up this value can
be reduced, but remember the noise increases as well.
scale : float
Scale in meters per pixel.
location : str
Location of scale bar i.e. 'lower right', 'upper left'
Returns
-------
angle_list : array
List of angles generated from the hough transform.
len_list : array
List of line lengths generated from the hough transform.
"""
fig, axes = plt.subplots(
ncols=2, figsize=(14, 7), sharex=True, sharey=True
)
ax = axes.ravel()
# Plotting
ax[0].imshow(image, cmap='gray')
ax[0].set_axis_off()
ax[0].set_title('Thresholded image', fontsize=14)
ax[1].imshow(image, cmap='gray')
ax[1].set_axis_off()
ax[1].set_title('Hough Transform', fontsize=14)
# Label image
label, num_features = ndimage.label(image > 0.1)
slices = ndimage.find_objects(label)
# Loop over each slice
len_list = []
angle_list = []
d_list = []
for feature in np.arange(num_features):
h, theta, d = hough_line(
label[slices[feature]],
theta=np.linspace(-np.pi/2 , np.pi/2 , 90),
)
threshold = val*np.amax(h)
h_peak, angles, d_peak = hough_line_peaks(
h,
theta,
d,
threshold=threshold,
num_peaks=num_peaks,
min_distance=min_distance,
min_angle=min_angle,
)
angle_list.append(angles)
len_list.append(h_peak)
d_list.append(d_peak)
# Draw bounding box
x0_box = np.min([slices[feature][1].stop, slices[feature][1].start])
y0_box = np.min([slices[feature][0].stop, slices[feature][0].start])
x1_box = np.max([slices[feature][1].stop, slices[feature][1].start])
y1_box = np.max([slices[feature][0].stop, slices[feature][0].start])
rect = Rectangle(
(x0_box, y0_box),
x1_box-x0_box,
y1_box-y0_box,
angle=0.0,
ec='r',
fill=False,
)
ax[1].add_artist(rect)
# origin = np.array((0, np.abs(x1_box-x0_box))) # never used
for _, angle, dist in zip(h_peak, angles, d_peak):
y0b, y1b = (
(dist - np.array((0, x1_box-x0_box)) * np.cos(angle))
/ np.sin(angle)
)
y0_line = y0b + y0_box
y1_line = y1b + y0_box
x0_line = x0_box
x1_line = x1_box
m = (y1_line-y0_line)/(x1_line-x0_line)
# Fix lines which go over the edges of bounding boxes
if y0_line < y0_box:
x0_line = ((y0_box - y1_line) / m) + x1_line
y0_line = y0_box
if y0_line > y1_box:
x0_line = ((y1_box - y1_line) / m) + x1_line
y0_line = y1_box
if y1_line < y0_box:
x1_line = ((y0_box - y1_line) / m) + x1_line
y1_line = y0_box
if y1_line > y1_box:
x1_line = ((y1_box - y1_line) / m) + x1_line
y1_line = y1_box
ax[1].plot(np.array((x0_line, x1_line)), (y0_line, y1_line), '-g')
print('Number of detected angles: {0}'.format(len(len_list)))
ax[1].set_xlim(0, image.shape[1])
ax[0].set_ylim(0, image.shape[0])
if scale is not None:
addScaleBar(ax[1], scale, location)
addArrows(ax[0])
addArrows(ax[1])
fig.tight_layout()
return np.concatenate(angle_list), np.concatenate(len_list)
def RHF_no_weighting_factor(angle_list, len_list):
"""Calculate the Radial Hydride Fraction without any weighting factor
Parameters
----------
angle_list : array
calculated from the Hough line transform
len_list : array
List of lengths generated from the hogh line transform
Returns
-------
radial : float
fraction of radial hydrides
circumferential : float
fraction of circumferential hydrides
"""
radial_angles = np.logical_and(
-np.pi / 4 <= angle_list, angle_list < np.pi / 4
)
radial_len = np.sum(len_list[radial_angles])
circumferential_len = np.sum(len_list[~radial_angles])
radial = radial_len / (radial_len + circumferential_len)
circumferential = 1 - radial
return radial, circumferential
def weighted_RHF_calculation(angle_list, len_list):
"""Weighted Radial Hydride Fraction Calculation
Parameters
----------
angle_list : array
List of angles generated from the hogh line transform
len_list : array
List of lengths generated from the hogh line transform
Returns
-------
RHF : float
Weighted radial hydride fraction
"""
deg_angle_list = np.rad2deg(angle_list)
fi = []
for k in deg_angle_list:
if 0 < k <= 30:
x = 1
elif 30 < k <= 50:
x = 0.5
elif 50 < k <= 90:
x = 0
elif -30 < k <= 0:
x = 1
elif -50 < k <= -30:
x = 0.5
elif -90 < k <= -50:
x = 0
fi.append(x)
#The next step is to do the summation
SumOfLixFi = sum(len_list * np.array(fi))
SumOfLi = sum(len_list)
RHF = SumOfLixFi / SumOfLi
return RHF | zrHAPPY | /zrHAPPY-0.3-py3-none-any.whl/HAPPY/radial_hydride_fraction.py | radial_hydride_fraction.py |
import numpy as np
from skimage.morphology import skeletonize
from skan import Skeleton, summarize
import networkx as nx
import toolz as tz
def branch_classification(thres):
"""Predict the extent of branching.
Parameters
----------
thres: array
thresholded image to be analysed
scale: the scale bar size in pixels/metre
Returns
-------
skel: array
skeletonised image
is_main: array
whether the hydride identified is part of the main section or if it is a branch
BLF: int/float
branch length fraction
"""
skeleton = skeletonize(thres)
skel = Skeleton(skeleton, source_image=thres)
summary = summarize(skel)
is_main = np.zeros(summary.shape[0])
us = summary['node-id-src']
vs = summary['node-id-dst']
ws = summary['branch-distance']
edge2idx = {
(u, v): i
for i, (u, v) in enumerate(zip(us, vs))
}
edge2idx.update({
(v, u): i
for i, (u, v) in enumerate(zip(us, vs))
})
g = nx.Graph()
g.add_weighted_edges_from(
zip(us, vs, ws)
)
for conn in nx.connected_components(g):
curr_val = 0
curr_pair = None
h = g.subgraph(conn)
p = dict(nx.all_pairs_dijkstra_path_length(h))
for src in p:
for dst in p[src]:
val = p[src][dst]
if (val is not None
and np.isfinite(val)
and val > curr_val):
curr_val = val
curr_pair = (src, dst)
for i, j in tz.sliding_window(
2,
nx.shortest_path(
h, source=curr_pair[0], target=curr_pair[1], weight='weight'
)
):
is_main[edge2idx[(i, j)]] = 1
summary['main'] = is_main
# Branch Length Fraction
total_length = np.sum(skeleton)
trunk_length = 0
for i in range(summary.shape[0]):
if summary['main'][i]:
trunk_length += summary['branch-distance'][i]
branch_length = total_length - trunk_length
BLF = branch_length/total_length
return skel, is_main, BLF | zrHAPPY | /zrHAPPY-0.3-py3-none-any.whl/HAPPY/branching.py | branching.py |
# Zrb extras
zrb-extras is a [pypi](https://pypi.org) package.
You can install zrb-extras by invoking:
```
pip install zrb-extras
```
# For maintainers
## Publish to pypi
To publish zrb-extras, you need to have a `Pypi` account:
- Log in or register to [https://pypi.org/](https://pypi.org/)
- Create an API token
You can also create a `TestPypi` account:
- Log in or register to [https://test.pypi.org/](https://test.pypi.org/)
- Create an API token
Once you have your API token, you need to create a `~/.pypirc` file:
```
[distutils]
index-servers =
pypi
testpypi
[pypi]
repository = https://upload.pypi.org/legacy/
username = __token__
password = pypi-xxx-xxx
[testpypi]
repository = https://test.pypi.org/legacy/
username = __token__
password = pypi-xxx-xxx
```
To publish zrb-extras, you can do the following command:
```bash
zrb project publish-zrb-extras
```
## Updating version
You can update zrb-extras version by modifying the following section in `pyproject.toml`:
```toml
[project]
version = "0.0.2"
```
## Adding dependencies
To add zrb-extras dependencies, you can edit the following section in `pyproject.toml`:
```toml
[project]
dependencies = [
"Jinja2==3.1.2",
"jsons==1.6.3"
]
```
## Adding script
To make zrb-package-name executable, you can edit the following section in `pyproject.toml`:
```toml
[project-scripts]
zrb-extras = "zrb-extras.__main__:hello"
```
This will look for `hello` callable inside of your `__main__.py` file
| zrb-extras | /zrb_extras-0.0.1.tar.gz/zrb_extras-0.0.1/README.md | README.md |
# 🤖 Zrb: Super framework for your super app

Zrb is a [CLI-based](https://en.wikipedia.org/wiki/Command-line_interface) automation [tool](https://en.wikipedia.org/wiki/Programming_tool) and [low-code](https://en.wikipedia.org/wiki/Low-code_development_platform) platform. Once installed, you can automate day-to-day tasks, generate projects and applications, and even deploy your applications to Kubernetes with a few commands.
To use Zrb, you need to be familiar with CLI.
## Zrb as a low-code framework
Let's see how you can build and run a [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) application.
```bash
# Create a project
zrb project create --project-dir my-project --project-name "My Project"
cd my-project
# Create a Fastapp
zrb project add fastapp --project-dir . --app-name "fastapp" --http-port 3000
# Add library module to fastapp
zrb project add fastapp-module --project-dir . --app-name "fastapp" --module-name "library"
# Add entity named "books"
zrb project add fastapp-crud --project-dir . --app-name "fastapp" --module-name "library" \
--entity-name "book" --plural-entity-name "books" --column-name "code"
# Add column to the entity
zrb project add fastapp-field --project-dir . --app-name "fastapp" --module-name "library" \
--entity-name "book" --column-name "title" --column-type "str"
# Run Fastapp as monolith
zrb project start-fastapp --fastapp-run-mode "monolith"
```
You will be able to access the application by pointing your browser to [http://localhost:3000](http://localhost:3000)

Furthermore, you can also run the same application as `microservices`, run the application as `docker containers`, and even doing some deployments into your `kubernetes cluster`.
```bash
# Run Fastapp as microservices
zrb project start-fastapp --fastapp-run-mode "microservices"
# Run Fastapp as container
zrb project start-fastapp-container --fastapp-run-mode "microservices"
zrb project stop-fastapp-container
# Deploy fastapp and all it's dependencies to kubernetes
docker login
zrb project deploy-fastapp --fastapp-deploy-mode "microservices"
```
You can visit [our tutorials](https://github.com/state-alchemists/zrb/blob/main/docs/tutorials/README.md) to see more cool tricks.
## Zrb as a task-automation framework
Aside from the builtin capabilities, Zrb also allows you to define your automation commands in Python. To do so, you need to create/modify a file named `zrb_init.py`.
```python
# filename: zrb_init.py
from zrb import runner, CmdTask, StrInput
hello = CmdTask(
name='hello',
inputs=[StrInput(name='name', description='Name', default='world')],
cmd='echo Hello {{input.name}}'
)
runner.register(hello)
```
Once defined, your command will be instantly available from the CLI:
```bash
zrb hello
```
```
Name [world]: Go Frendi
🤖 ➜ 2023-06-10T21:20:19.850063 ⚙ 10008 ➤ 1 of 3 • 🐷 zrb hello • Run script: echo Hello Go Frendi
🤖 ➜ 2023-06-10T21:20:19.850362 ⚙ 10008 ➤ 1 of 3 • 🐷 zrb hello • Current working directory: /home/gofrendi/zrb/playground
🤖 ➜ 2023-06-10T21:20:19.857585 ⚙ 10009 ➤ 1 of 3 • 🐷 zrb hello • Hello Go Frendi
Support zrb growth and development!
☕ Donate at: https://stalchmst.com/donation
🐙 Submit issues/pull requests at: https://github.com/state-alchemists/zrb
🐤 Follow us at: https://twitter.com/zarubastalchmst
🤖 ➜ 2023-06-10T21:20:19.898304 ⚙ 10009 ➤ 1 of 3 • 🐷 zrb hello • zrb hello completed in 0.11999917030334473 seconds
To run again: zrb hello --name "Go Frendi"
Hello Go Frendi
```
To learn more about this, you can visit [our getting started guide](https://github.com/state-alchemists/zrb/blob/main/docs/getting-started.md).
# 🫰 Installation
## ⚙️ In local machine
Installing Zrb in your system is as easy as typing the following command in your terminal:
```bash
pip install zrb
```
Just like any other Python package, you can also install Zrb in your [virtual environment](https://docs.python.org/3/library/venv.html). This will allow you to have many versions of Zrb on the same computer.
> ⚠️ If the command doesn't work, you probably don't have Pip/Python on your computer. See `Main prerequisites` subsection to install them.
## 🐋 With docker
If you prefer to work with Docker, you can create a file named `docker-compose.yml`
```yaml
version: '3'
networks:
zrb:
name: zrb
services:
zrb:
build:
dockerfile: Dockerfile
context: .
image: docker.io/stalchmst/zrb:latest
container_name: zrb
hostname: zrb
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./project:/project
networks:
- zrb
ports:
- 3001:3001 # or/and any other ports you want to expose.
```
Once your docker-compose file is created, you can invoke the following command:
```bash
docker compose up -d
```
You will be able to access Zrb by using docker exec:
```bash
docker exec -it zrb zsh
```
# ✅ Main prerequisites
Since Zrb is written in Python, you need to install a few things before installing Zrb:
- 🐍 `Python`
- 📦 `Pip`
- 🏝️ `Venv`
If you are using 🐧 Ubuntu, the following command should work:
```bash
sudo apt install python3 python3-pip python3-venv python-is-python3
```
If you are using 🍎 Mac, the following command will work:
```bash
# Make sure you have homebrew installed, see: https://brew.sh/
brew install python3
ln -s venv/bin/pip3 /usr/local/bin/pip
ln -s venv/bin/python3 /usr/local/bin/python
```
If you prefer Python distribution like [conda](https://docs.conda.io/en/latest/), that might work as well.
# ✔️ Other prerequisites
If you want to generate applications using Zrb and run them on your computer, you will also need:
- 🐸 `Node.Js` and `Npm`.
- You need Node.Js to modify/transpile frontend code into static files.
- You can visit the [Node.Js website](https://nodejs.org/en) for installation instructions.
- 🐋 `Docker` and `Docker-compose` plugin.
- You need `Docker` and `Docker-compose` plugin to
- Run docker-compose-based tasks
- Run some application prerequisites like RabbitMQ, Postgre, or Redpanda.
- The easiest way to install `Docker`, `Docker-compose` plugin, and local `Kubernetes` is by using [Docker Desktop](https://www.docker.com/products/docker-desktop/).
- You can also install `Docker` and `Docker-compose` plugin by following the [Docker installation guide](https://docs.docker.com/engine/install/).
- ☸️ `Kubernetes` cluster.
- Zrb allows you to deploy your applications into `Kubernetes`.
- To test it locally, you will need a [Minikube](https://minikube.sigs.k8s.io/docs/) or other alternatives. However, the easiest way is by enabling `Kubernetes` on your `Docker Desktop`.
- 🦆 `Pulumi`
- You need Pulumi to deploy your applications
# 🏁 Getting started
We have a nice [getting started guide](https://github.com/state-alchemists/zrb/blob/main/docs/getting-started.md) to help you cover the basics. Make sure to check it out😉.
# 📖 Documentation
You can visit [Zrb documentation](https://github.com/state-alchemists/zrb/blob/main/docs/README.md) for more detailed information.
# ☕ Donation
Help Red Skull to click the donation button:
[](https://stalchmst.com/donation)
# 🎉 Fun fact
> Madou Ring Zaruba (魔導輪ザルバ, Madōrin Zaruba) is a Madougu which supports bearers of the Garo Armor. [(Garo Wiki | Fandom)](https://garo.fandom.com/wiki/Zaruba)

| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/README.md | README.md |
from zrb import (
runner, CmdTask, ResourceMaker, DockerComposeTask, FlowTask, FlowNode,
Env, StrInput, HTTPChecker
)
import os
import tomli
CURRENT_DIR = os.path.dirname(__file__)
with open(os.path.join(CURRENT_DIR, 'pyproject.toml'), 'rb') as f:
toml_dict = tomli.load(f)
VERSION = toml_dict['project']['version']
###############################################################################
# Input Definitions
###############################################################################
zrb_image_name_input = StrInput(
name='zrb-image-name',
description='Zrb image name',
prompt='Zrb image name',
default=f'docker.io/stalchmst/zrb:{VERSION}'
)
zrb_latest_image_name_input = StrInput(
name='zrb-latest-image-name',
description='Zrb latest image name',
prompt='Zrb latest image name',
default='docker.io/stalchmst/zrb:latest'
)
###############################################################################
# Env Definitions
###############################################################################
zrb_image_env = Env(
name='ZRB_IMAGE',
os_name='',
default='{{input.zrb_image_name}}'
)
zrb_latest_image_env = Env(
name='ZRB_IMAGE',
os_name='',
default='{{input.zrb_latest_image_name}}'
)
###############################################################################
# Task Definitions
###############################################################################
build = CmdTask(
name='build',
description='Build Zrb',
cwd=CURRENT_DIR,
cmd=[
'set -e',
'echo "🤖 Build zrb distribution"',
f'rm -Rf {CURRENT_DIR}/dist',
'git add . -A',
'flit build',
],
)
runner.register(build)
publish_pip = CmdTask(
name='publish-pip',
description='Publish zrb to pypi',
upstreams=[build],
cwd=CURRENT_DIR,
cmd=[
'set -e',
'echo "🤖 Publish zrb to pypi"',
'flit publish --repository pypi',
]
)
runner.register(publish_pip)
publish_pip_test = CmdTask(
name='publish-pip-test',
description='Publish zrb to testpypi',
upstreams=[build],
cwd=CURRENT_DIR,
cmd=[
'set -e',
'echo "🤖 Publish zrb to testpypi"',
'flit publish --repository testpypi',
]
)
runner.register(publish_pip_test)
prepare_docker = ResourceMaker(
name='prepare-docker',
description='Create docker directory',
template_path=f'{CURRENT_DIR}/docker-template',
destination_path=f'{CURRENT_DIR}/.docker-dir',
replacements={
'ZRB_VERSION': VERSION
}
)
runner.register(prepare_docker)
check_pip = HTTPChecker(
name='check-pip',
is_https=True,
host='pypi.org',
url=f'pypi/zrb/{VERSION}/json',
port=443
)
build_image = DockerComposeTask(
name='build-image',
description='Build docker image',
upstreams=[
prepare_docker,
check_pip,
],
inputs=[zrb_image_name_input],
envs=[zrb_image_env],
cwd=f'{CURRENT_DIR}/.docker-dir',
compose_cmd='build',
compose_args=['zrb']
)
runner.register(build_image)
build_latest_image = DockerComposeTask(
name='build-latest-image',
description='Build docker image',
upstreams=[
prepare_docker,
check_pip,
build_image,
],
inputs=[zrb_latest_image_name_input],
envs=[zrb_latest_image_env],
cwd=f'{CURRENT_DIR}/.docker-dir',
compose_cmd='build',
compose_args=['zrb']
)
runner.register(build_latest_image)
stop_container = DockerComposeTask(
name='stop-container',
description='remove docker container',
upstreams=[prepare_docker],
inputs=[zrb_image_name_input],
envs=[zrb_image_env],
cwd=f'{CURRENT_DIR}/.docker-dir',
compose_cmd='down'
)
runner.register(stop_container)
start_container = DockerComposeTask(
name='start-container',
description='Run docker container',
upstreams=[
build_image,
stop_container
],
inputs=[zrb_image_name_input],
envs=[zrb_image_env],
cwd=f'{CURRENT_DIR}/.docker-dir',
compose_cmd='up',
compose_flags=['-d']
)
runner.register(start_container)
push_image = DockerComposeTask(
name='push-image',
description='Push docker image',
upstreams=[build_image],
inputs=[zrb_image_name_input],
envs=[zrb_image_env],
cwd=f'{CURRENT_DIR}/.docker-dir',
compose_cmd='push',
compose_args=['zrb']
)
runner.register(push_image)
push_latest_image = DockerComposeTask(
name='push-latest-image',
description='Push docker image',
upstreams=[
build_latest_image,
push_image,
],
inputs=[zrb_latest_image_name_input],
envs=[zrb_latest_image_env],
cwd=f'{CURRENT_DIR}/.docker-dir',
compose_cmd='push',
compose_args=['zrb']
)
runner.register(push_latest_image)
publish = FlowTask(
name='publish',
description='Publish new version',
nodes=[
FlowNode(task=publish_pip),
FlowNode(task=push_latest_image),
]
)
runner.register(publish)
install_symlink = CmdTask(
name='install-symlink',
description='Install Zrb as symlink',
upstreams=[build],
cmd=[
'set -e',
f'cd {CURRENT_DIR}',
'echo "🤖 Install zrb"',
'flit install --symlink',
]
)
runner.register(install_symlink)
test = CmdTask(
name='test',
description='Run zrb test',
inputs=[
StrInput(
name='test',
shortcut='t',
description='Specific test case (i.e., test/file.py::test_name)',
prompt='Test (i.e., test/file.py::test_name)',
default=''
),
],
upstreams=[install_symlink],
cmd=[
'set -e',
f'cd {CURRENT_DIR}',
'echo "🤖 Perform test"',
'pytest --ignore-glob="**/template/**/test" --ignore=playground --cov=zrb --cov-report=html --cov-report=term --cov-report=term-missing {{input.test}}' # noqa
],
retry=0,
checking_interval=1
)
runner.register(test)
serve_test = CmdTask(
name='serve-test',
description='Serve zrb test result',
inputs=[
StrInput(
name='port',
shortcut='p',
description='Port to serve coverage result',
prompt='Serve coverage on port',
default='9000'
)
],
upstreams=[test],
cmd=[
'set -e',
f'cd {CURRENT_DIR}',
'echo "🤖 Serve coverage report"',
f'python -m http.server {{input.port}} --directory {CURRENT_DIR}/htmlcov', # noqa
],
checkers=[
HTTPChecker(port='{{input.port}}')
],
retry=0,
checking_interval=0.3
)
runner.register(serve_test)
playground = CmdTask(
name='playground',
upstreams=[install_symlink],
cmd=[
'set -e',
f'cd {CURRENT_DIR}',
'echo "🤖 Remove playground"',
'sudo rm -Rf playground',
'echo "🤖 Create playground"',
'cp -R playground-template playground',
f'cd {CURRENT_DIR}/playground',
'echo "🤖 Seed project"',
'./seed-project.sh',
'echo "🤖 Change to playground directory:"',
f'echo " cd {CURRENT_DIR}/playground"',
'echo "🤖 Or playground project directory:"',
f'echo " cd {CURRENT_DIR}/playground/my-project"',
'echo "🤖 You can also test pip package:"',
f'echo " cd {CURRENT_DIR}/playground/my-project/src/zrb-coba-test"', # noqa
'echo " source .venv/bin/activate"',
'echo " zrb-coba-test"',
'echo "🤖 And start hacking around. Good luck :)"',
],
retry=0,
preexec_fn=None
)
runner.register(playground) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/zrb_init.py | zrb_init.py |
from zrb import (
runner, Env,
StrInput, ChoiceInput, IntInput, BoolInput, FloatInput, PasswordInput,
Group, Task, CmdTask, HTTPChecker, python_task
)
# Simple Python task.
# Usage example: zrb concat --separator=' '
concat = Task(
name='concat', # Task name
inputs=[StrInput(name='separator', description='Separator', default=' ')],
run=lambda *args, **kwargs: kwargs.get('separator', ' ').join(args)
)
runner.register(concat)
# Simple Python task with multiple inputs.
register_trainer = Task(
name='register-trainer',
inputs=[
StrInput(name='name', default=''),
PasswordInput(name='password', default=''),
IntInput(name='age', default=0),
BoolInput(name='employed', default=False),
FloatInput(name='salary', default=0.0),
ChoiceInput(
name='starter-pokemon',
choices=['bulbasaur', 'charmender', 'squirtle']
)
],
run=lambda *args, **kwargs: kwargs
)
runner.register(register_trainer)
# Simple Python task with decorator
@python_task(
name='fibo',
inputs=[IntInput(name='n', default=5)],
runner=runner
)
async def fibo(*args, **kwargs):
n = int(args[0]) if len(args) > 0 else kwargs.get('n')
if n <= 0:
return None
elif n == 1:
return 0
elif n == 2:
return 1
else:
a, b = 0, 1
for i in range(n - 1):
a, b = b, a + b
return a
# Simple CLI task.
# Usage example: zrb hello --name='world'
hello = CmdTask(
name='hello',
inputs=[StrInput(name='name', description='Name', default='world')],
cmd='echo Hello {{input.name}}'
)
runner.register(hello)
# Command group: zrb make
make = Group(name='make', description='Make things')
# CLI task, part of `zrb make` group, depends on `hello`
# Usage example: zrb make coffee
make_coffee = CmdTask(
name='coffee',
group=make,
upstreams=[hello],
cmd='echo Coffee for you ☕'
)
runner.register(make_coffee)
# CLI task, part of `zrb make` group, depends on `hello`
# Usage example: zrb make beer
make_beer = CmdTask(
name='beer',
group=make,
upstreams=[hello],
cmd='echo Cheers 🍺'
)
runner.register(make_beer)
# Command group: zrb make gitignore
make_gitignore = Group(
name='gitignore', description='Make gitignore', parent=make
)
# CLI task, part of `zrb make gitignore` group,
# making .gitignore for Python project
# Usage example: zrb make gitignore python
make_gitignore_python = CmdTask(
name='python',
group=make_gitignore,
cmd=[
'echo "node_modules/" >> .gitignore'
'echo ".npm" >> .gitignore'
'echo "npm-debug.log" >> .gitignore'
]
)
runner.register(make_gitignore_python)
# CLI task, part of `zrb make gitignore` group,
# making .gitignore for Node.js project
# Usage example: zrb make gitignore node
make_gitignore_nodejs = CmdTask(
name='node',
group=make_gitignore,
cmd=[
'echo "__pycache__/" >> .gitignore'
'echo "venv" >> .gitignore'
]
)
runner.register(make_gitignore_nodejs)
# Long running CLI task
# Usage example: zrb start-server dir='.'
start_server = CmdTask(
name='start-server',
upstreams=[make_coffee, make_beer],
inputs=[StrInput(name='dir', description='Directory', default='.')],
envs=[Env(name='PORT', os_name='WEB_PORT', default='3000')],
cmd='python -m http.server $PORT --directory {{input.dir}}',
checkers=[HTTPChecker(port='{{env.PORT}}')]
)
runner.register(start_server)
# CLI task, depends on `start-server`, throw error
# Usage example: zrb test-error
test_error = CmdTask(
name='test-error',
upstreams=[start_server],
cmd='sleep 3 && exit 1',
retry=0
)
runner.register(test_error) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/playground-template/zrb_init.py | zrb_init.py |
set -e
echo '🤖 Remove my-project'
rm -Rf my-project
export ZRB_SHOW_PROMPT=0
echo '🤖 Create my-project'
zrb project create --project-dir my-project --project-name "My Project"
cd my-project
echo '🤖 Add cmd-task'
zrb project add cmd-task \
--project-dir . \
--task-name "run-cmd"
echo '🤖 Add docker-compose-task'
zrb project add docker-compose-task \
--project-dir . \
--task-name "run-container" \
--compose-command "up" \
--http-port 3001
echo '🤖 Add python-task'
zrb project add python-task \
--project-dir . \
--task-name "run-python"
echo '🤖 Add simple-python-app'
zrb project add simple-python-app \
--project-dir . \
--app-name "simple" \
--http-port 3002
echo '🤖 Add fastapp'
zrb project add fastapp \
--project-dir . \
--app-name "fastapp" \
--http-port 3003
echo '🤖 Add fastapp module'
zrb project add fastapp-module \
--project-dir . \
--app-name "fastapp" \
--module-name "library"
echo '🤖 Add fastapp crud'
zrb project add fastapp-crud \
--project-dir . \
--app-name "fastapp" \
--module-name "library" \
--entity-name "book" \
--plural-entity-name "books" \
--column-name "code"
echo '🤖 Add fastapp field'
zrb project add fastapp-field \
--project-dir . \
--app-name "fastapp" \
--module-name "library" \
--entity-name "book" \
--column-name "title" \
--column-type "str"
echo '🤖 Add python package'
zrb project add pip-package \
--project-dir . \
--package-name "zrb-coba-test" \
--package-description "A test package" \
--package-homepage "https://github.com/state-alchemists/zrb" \
--package-bug-tracker "https://github.com/state-alchemists/zrb/issues" \
--package-author-name "Go Frendi" \
--package-author-email "[email protected]" \
echo '🤖 Add generator'
zrb project add app-generator \
--template-name "coba-app"
echo '🤖 Test run generator'
zrb project add coba-app \
--project-dir . \
--app-name "coba" \
--app-image-name "docker.io/gofrendi/coba" \
--app-port "8080" \
--env-prefix "COBA"
echo '🤖 Test fastapp'
zrb project test-fastapp
echo '🤖 Test Install pip package symlink'
zrb project install-zrb-coba-test-symlink | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/playground-template/seed-project.sh | seed-project.sh |
🔖 [Table of Contents](README.md)
# For maintainers
To publish Zrb, you need a `Pypi` account:
- Log in or register to [https://pypi.org/](https://pypi.org/)
- Create an API token
You can also create a `TestPypi` account:
- Log in or register to [https://test.pypi.org/](https://test.pypi.org/)
- Create an API token
Once you have your API token, you need to create a `~/.pypirc` file:
```
[distutils]
index-servers =
pypi
testpypi
[pypi]
repository = https://upload.pypi.org/legacy/
username = __token__
password = pypi-xxx-xxx
[testpypi]
repository = https://test.pypi.org/legacy/
username = __token__
password = pypi-xxx-xxx
```
To publish Zrb, you can do the following command:
```bash
source ./project.sh
docker login -U stalchmst
zrb publish
```
🔖 [Table of Contents](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/for-maintainers.md | for-maintainers.md |
🔖 [Table of Contents](README.md)
# For contributors
As contributors, there are some things you can do:
- Increase code coverage
- Report bugs
- Help others in discussion
To develop Zrb in your local computer, you need to fork Zrb repository and pull it to your local computer.
Once you do so, you can activate the environment, run Zrb test and play with playground.
```bash
source ./project.sh
# Run test
zrb test
# Start playground
zrb playground
```
If you find any bugs, or want to add new features, please open a pull request on Zrb repository.
🔖 [Table of Contents](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/for-contributors.md | for-contributors.md |
🔖 [Table of Contents](README.md)
# Getting started
Zrb is an automation tool. With Zrb you can run tasks using command-line-interface.
There are project tasks and common tasks. Project tasks are usually bind to a project, while common tasks can be executed from anywhere.
# Running a common task
To run a common task, you can type `zrb [task-groups] <task-name> [task-parameters]`.
For example, you want to run `encode` task under `base64` group, you can do so by execute the following:
```bash
zrb base64 encode --text "non-credential-string"
```
```
Support zrb growth and development!
☕ Donate at: https://stalchmst.com/donation
🐙 Submit issues/pull requests at: https://github.com/state-alchemists/zrb
🐤 Follow us at: https://twitter.com/zarubastalchmst
🤖 ➜ 2023-06-11T05:09:06.283002 ⚙ 3549 ➤ 1 of 1 • 🍋 zrb base64 encode • zrb base64 encode completed in 0.11719107627868652 seconds
To run again: zrb base64 encode --text "non-credential-string"
bm9uLWNyZWRlbnRpYWwtc3RyaW5n
```
Related tasks are usually located under the same group. For example, you have `decode` task under `base64` group as well.
```bash
zrb base64 decode --text "bm9uLWNyZWRlbnRpYWwtc3RyaW5n"
```
Don't worry if you can't remember all available `task-group`, `task-name`, or `task-parameters`. Just press enter at any time, and Zrb will show you the way.
```bash
zrb
```
```
Usage: zrb [OPTIONS] COMMAND [ARGS]...
Super framework for your super app.
Options:
--help Show this message and exit.
Commands:
base64 Base64 operations
concat concat
devtool Developer tools management
env Environment variable management
eval Evaluate Python expression
fibo fibo
hello hello
make Make things
md5 MD5 operations
explain Explain things
project Project management
register-trainer register-trainer
start-server start-server
test-error test-error
ubuntu Ubuntu related commands
update Update zrb
version Get Zrb version
```
Once you find your task, you can just type the task without bothering about the parameters. Zrb will prompt you to fill the parameter interactively.
```bash
zrb base64 encode
```
```
Text []: non-credential-string
Support zrb growth and development!
☕ Donate at: https://stalchmst.com/donation
🐙 Submit issues/pull requests at: https://github.com/state-alchemists/zrb
🐤 Follow us at: https://twitter.com/zarubastalchmst
🤖 ➜ 2023-06-11T05:27:07.824839 ⚙ 3740 ➤ 1 of 1 • 🍌 zrb base64 encode • zrb base64 encode completed in 0.11709976196289062 seconds
To run again: zrb base64 encode --text "non-credential-string"
bm9uLWNyZWRlbnRpYWwtc3RyaW5n
```
# Creating a project
To make things more manageable, you can put related task definitions and resources under the same project.
You can create a project by invoking `zrb project create` as follow:
```bash
zrb project create --project-dir my-project
```
Once invoked, you will have a directory named `my-project`. You can move to the directory and start to see how a project looks like:
```bash
cd my-project
ls -al
```
```
total 44
drwxr-xr-x 5 gofrendi gofrendi 4096 Jun 11 05:29 .
drwxr-xr-x 4 gofrendi gofrendi 4096 Jun 11 05:29 ..
drwxr-xr-x 7 gofrendi gofrendi 4096 Jun 11 05:29 .git
-rw-r--r-- 1 gofrendi gofrendi 21 Jun 11 05:29 .gitignore
-rw-r--r-- 1 gofrendi gofrendi 1776 Jun 11 05:29 README.md
drwxr-xr-x 3 gofrendi gofrendi 4096 Jun 11 05:29 _automate
-rwxr-xr-x 1 gofrendi gofrendi 1517 Jun 11 05:29 project.sh
-rw-r--r-- 1 gofrendi gofrendi 12 Jun 11 05:29 requirements.txt
drwxr-xr-x 2 gofrendi gofrendi 4096 Jun 11 05:29 src
-rw-r--r-- 1 gofrendi gofrendi 34 Jun 11 05:29 template.env
-rw-r--r-- 1 gofrendi gofrendi 54 Jun 11 05:29 zrb_init.py
```
A project is a directory containing `zrb_init.py`. All task definitions should be declared/imported to this file.
When you create a project by using `zrb project create`, you will also see some other files/directory:
- `.git` and `.gitignore`, indicating that your project is also a git repository.
- `README.md`, your README file.
- `project.sh`, a shell script to initiate your project.
- `requirements.txt`, list of necessary python packages to run start a project. Make sure to update this if you declare a task that depends on other Python library.
- `template.env`, your default environment variables.
- `_automate`, a directory contains task definitions that should be imported in `zrb_init.py`.
- `src`, your project resources (e.g., source code, docker compose file, helm charts, etc)
By default, Zrb will create several tasks under your project. Try to type:
```bash
zrb project
```
```
Usage: zrb project [OPTIONS] COMMAND [ARGS]...
Project management
Options:
--help Show this message and exit.
Commands:
add Add resources to project
build-images Build project images
create create
deploy Deploy project
destroy Remove project deployment
get-default-env Get default values for project environments
push-images Build project images
remove-containers Remove project containers
start Start project
start-containers Start as containers
stop-containers Stop project containers
```
# Adding a Cmd task
Once your project has been created, it's time to add some tasks to your project.
Let's say you work for a company named `Arasaka`, and you want to show a cool CLI banner for your company.
```bash
zrb project add cmd-task --project-dir . --task-name show-banner
```
Zrb will automatically do a few things for you:
- Create `_automate/show_banner.py`
- Import `_automate.show_banner` into `zrb_init.py`.
Now you can try to run the task:
```bash
zrb project show-banner
```
```
🤖 ➜ 2023-06-11T05:52:27.267892 ⚙ 4388 ➤ 1 of 3 • 🍓 zrb project show-banner • Run script: echo show banner
🤖 ➜ 2023-06-11T05:52:27.268193 ⚙ 4388 ➤ 1 of 3 • 🍓 zrb project show-banner • Current working directory: /home/gofrendi/zrb/playground/my-project
🤖 ➜ 2023-06-11T05:52:27.272726 ⚙ 4389 ➤ 1 of 3 • 🍓 zrb project show-banner • show banner
Support zrb growth and development!
☕ Donate at: https://stalchmst.com/donation
🐙 Submit issues/pull requests at: https://github.com/state-alchemists/zrb
🐤 Follow us at: https://twitter.com/zarubastalchmst
🤖 ➜ 2023-06-11T05:52:27.318296 ⚙ 4389 ➤ 1 of 3 • 🍓 zrb project show-banner • zrb project show-banner completed in 0.11460638046264648 seconds
To run again: zrb project show-banner
show banner
```
Now let's make the banner cooler with `figlet`. You can do so by editing `_automate/show_banner.py`. If you are using VSCode, you can type `code .` in your terminal.
> ⚠️ We will use `figlet`. Try to type `figlet hello` and see whether things work or not. If you are using Ubuntu, you might need to install figlet by invoking `sudo apt install figlet`.
Make sure to modify the `cmd` property of your `show_banner` task, so that it looks like the following:
```python
from zrb import CmdTask, runner
from zrb.builtin._group import project_group
###############################################################################
# Task Definitions
###############################################################################
show_banner = CmdTask(
name='show-banner',
description='show banner',
group=project_group,
cmd=[
'figlet Arasaka'
]
)
runner.register(show_banner)
```
Cool. You make it. [Saburo Arasaka](https://cyberpunk.fandom.com/wiki/Saburo_Arasaka) will be proud of you 😉.
# Adding another Cmd Task: Run Jupyterlab
Arasaka is a data-driven (and family-driven) company. They need their data scientists to experiment a lot to present the most valuable information/knowledge.
For this, they need to be able to create a lot of notebooks for experimentation.
To make sure things work, you need to:
- Install jupyterlab.
- Add Jupyterlab to your `requirements.txt`.
- Create a `notebooks` directory under `src`.
- Create a `start-jupyter` task.
Let's start by installing jupyterlab
```bash
pip install jupyterlab
```
Once jupyterlab has been installed, you need to add it into requirements.txt. You can do so by typing `pip freeze | grep jupyterlab` and add the output to your `requirements.txt`. Or you can do it with a single command:
```bash
pip freeze | grep jupyterlab >> requirements.txt
```
Now let's make a `notebooks` directory under `src`.
```bash
mkdir -p src/notebooks
touch src/notebooks/.gitkeep
```
You need an empty `.gitkeep` file, to tell git to not ignore the directory.
## Adding start-jupyterlab
We have a few requirements for `start-jupyterlab` task
- You should show Arasaka banner before starting jupyterlab.
- `start-jupyterlab` is considered completed only if the port is accessible.
- Arasaka employee can choose the port to serve jupyterlab in their computer.
Let's start by adding the task to your project.
```bash
zrb project add cmd-task --project-dir . --task-name start-jupyterlab
```
Now, let's modify `_automate/start_jupyterlab.py` into the following:
```python
from zrb import CmdTask, runner, IntInput, PortChecker
from zrb.builtin._group import project_group
from _automate.show_banner import show_banner
import os
###############################################################################
# Task Definitions
###############################################################################
notebook_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'src', 'notebooks'
)
start_jupyterlab = CmdTask(
name='start-jupyterlab',
description='start jupyterlab',
group=project_group,
inputs=[
IntInput(name='jupyterlab-port', default=8080)
],
upstreams=[show_banner],
cmd='jupyter lab --no-browser --port={{input.jupyterlab_port}} ' +
f'--notebook-dir="{notebook_path}"',
checkers=[
PortChecker(port='{{input.jupyterlab_port}}')
]
)
runner.register(start_jupyterlab)
```
First of all, we import `IntInput` and `PortChecker`, so that we can ask the user to choose the port number and check whether jupyterlab has been started on that port.
We also need to import `os`, so that we can determine the location of your `notebook_path`.
Finally we add some properties to `start_jupyterlab`:
- `inputs`: List of user inputs. We add a new input named `jupyterlab-port`. By default, the value will be `8080`.
- `upstreams`: List of tasks that should be completed before the current task is started. We want `show_banner` to be executed here.
- `cmd`: Command to be executed. You can use Jinja templating here. `{{input.jupyterlab_port}}` refers to the value `jupyterlab-port` input.
- `checkers`: List of task to determine whether current task has been completed or not. In this case we want to make sure that the port has already available for requests.
## Starting the jupyterlab
Finally, let's see and make sure things are working:
```
Jupyterlab port [8080]:
🤖 ➜ 2023-06-11T06:45:42.731189 ⚙ 6237 ➤ 1 of 3 • 🐨 zrb project show-banner • Run script: figlet Arasaka
🤖 ➜ 2023-06-11T06:45:42.731499 ⚙ 6237 ➤ 1 of 3 • 🐨 zrb project show-banner • Current working directory: /home/gofrendi/zrb/playground/my-project
🤖 ➜ 2023-06-11T06:45:42.736205 ⚙ 6238 ➤ 1 of 3 • 🐨 zrb project show-banner • _ _
🤖 ➜ 2023-06-11T06:45:42.736518 ⚙ 6238 ➤ 1 of 3 • 🐨 zrb project show-banner • / \ _ __ __ _ ___ __ _| | ____ _
🤖 ➜ 2023-06-11T06:45:42.736782 ⚙ 6238 ➤ 1 of 3 • 🐨 zrb project show-banner • / _ \ | '__/ _` / __|/ _` | |/ / _` |
🤖 ➜ 2023-06-11T06:45:42.737349 ⚙ 6238 ➤ 1 of 3 • 🐨 zrb project show-banner • / ___ \| | | (_| \__ \ (_| | < (_| |
🤖 ➜ 2023-06-11T06:45:42.737637 ⚙ 6238 ➤ 1 of 3 • 🐨 zrb project show-banner • /_/ \_\_| \__,_|___/\__,_|_|\_\__,_|
🤖 ➜ 2023-06-11T06:45:42.737940 ⚙ 6238 ➤ 1 of 3 • 🐨 zrb project show-banner •
🤖 ➜ 2023-06-11T06:45:42.741398 ⚙ 6237 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • Run script: jupyter lab --no-browser --port=8080 --notebook-dir="/home/gofrendi/zrb/playground/my-project/src/notebooks"
🤖 ➜ 2023-06-11T06:45:42.741681 ⚙ 6237 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • Current working directory: /home/gofrendi/zrb/playground/my-project
🤖 ⚠ 2023-06-11T06:45:43.347664 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.347 ServerApp] Package jupyterlab took 0.0000s to import
🤖 ⚠ 2023-06-11T06:45:43.354037 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.353 ServerApp] Package jupyter_lsp took 0.0061s to import
🤖 ⚠ 2023-06-11T06:45:43.354341 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [W 2023-06-11 06:45:43.353 ServerApp] A `_jupyter_server_extension_points` function was not found in jupyter_lsp. Instead, a `_jupyter_server_extension_paths` function was found and will be used for now. This function name will be deprecated in future releases of Jupyter Server.
🤖 ⚠ 2023-06-11T06:45:43.357141 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.356 ServerApp] Package jupyter_server_terminals took 0.0029s to import
🤖 ⚠ 2023-06-11T06:45:43.357496 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.357 ServerApp] Package notebook_shim took 0.0000s to import
🤖 ⚠ 2023-06-11T06:45:43.357800 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [W 2023-06-11 06:45:43.357 ServerApp] A `_jupyter_server_extension_points` function was not found in notebook_shim. Instead, a `_jupyter_server_extension_paths` function was found and will be used for now. This function name will be deprecated in future releases of Jupyter Server.
🤖 ⚠ 2023-06-11T06:45:43.358139 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.357 ServerApp] jupyter_lsp | extension was successfully linked.
🤖 ⚠ 2023-06-11T06:45:43.360703 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.360 ServerApp] jupyter_server_terminals | extension was successfully linked.
🤖 ⚠ 2023-06-11T06:45:43.364479 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.364 ServerApp] jupyterlab | extension was successfully linked.
🤖 ⚠ 2023-06-11T06:45:43.489074 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.488 ServerApp] notebook_shim | extension was successfully linked.
🤖 ⚠ 2023-06-11T06:45:43.538464 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.537 ServerApp] notebook_shim | extension was successfully loaded.
🤖 ⚠ 2023-06-11T06:45:43.539844 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.539 ServerApp] jupyter_lsp | extension was successfully loaded.
🤖 ⚠ 2023-06-11T06:45:43.540686 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.540 ServerApp] jupyter_server_terminals | extension was successfully loaded.
🤖 ⚠ 2023-06-11T06:45:43.541056 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.540 LabApp] JupyterLab extension loaded from /home/gofrendi/zrb/venv/lib/python3.9/site-packages/jupyterlab
🤖 ⚠ 2023-06-11T06:45:43.541399 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.540 LabApp] JupyterLab application directory is /home/gofrendi/zrb/venv/share/jupyter/lab
🤖 ⚠ 2023-06-11T06:45:43.541722 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.541 LabApp] Extension Manager is 'pypi'.
🤖 ⚠ 2023-06-11T06:45:43.543932 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.543 ServerApp] jupyterlab | extension was successfully loaded.
🤖 ⚠ 2023-06-11T06:45:43.544397 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.543 ServerApp] Serving notebooks from local directory: /home/gofrendi/zrb/playground/my-project/src/notebooks
🤖 ⚠ 2023-06-11T06:45:43.544742 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.544 ServerApp] Jupyter Server 2.6.0 is running at:
🤖 ⚠ 2023-06-11T06:45:43.545059 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.544 ServerApp] http://localhost:8080/lab?token=74085eb7b8304271e028c5e0e01237ebaadbb13a54a64921
🤖 ⚠ 2023-06-11T06:45:43.545395 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.544 ServerApp] http://127.0.0.1:8080/lab?token=74085eb7b8304271e028c5e0e01237ebaadbb13a54a64921
🤖 ⚠ 2023-06-11T06:45:43.545720 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [I 2023-06-11 06:45:43.544 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
🤖 ⚠ 2023-06-11T06:45:43.547067 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • [C 2023-06-11 06:45:43.546 ServerApp]
🤖 ⚠ 2023-06-11T06:45:43.547407 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab •
🤖 ⚠ 2023-06-11T06:45:43.547855 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • To access the server, open this file in a browser:
🤖 ⚠ 2023-06-11T06:45:43.548628 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • file:///home/gofrendi/.local/share/jupyter/runtime/jpserver-6240-open.html
🤖 ⚠ 2023-06-11T06:45:43.549002 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • Or copy and paste one of these URLs:
🤖 ⚠ 2023-06-11T06:45:43.549389 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • http://localhost:8080/lab?token=74085eb7b8304271e028c5e0e01237ebaadbb13a54a64921
🤖 ⚠ 2023-06-11T06:45:43.549734 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • http://127.0.0.1:8080/lab?token=74085eb7b8304271e028c5e0e01237ebaadbb13a54a64921
🤖 ➜ 2023-06-11T06:45:43.641677 ⚙ 6237 ➤ 1 of 1 • 🍐 port-check • Checking localhost:8080 (OK)
Support zrb growth and development!
☕ Donate at: https://stalchmst.com/donation
🐙 Submit issues/pull requests at: https://github.com/state-alchemists/zrb
🐤 Follow us at: https://twitter.com/zarubastalchmst
🤖 ➜ 2023-06-11T06:45:43.643523 ⚙ 6240 ➤ 1 of 3 • 🐶 zrb project start-jupyterlab • zrb project start-jupyterlab completed in 1.103727102279663 seconds
To run again: zrb project start-jupyterlab --jupyterlab-port "8080"
```
Open up your browser on `http://localhost:8080` and start working.
# Now you are ready
We have cover the minimum basics to work ~~for Arasaka~~ with Zrb.
No matter how complex your task will be, the flow will be similar:
- You generate the task
- You modify the task
- You run the task
To learn more about tasks and other concepts, you can visit [the concept section](concepts/README.md).
BTW, do you know that you can make and deploy a CRUD application without even touching your IDE/text editor? Check out [our tutorials](tutorials/README.md) for more cool tricks.
🔖 [Table of Contents](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/getting-started.md | getting-started.md |
🔖 [Table of Contents](README.md)
# Quirks
- Zrb is spelled `Zaruba`.
- If not set, `PYTHONUNBUFFERED` will be set to `1`.
- Once `zrb_init.py` is loaded, Zrb will automatically:
- Set `ZRB_PROJECT_DIR` to `zrb_init.py`'s parent directory.
- If loaded as CLI, Zrb will also:
- Adding `ZRB_PROJECT_DIR` to `PYTHONPATH`.
- Zrb passes several keyword arguments that will be accessible from the task's run method:
- `_args`: Shell argument when the task is invoked.
- `_task`: Reference to the current task.
- You can access the built-in command groups by importing `zrb.builtin_group`.
- How environments are loaded:
- `env_files` has the lowest priority, it will be overridden by `env`
- The last one takes greater priority
- `env` will override each other, the last one takes greater priority
- If you define a `DockerComposeTask`, it will automatically fill your environment with the ones you use in your docker-compose file. The environment defined that way will have a very low priority. They will be overridden by both `env_files` and `env`.
🔖 [Table of Contents](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/quirks.md | quirks.md |
🔖 [Table of Contents](README.md)
# Configuration
You can configure Zrb using environment variables. For example, you can turn off advertisement by set `ZRB_SHOW_ADVERTISEMENT` to `false`
```bash
export ZRB_SHOW_ADVERTISEMENT=false
zrb base64 encode --text non-credential-string
```
Try to set `ZRB_SHOW_ADVERTISEMENT` to `true` and `false` and see the result.
Some configurations are boolean. That's mean you can set them into:
- `true`, `1`, `yes`, `y`, or `active` to represent `True`
- `false`, `0`, `no`, `n`, or `inactivate` to represent `True`
# List of configurations
## `ZRB_ENV`
Environment variable prefix for your tasks. When define, Zrb will first try to find `<ZRB_ENV>_<VARIABLE_NAME>`. If the variable is not defined, Zrb will use `<VARIABLE_NAME>`. Very useful if you have multiple environments (i.e., prod, dev, staging)
- __Default value:__ Empty
- __Possible values:__ Any combination of alpha-numeric and underscore
- __Example:__ `DEV`
## `ZRB_INIT_SCRIPTS`
List of Python scripts that should be loaded by default.
- __Default value:__ Empty
- __Possible values:__ List of script paths, separated by colons(`:`).
- __Example:__ `~/personal/zrb_init.py:~/work/zrb_init.py`
## `ZRB_LOGGING_LEVEL`
Zrb log verbosity.
- __Default value:__ `WARNING`
- __Possible values:__ (sorted from the least verbose to the most verbose)
- `CRITICAL`
- `ERROR`
- `WARNING`
- `WARN` (or `WARNING`)
- `INFO`
- `DEBUG`
## `ZRB_SHELL`
Default shell to run Cmd Task. Should be `bash` compatible.
- __Default value:__ `bash`
- __Possible value:__
- `/usr/bin/bash`
- `/usr/bin/sh`
- etc.
## `ZRB_SHOULD_LOAD_BULTIN`
Whether Zrb should load builtin tasks or not.
- __Default value:__ `true`
- __Possible value:__ boolean values
## `ZRB_SHOW_ADVERTISEMENT`
Whether Zrb should load show advertisement or not.
- __Default value:__ `true`
- __Possible value:__ boolean values
## `ZRB_SHOW_PROMPT`
Whether Zrb should always show prompt or not.
- __Default value:__ `true`
- __Possible value:__ boolean values
🔖 [Table of Contents](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/configurations.md | configurations.md |
🔖 [Table of Contents](../README.md) / [Concepts](README.md)
# Template
You can use [Jinja template](https://jinja.palletsprojects.com/en/3.1.x/templates) for
- task input's default value
- task environment's default value
- several task's properties like `cmd`, `cmd_path`, `setup_cmd`, etc.
There are several render data you can use. Some are always available, while some others are only available in specific properties
# Common render data
The following render data are always available:
- `datetime`: Python datetime module
- `os`: Python os module
- `platform`: Python platform module
- `time`: Python time module
- `util`: Containing several utility function:
- `coalesce(value, *alternatives)`: you can use this function to coalesce value with alternatives
- `coalesce_str(value, *alternatives)`: Same as `util.coalesce`, but empty string is treated as `None` or `undefined`
- `to_camel_case(text)`: Return a `camelCased` text.
- `to_pascal_case(text)`: Return a `PascalCased` text.
- `to_kebab_case(text)`: Return a `kebab-cased` text.
- `to_snake_case(text)`: Return a `snake_cased` text.
- `to_human_readable(text)`: Return a `human readable` text.
- `to_boolean(text)`: Convert text to boolean. This function handle case-insensitive text, but it will throw an error if the text is neither true/false value representation.
- True value: `true`, `1`, `yes`, `y`, `active`
- False value: `false`, `0`, `0`, `n`, `inactive`
# Specific render data
- `input`: Map representation task input's value. Accessible when you set `task environment`'s property or any `task` property.
- `<snake_case_key>`: All task key inputs are snake-cased. These keys are accessible when you set `task environment`'s default property or any `task` property.
- `_task`: Representation of current task object, only accessible from `task` property
- `_kwargs`: Map representation of current task input keyword arguments, only accessible from `task` property
- `_args`: List representation of current task input arguments, only accessible from `task` property
- `env`: Map representation of task environment. Only accessible from task property.
# Example
```python
from zrb import CmdTask, StrInput, Env, runner
demo = CmdTask(
name='demo',
inputs=[
StrInput(
name='app-name',
default='my-app'
),
StrInput(
name='image-name',
default='docker.io/gofrendi/{{util.to_kebab_case(input.app_name)}}'
)
],
envs=[
Env(name='IMAGE', default='{{input.image_name}}')
],
cmd=[
'echo {{ input._args.0 }}',
'{% for arg in input._args %}echo "{{ arg }} ";{% endfor %}',
'echo "Image name (via input): {{ input.image_name }}"',
'echo "Image name (via env): {{ env.IMAGE }}"',
'echo "Image name (via env variable): $IMAGE"',
]
)
runner.register(demo)
```
You can try to invoke:
```
zrb demo --app-name my-app one two three
```
The result will be:
```
one
one
two
three
Image name (via input): docker.io/my-app
Image name (via env): docker.io/my-app
Image name (via env variable): docker.io/my-app
```
🔖 [Table of Contents](../README.md) / [Concepts](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/template.md | template.md |
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
# FlowTask
FlowTask allows you to compose several unrelated tasks/actions into a single tasks.
```python
from zrb import FlowTask, FlowNode, CmdTask, HttpChecker, runner
import os
CURRENT_DIR = os.dirname(__file__)
prepare_backend = CmdTask(
name='prepare-backend',
cwd=os.path.join(CURRENT_DIR, 'app', 'backend'),
cmd='pip install -r requirements.txt'
)
prepare_frontend = CmdTask(
name='prepare-backend',
cwd=os.path.join(CURRENT_DIR, 'app', 'frontend'),
cmd='npm install && npm run build'
)
start_app = CmdTask(
name='start-app',
cwd=os.path.join(CURRENT_DIR, 'app', 'backend'),
cmd='uvicorn main:app --port 8080',
checkers=[
HttpChecker(port=8080)
]
)
prepare_and_start_app = FlowTask(
name='prepare-and-start-app',
nodes=[
# Prepare backend and frontend concurrently
[
FlowNode(task=prepare_backend),
FlowNode(task=prepare_frontend)
],
# Then start app
FlowNode(task=start_app),
# And finally show instruction
FlowNode(
name='show-instruction',
cmd='echo "App is ready, Check your browser"'
)
]
)
runner.register(prepare_app)
```
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/tasks/flow-task.md | flow-task.md |
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
# ResourceMaker
ResourceMaker helps you create text resources, whether they are code or licenses.
For example, let's say you have the following template under `mit-license-template/license`
```
Copyright (c) <zrb_year> <zrb_copyright_holders>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
You want your user to be able to add the license to any app and replacing `<year>` and `<copyright holders>` with user input.
To accomplish this, you can make a resource maker:
```python
from zrb import ResourceMaker, StrInput, runner
import os
CURRENT_DIR = os.path.dirname(__file__)
add_mit_license = ResourceMaker(
name='add-mit-license',
inputs=[
StrInput(name='destination'),
StrInput(name='year'),
StrInput(name='copyright-holder')
],
destination_path='{{input.destination}}',
template_path=os.path.join(CURRENT_DIR, 'mit-license-template'),
replacements={
'<zrb_year>': '{{input.year}}',
'<zrb_copyright_holders>': '{{input.copyright_holder}}',
}
)
runner.register(add_mit_license)
```
Note that your template folder might contains a very complex structure. For example, you can make your application boiler plate into a template.
# ResourceMaker parameters
Every [task parameters](./task.md#common-task-parameters) are applicable here. Additionally, a `ResourceMaker` has it's own specific parameters.
## template_path
## destination_path
## replacements
## replacement_mutator
## excludes
## skip_parsing
# ResourceMaker methods
Please refer to [common task methods](./README.md#common-task-methods).
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/tasks/resource-maker.md | resource-maker.md |
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
# Checkers
Checkers are special type of tasks. You can use checkers to check for other task's readiness.
Currently there are three types of checkers:
- PathChecker
- PortChecker
- HttpChecker
Let's say you invoke `npm run build:watch`. This command will build your Node.js App into `dist` directory, as well as watch the changes and rebuild your app as soon as there are some changes.
- A web server is considered ready if it's HTTP Port is accessible. You can use `HTTPChecker` to check for web server readiness.
- But, before running the web server to start, you need to build the frontend and make sure that the `src/frontend/dist` has been created. You can use `PathChecker` to check for frontend readiness.
Let's see how we can do this:
```python
from zrb import CmdTask, PathChecker, Env, EnvFile, runner
build_frontend = CmdTask(
name='build-frontend',
cmd='npm run build',
cwd='src/frontend',
checkers=[
PathChecker(path='src/frontend/dist')
]
)
run_server = CmdTask(
name='run-server',
envs=[
Env(name='PORT', os_name='WEB_PORT', default='3000')
],
env_files=[
EnvFile(env_file='src/template.env', prefix='WEB')
]
cmd='python main.py',
cwd='src',
upstreams=[
build_frontend
],
checkers=[HTTPChecker(port='{{env.PORT}}')],
)
runner.register(run_server)
```
> Aside from `PathChecker` and `HTTPChecker`, you can also use `PortChecker` to check for TCP port readiness.
You can then run the server by invoking:
```bash
zrb run-server
```
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/tasks/checkers.md | checkers.md |
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
# CmdTask
You can use CmdTask to run CLI commands.
Let's see the following example:
```python
from zrb import CmdTask, StrInput, Env, runner
say_hello = CmdTask(
name='say-hello',
inputs=[
StrInput(name='name')
],
envs=[
Env(name='SOME_ENV')
],
cmd='echo {{input.name}}'
)
runner.register(say_hello)
```
If you need a multi-line command, you can also define the command as a list:
```python
from zrb import CmdTask, StrInput, Env, runner
say_hello = CmdTask(
name='say-hello',
inputs=[
StrInput(name='name')
],
envs=[
Env(name='SOME_ENV')
],
cmd=[
'echo {{input.name}}',
'echo $_INPUT_NAME', # This will also works
'echo Yeay!!!'
]
)
runner.register(say_hello)
```
However, if your command is too long, you can also load it from another file:
```python
from zrb import CmdTask, StrInput, Env, runner
say_hello = CmdTask(
name='say-hello',
inputs=[
StrInput(name='name')
],
envs=[
Env(name='SOME_ENV')
],
cmd_path='hello_script.sh'
)
runner.register(say_hello)
```
You can then run the task by invoking:
```bash
zrb say-hello --name=John
```
# CmdTask parameters
Every [task parameters](./task.md#common-task-parameters) are applicable here. Additionally, a `CmdTask` has it's own specific parameters.
## `executable`
Executable to run `cmd` command.
- __Required:__ False
- __Possible values:__ String representing the terminal, for example `bash` or `zsh`.
- __Default value:__ Equals to `ZRB_SHELL` If set. Otherwise it will be `bash`.
## `cmd`
The command to be executed.
Note that your command migt contains Jinja template. For example, you can use `{{input.snake_input_name}}` or `{{env.ENV_NAME}}`.
Additionaly, task inputs are assigned as terminal environment variable, uppercased with `_INPUT_` prefix.
Let's see on the following example:
```python
from zrb import CmdTask, StrInput, Env, runner
say_hello = CmdTask(
name='say-hello',
inputs=[
StrInput(name='name')
],
envs=[
Env(name='SOME_ENV')
],
cmd=[
'echo {{input.name}}',
'echo $_INPUT_NAME',
'echo {{env.SOME_ENV}}',
'echo $SOME_ENV',
'echo Yeay!!!'
]
)
runner.register(say_hello)
```
- __Required:__ False
- __Possible values:__
- String representing the command
- List of string representing multiline command
- Function returning a string
- __Default value:__ Empty string.
## `cmd_path`
String representing path of the shell script. If set, this will override `cmd`.
- __Required:__ False
- __Possible values:__ String representing shell script location.
- __Default value:__ Empty string.
## `cwd`
String representing current working directory.
- __Required:__ False
- __Possible values:__ String representing current working directory.
- __Default value:__ `None`
## `max_output_line`
How many line of output to be shown
- __Required:__ False
- __Possible values:__ Integer value.
- __Default value:__ `1000`
## `max_error_line`
How many line of error to be shown
- __Required:__ False
- __Possible values:__ Integer value.
- __Default value:__ `1000`
## `preexec_fn`
Function to set process `sid`.
- __Required:__ False
- __Possible values:__ function to set `sid` or `None`.
- __Default value:__ `os.setsid`.
# CmdTask methods
Please refer to [common task methods](./README.md#common-task-methods).
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/tasks/cmd-task.md | cmd-task.md |
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
# DockerComposeTask
Docker Compose is a convenient way to run containers on your local computer.
Suppose you have the following Docker Compose file:
```yaml
# docker-compose.yml file
version: '3'
services:
# The load balancer
nginx:
image: nginx:1.16.0-alpine
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf:ro
ports:
- "${HOST_PORT:-8080}:80"
```
You can define a task to run your Docker Compose file (i.e., `docker compose up`) like this:
```python
from zrb import DockerComposeTask, HTTPChecker, Env, runner
run_container = DockerComposeTask(
name='run-container',
compose_cmd='up',
compose_file='docker-compose.yml',
envs=[
Env(name='HOST_PORT', default='3000')
],
checkers=[
HTTPChecker(
name='check-readiness', port='{{env.HOST_PORT}}'
)
]
)
runner.register(run_container)
```
You can then run the task by invoking:
```bash
zrb run-container
```
Under the hood, Zrb will read your `compose_file` populate it with some additional configuration, and create a runtime compose file `._<compose-file>-<task-name>.runtime.yml`. Zrb will use the run the runtime compose file to run your `compose_cmd` (i.e., `docker compose -f <compose-file>-<task-name>.runtime.yml <compose-cmd>`)
# DockerComposeTask parameters
Every [task parameters](./task.md#common-task-parameters) are applicable here. Additionally, a `DockerComposeTask` has it's own specific parameters.
## `executable`
Executable to run `compose_cmd` and `setup_cmd` command.
- __Required:__ False
- __Possible values:__ String representing the terminal, for example `bash` or `zsh`.
- __Default value:__ Equals to `ZRB_SHELL` If set. Otherwise it will be `bash`.
## `compose_service_configs`
Env and EnFile configuration for each services in your `compose_file`.
For example, you want to set postgre's servie `POSTGRES_USER` into `root`. In that case, you can do:
```python
from zrb import runner, DockerComposeTask, ServiceConfig, Env
start_container = DockerComposeTask(
name='start-container',
compose_cmd='up',
compose_file='docker-compose.yml',
compose_service_configs={
'postgres': ServiceConfig(
envs=[
Env(name='POSTGRES_USER', default='root')
]
)
}
)
runner.register(start_container)
```
- __Required:__ False
- __Possible values:__ Map of `ServiceConfig`.
- __Default value:__ Empty map.
## `compose_file`
Your docker-compose file path.
If not set, Zrb will try to find the following files in your `cwd`:
- `compose.yml`
- `compose.yaml`
- `docker-compose.yml`
- `docker-compose.yaml`
Zrb will throws error if no `compose_file` found.
- __Required:__ False
- __Possible values:__ String representing the docker compose file or `None`.
- __Default value:__ `None`
## `compose_cmd`
Docker compose command (i.e: `docker compose <compose-cmd>`)
- __Required:__ False
- __Possible values:__ String representing the docker compose command.
- __Default value:__ `up`
## `compose_options`
Docker compose options. Type `docker compose --help` to see possible options.
Example:
```python
from zrb import runner, DockerComposeTask, ServiceConfig, Env
start_container = DockerComposeTask(
name='start-container',
compose_cmd='up',
compose_file='docker-compose.yml',
compose_options={
'--project-name': 'my-project',
'--parallel': 5
}
)
runner.register(start_container)
```
- __Required:__ False
- __Possible values:__ Map of compose option.
- __Default value:__ Empty map.
## `compose_flags`
## `compose_args`
## `compose_env_prefix`
## `setup_cmd`
## `setup_cmd_path`
## `cwd`
## `max_output_line`
## `max_error_line`
## `preexec_fn`
# DockerComposeTask methods
Please refer to [common task methods](./README.md#common-task-methods).
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/tasks/docker-compose-task.md | docker-compose-task.md |
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md)
# Type of Tasks
There are many task types in Zrb. Every task has their own specific use-cases:
- [CmdTask](cmd-task.md): Run a CLI command
- [Task (python task)](python-task.md): Run a Python function
- [DockerComposeTask](docker-compose-task.md): Run a Docker compose Command
- [Resource Maker](resource-maker.md): Generate artifacts/resources based on templates
- [FlowTask](flow-task.md): Put `CmdTask` and `python task` into single flow.
- [Checkers (HttpChecker, PortChecker, and PathChecker)](checkers.md): Check parent task's readiness.
As every task are extended from `BaseTask`, you will see that most of them share some common parameters.
```
BaseTask
│
│
┌──────┬───────────┬───────────┼───────────┬───────────┬──────────┐
│ │ │ │ │ │ │
│ │ │ │ │ │ │
▼ ▼ ▼ ▼ ▼ ▼ ▼
Task CmdTask ResourceMaker FlowTask HttpChecker PortChecker PathChecker
│
│
▼
DockerComposeTask
```
Aside from the documentation, you can always dive down into [the source code](https://github.com/state-alchemists/zrb/tree/main/src/zrb/task) to see the detail implementation.
> __Note:__ Never initiate `BaseTask` directly, use `Task` instead.
# Task overview
Tasks are building blocks of your automation.
Let's see how you can define tasks and connect them to each others:
```python
from zrb import CmdTask, IntInput, Env, Group, runner, PortChecker
# defining two groups: arasaka, and jupyterlab
# jupyterlab is located under arasaka
arasaka = Group(name='arasaka', description='Arasaka automation')
jupyterlab = Group(name='jupyterlab', parent=arasaka, description='Jupyterlab related tasks')
# defining show banner under `arasaka` group
show_banner = CmdTask(
name='show-banner',
icon='🎉',
color='yellow',
description='Show banner',
group=arasaka,
envs=[
# EMPLOYEE enviornment variable will be accessible from inside the task as USER.
# The default value this variable will be `employee`.
Env(name='USER', os_name='EMPLOYEE', default='employee')
],
cmd=[
'figlet Arasaka',
'echo "Welcome $USER"'
]
)
# registering `show_banner` to zrb runner
runner.register(show_banner)
# defining show banner under `arasaka jupyterlab` group
start_jupyterlab = CmdTask(
name='start',
icon='🧪',
color='green',
description='Start Jupyterlab',
group=jupyterlab,
inputs=[
# Port where jupyterlab should be started
IntInput(name='jupyterlab-port', default=8080)
],
# start_jupyterlab depends on show_banner
upstreams=[show_banner],
cmd='jupyter lab --no-browser --port={{input.jupyterlab_port}}',
checkers=[
PortChecker(port='{{input.jupyterlab_port}}')
],
retry=2,
retry_interval=3
)
# registering `show_banner` to zrb runner
runner.register(start_jupyterlab)
```
You can try to run `start_jupyterlab` task as follow:
```bash
export EMPLOYEE="Yorinobu Arasaka"
# The following command will
# - Show Arasaka Banner
# - Start jupyterlab on the port you choose (by default it is 8080)
zrb arasaka jupyterlab start
```
As `start_jupyterlab` has `show_banner` as it's upstream, you can expect the `show_banner` to be executed prior to `start_jupyterlab`.
A task might also have multiple upstreams. In that case, the upstreams will be executed concurrently.
> __Note:__ Only tasks registered to `runner` are directly accessible from the CLI.
# Task Lifecycle
Every task has it's own lifecycle.
```
┌────────────────────────────┐
│ │
│ ▼
│ ┌──► Ready ────► Stopped
│ │ ▲
Triggered ────► Waiting ────► Started ───┤ │
▲ │ │
│ └──► Failed ────────┘
│ │
│ │
│ ▼
└─────────── Retry
```
> __Note:__ `Ready` and `Stopped` is interchangable. If your task is not a long running process, it most likely `stopped` first before `ready`.
- `Triggered`: Task is triggered and will be executed.
- `Waiting`: Task won't be started until all it's upstreams are ready.
- `Started`: Zrb has start the task.
- `Failed`: The task is failed, due to internal error or other causes. A failed task can be retried or stopped, depends on `retries` setting.
- `Retry`: The task has been failed and now rescheduled to be started.
- `Ready`: The task is ready. Some tasks are automatically stopped after ready, but some others keep running in the background (e.g., web server, scheduler, etc)
- `Stopped`: The task is no longer running.
# Common task parameters
Zrb tasks share some common parameters like `name`, `icon`, `color`, `description`, etc.
Some parameters are required, while some others are optionals. Please refer to [each specific task documentation](#type-of-tasks) for more complete list of parameters.
## `name`
Task name
- __Required:__ True
- __Possible values:__ Any string
## `color`
Color representing the task. If not set, Zrb will choose a random color for your task.
- __Required:__ False
- __Possible values:__
- `green`
- `yellow`
- `blue`
- `magenta`
- `cyan`
- `light_green`
- `light_yellow`
- `light_blue`
- `light_magenta`
- `light_cyan`
- __Default value:__ One of the possible values, set randomly during runtime.
## `icon`
Icon representing the task. If not set, Zrb will choose a random icon for your task.
- __Required:__ False
- __Possible values:__ Any emoji
- __Default value:__ Set randomly during runtime
## `description`
Description of the task.
- __Required:__ False
- __Possible values:__ Any string
- __Default value:__ Empty string
## `group`
Task group where the current task is located.
You can create a group like this:
```python
arasaka = Group(name='arasaka', description='Arasaka automation')
```
You can also put a group under another group:
```python
jupyterlab = Group(name='jupyterlab', parent=arasaka, description='Jupyterlab related tasks')
```
- __Required:__ False
- __Possible values:__ `Group` or `None`
- __Default value:__ `None`
## `inputs`
List of task inputs. There are multiple type of task inputs you can choose:
- `BoolInput`
- `ChoiceInput`
- `FloatInput`
- `IntInput`
- `PasswordInput`
- `StrInput`
See the following example:
```python
from zrb import (
runner, CmdTask,
StrInput, ChoiceInput, IntInput, BoolInput, FloatInput, PasswordInput
)
register_trainer = CmdTask(
name='register-trainer',
inputs=[
StrInput(name='name', default=''),
PasswordInput(name='password', default=''),
IntInput(name='age', default=0),
BoolInput(name='employed', default=False),
FloatInput(name='salary', default=0.0),
ChoiceInput(
name='starter-pokemon',
choices=['bulbasaur', 'charmender', 'squirtle']
)
],
cmd=[
'echo "Name: {{input.name}}"',
'echo "Password (sorry, we should not show this): {{input.password}}"',
'echo "Age: {{input.age}}"',
'echo "Employed: {{input.employed}}"',
'echo "Salary: {{input.salary}}"',
'echo "Starter Pokemon: {{input.starter_pokemon}}"',
]
)
runner.register(register_trainer)
```
> Note: When you access inputs using Jinja (i.e., `{{input.snake_input_name}}`) syntax, input name is automatically snake-cased.
- __Required:__ False
- __Possible values:__ List of `Input` object
- __Default value:__ `[]`
## `envs`
List of task envs. Task envs allow you to map task environment into os environment.
In the following example, we map OS `$EMPLOYEE` into task `$USER`, as well as set default value `employee` to the task env variable:
```python
from zrb import CmdTask, Env, runner
show_banner = CmdTask(
name='show-banner',
envs=[
Env(name='USER', os_name='EMPLOYEE', default='employee')
],
cmd=[
'figlet Arasaka',
'echo "Welcome $USER"'
'echo "こんにちは {{env.USER}}"'
]
)
runner.register(show_banner)
```
Just like `input`, you can also use Jinja syntax (i.e., `{{env.USER}}`)
- __Required:__ False
- __Possible values:__ List of `Env` object
- __Default value:__ `[]`
## `env_files`
List of task environment files.
Defining environment can be a challenging task, especially if you have a lot of it. Zrb allows you load environment from `*.env` file.
For example, you have the following environment file:
```bash
# File location: template.env
PORT=8080
INSTALL_REQUIREMENTS=1
```
You can load the environment file like the following:
```python
from zrb import CmdTask, EnvFile, runner
# defining show banner under `arasaka jupyterlab` group
start_jupyterlab = CmdTask(
name='start-jupyter',
env_files=[
EnvFile(env_file='/path/to/template.env', prefix='JUPYTER')
],
cmd=[
'if [ "$INSTALL_REQUIREMENTS" = "1" ]',
'then',
' pip install -r requirements.txt',
'fi',
'jupyter lab --no-browser --port={{env.PORT}}',
],
checkers=[
PortChecker(port='{{env.PORT}}')
]
)
# registering `show_banner` to zrb runner
runner.register(start_jupyterlab)
```
Finally, you can set `PORT` and `INSTALL_REQUIREMENTS` using your environment file prefix (`JUPYTER`):
```bash
export JUPYTER_PORT=3000
export INSTALL_REQUIREMENTS=0
zrb start-jupyter
```
This will let you avoid environment variable colisions.
- __Required:__ False
- __Possible values:__ List of `EnvFile` object
- __Default value:__ `[]`
## `upstreams`
List of upstream tasks. Before running a task, Zrb will make sure that it's upstreams are ready.
Just like in our previous example `start_jupyterlab` will not started before `show_banner` is ready.
- __Required:__ False
- __Possible values:__ List of `Task` object
- __Default value:__ `[]`
## `skip_execution`
Boolean, a function returning a boolean, or Jinja syntax that rendered to boolean.
If `skip_execution` is evaluated to `True`, then the task will be considered as completed without being started.
- __Required:__ False
- __Possible values:__ Boolean, a function returning a boolean, or Jinja syntax that rendered to boolean.
- __Default value:__ `False`
# Common task methods
Every task share some common methods like `run`, `check`, and `to_function`.
## `get_env_map`
Return task environments as dictionary.
This is typically useful if you create a Python task. Zrb won't override `os.environ`, so you can't load task environment using `os.environ.get` or `os.getenv`. Instead, you have to use `task.get_env_map`.
Example:
```python
from zrb import Task, Env, python_task, runner
@python_task(
name='show-env',
envs=[
Env(name='POKEMON_NAME', default='charmender'),
Env(name='ELEMENT', default='fire'),
],
runner=runner
)
def show_env(*args: Any, **kwargs: Any):
task: Task = kwargs.get('_task')
inputs = task.get_env_map()
return inputs # should return {'POKEMON_NAME': 'charmender', 'ELEMENT': 'fire'}
```
## `get_input_map`
Return task inputs as dictionary.
This is typically useful if you create a Python task.
Example:
```python
from zrb import Task, StrInput, python_task, runner
@python_task(
name='show-env',
inputs=[
StrInput(name='pokemon-name', default='charmender'),
StrInput(name='element', default='fire'),
],
runner=runner
)
def show_env(*args: Any, **kwargs: Any):
task: Task = kwargs.get('_task')
inputs = task.get_input_map()
return inputs # should return {'pokemon_name': 'charmender', 'element': 'fire'}
```
## `run`
Method to be executed when a task is started. You can extend `BaseTask` and override this method if you think you need to.
Example:
```python
from zrb import BaseTask, Task
class MyTask(BaseTask):
def run(self, *args: Any, **kwargs: Any) -> Any:
task: Task = kwargs.get('_task')
task.print_out(f'args: {args}, kwargs: {kwargs}')
# TODO: do anything here
return super().run(*args, **kwargs)
```
## `check`
Method to check task readiness. You can extend `BaseTask` and override this method if you think you need to.
Example:
```python
from zrb import BaseTask
class MyTask(BaseTask):
def check(self) -> bool:
# TODO: Add your custom checking here
return super().check()
```
## `to_function`
Method to create main-loop. Once a main-loop is created, you can perform a function call to it.
Example:
```python
from zrb import CmdTask, Env, runner
show_banner = CmdTask(
name='show-banner',
envs=[
Env(name='USER', os_name='EMPLOYEE', default='employee')
],
cmd=[
'figlet Arasaka',
'echo "Welcome $USER"'
'echo "こんにちは {{env.USER}}"'
]
)
show_banner_loop = show_banner.to_function()
print(show_banner_loop()) # Now you can run your task as a normal python function
```
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/tasks/README.md | README.md |
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
# `Task`
You can turn any function with `args` and `kwargs` argument into a Python Task.
```python
from zrb import Task, Env, StrInput, runner
def say_hello(*args, **kwargs) -> str:
name = kwargs.get('name')
greetings = f'Hello, {name}'
return greetings
say_hello_task = Task(
name='say-hello',
inputs=[
StrInput(name='name')
],
envs=[
Env(name='PYTHONUNBUFFERED', default='1')
],
run=say_hello
)
runner.register(say_hello_task)
```
In the example, you define a function named `say_hello`.
Then you make a task named and set it's `run` property to `say_hello` function. As you notice, Zrb will automatically pass task input into `kwargs` argument.
You can run the task by invoking:
```
zrb say-hello --name John
```
Thiw will give you a `Hello John`.
# Using `python_task` decorator
Aside from defining the function and the task separately, you can also use `python_task` decorator to turn your function into a task.
```python
from zrb import python_task, Env, StrInput, runner
@python_task(
name='say-hello',
inputs=[
StrInput(name='name')
],
envs=[
Env(name='PYTHONUNBUFFERED', default='1')
],
runner=runner
)
def say_hello(*args, **kwargs) -> str:
name = kwargs.get('name')
greetings = f'Hello, {name}'
task = kwargs.get('_task')
task.print_out(greetings)
return greetings
```
Notice that you can now use `runner` as decorator argument so that you don't need to call `runner.register(say_hello)`.
You can then run the task by invoking:
```bash
zrb say-hello --name=John
```
You can also use `async` function if you think you need to.
# Task parameters
Every [common task parameters](./README.md#common-task-parameters) are applicable here. Additionally, a `Task` has some specific parameters.
## `checkers`
List of checker tasks to mark task as ready.
A lot of background processes will keep running forever. You need a way to make sure whether the process is ready or not.
To do this, we can use another type of tasks:
- `PortChecker`: Check whether TCP port is ready or not.
- `PathChecker`: Check whether file/directory is exists.
- `HttpChecker`: Check whether HTTP port is ready or not.
Let's see some example:
```python
from zrb import CmdTask, HttpChecker, PathChecker, runner
build_frontend = CmdTask(
name='build-frontend',
cmd='npm run build --watch',
checkers=[
PathChecker(path='src/frontend/build')
]
)
runner.register(build_frontend)
start_server = CmdTask(
name='start-server',
cmd='nodemon start',
upstreams=[build_frontend],
checkers=[
HttpChecker(port='80', url='/readiness')
]
)
runner.register(start_server)
```
The `build_frontend` task will keep running in the background. It will check for any changes, and re-compile the frontend.
You can consider `build_frontend` to be ready if `src/frontend/build` is exists.
Once `build_frontend` ready, `start_server` can be started. This task is considered as ready once port 80 is available.
- __Required:__ False
- __Possible values:__ List of `Task` object
- __Default value:__ `[]`
## `retry`
How many time the task will be retried before it is considered as fail.
- __Required:__ False
- __Possible values:__ Integer numbers, greater or equal to `0`
- __Default value:__ `2`
## `retry_interval`
Inter retry interval.
- __Required:__ False
- __Possible values:__ Any number greater or equal to `0`
- __Default value:__ `0`
# python_task parameters
When you create a python task using `python_task` decorator, you can use all parameters defined in [the previous section](#task-parameters).
Furthermore, you can also define `runner` parameter.
## `runner`
Runner this task registered to.
- __Required:__ False
- __Possible values:__ `runner` or `None`
- __Default value:__ `runner`
# Task methods
Please refer to [common task methods](./README.md#common-task-methods).
🔖 [Table of Contents](../../README.md) / [Concepts](../README.md) / [Tasks](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/concepts/tasks/python-task.md | python-task.md |
🔖 [Table of Contents](../README.md) / [Tutorials](README.md)
# Running task programmatically
Aside from running tasks from the terminal, you can also run tasks programmatically. For example:
```python
from zrb import CmdTask
# Create task
cmd_task = CmdTask(
name='sample',
cmd='echo hello'
)
# Create function
function = cmd_task.to_function(env_prefix='')
# Invoke function, and show the result
result = function()
print(result.output)
```
This is useful if you want to run Zrb tasks from inside your application.
🔖 [Table of Contents](../README.md) / [Tutorials](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/tutorials/run-task-programmatically.md | run-task-programmatically.md |
🔖 [Table of Contents](../README.md) / [Tutorials](README.md)
# Define task dynamically
Every task definition in Zrb are written in Python.
Thus, you can use looping or any programming tricks to define your tasks.
```python
fruits = {
'apple': '🍏',
'orange': '🍊',
'grapes': '🍇',
}
fruit_upstreams: List[Task] = []
for fruit, emoji in fruits.items():
show_single_fruit = CmdTask(
name=f'show-{fruit}',
description=f'Show {fruit}',
cmd=f'echo {emoji}',
)
runner.register(show_single_fruit)
fruit_upstreams.append(show_single_fruit)
show_fruits = CmdTask(
name='show-fruits',
description='Show fruits',
upstreams=fruit_upstreams,
cmd='echo show fruits'
)
runner.register(show_fruits)
```
In this example, you define `show-apple`, `show-orange`, and `show-grapes` based on `fruits` dictionary.
Then you make another task named `show-fruts` that depends on the previosly defined task.
You can try to run the tasks:
```bash
zrb show-fruits
```
It should run all previous tasks as well.
🔖 [Table of Contents](../README.md) / [Tutorials](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/tutorials/define-task-dynamically.md | define-task-dynamically.md |
🔖 [Table of Contents](../README.md) / [Tutorials](README.md)
# Development to deployment: Low code
In this demo, you will see how you can use [zrb](https://pypi.org/project/zrb) to:
- Create a CRUD application.
- Add some entities/fields.
- Run the application as monolith/microservices.
- Containerize your application.
- Deploy the application to a Kubernetes cluster.
No coding skill is required.
# Start the demo
Before starting the demo, you need to make sure you have the following software installed:
- Python (3.9 or higher)
- Pip
- Venv
- Docker (with Docker Compose extension)
- Pulumi
- Node.Js
- Npm
- Zrb
- Access to a Kubernetes cluster
> __Note:__ Installing docker desktop will give you docker + docker compose extension, as well as local kubernetes cluster (if enabled).
Once everything is set, you can run the following commands on your terminal:
```bash
echo "👷 Crete my-project"
zrb project create --project-dir my-project --project-name "My Project"
cd my-project
source project.sh
echo "👷 Add fastapp"
zrb project add fastapp --project-dir . --app-name "myapp" --http-port 3000
echo "👷 Add library module"
zrb project add fastapp-module --project-dir . --app-name "myapp" --module-name "library"
echo "👷 Add book entity"
zrb project add fastapp-crud --project-dir . --app-name "myapp" --module-name "library" \
--entity-name "book" --plural-entity-name "books" --column-name "code"
echo "👷 Add title field"
zrb project add fastapp-field --project-dir . --app-name "myapp" --module-name "library" \
--entity-name "book" --column-name "title" --column-type "str"
echo "👷 Start fastapp"
zrb project start-myapp --myapp-run-mode "monolith"
```
The commands will give you:
- A folder named `my-project`.
- A Python application under the project (`my-project/src/myapp`).
- A `library` module under the application (`myproject/src/myapp/src/module/library`)
- A `book` entity under the `library` module.
- A field named `title` under the `book` entity.
It will also run the application on your local computer.
# Open myapp from the browser
You can visit [http://localhost:3000](http://localhost:3000) to see how the application works.
The default username and password will be `root` and `toor`.
Try to add some books.

# Override default username and password
You can override the system username and password by setting some environment variables:
```bash
# press ctrl + c to stop the application
export MYAPP_APP_AUTH_ADMIN_USERNAME=gofrendi
export MYAPP_APP_AUTH_ADMIN_PASSWORD=aVeryStrongPassword73
zrb project start-myapp --myapp-run-mode "monolith"
```
# Checking the process
Currently, `myapp` is running as a single process in your local computer.
Let's confirm this by openning a new terminal and invoking the following command:
```bash
pgrep uvicorn -a
```
You should see a single process to this:
```
4001 ... main:app --host 0.0.0.0 --port 3000 --reload --reload-include index.html
```
# Run Myapp as microservices
Now let's go back to your first terminal, so that you can kill `my-app` process by pressing `ctrl + c`.
Stay in your first terminal, and try to invoke the following command:
```bash
zrb project start-myapp --myapp-run-mode "microservices"
```
Once started, you will be able to access [http://localhost:3000](http://localhost:3000) as what you have done previously.
Now let's invoke the following command on your second terminal:
```bash
pgrep uvicorn -a
```
```
5305 ... main:app --host 0.0.0.0 --port 3000 --reload --reload-include index.html
5307 ... main:app --host 0.0.0.0 --port 3001 --reload --reload-include index.html
5309 ... main:app --host 0.0.0.0 --port 3002 --reload --reload-include index.html
5311 ... main:app --host 0.0.0.0 --port 3003 --reload --reload-include index.html
```
You can see that now you have multiple processes.
Each processes handle different aspect of the application:
- `myapp-gateway` (port: 3000)
- Handle HTTP request from user
- Send RPC request to other services
- Consume RPC reply from other services
- `myapp-auth-service` (port: 3001)
- Handle RPC request related to authentication/authorization
- Send RPC response to gateway
- `myapp-log-service` (port: 3002)
- Handle RPC request related to user activity/entities change history
- Send RPC response to gateway
- `myapp-library-service` (port: 3002)
- Handle RPC request related to book management
- Send RPC response to gateway
You can see that you can run `myapp` as either microservices as monolith. When in doubt, start with monolith.
# Run Myapp as containers
Now let's run `myapp` as containers.
Let's go back to your first terminal, and kill the running process by pressing `ctrl + c`.
To run `myapp` as containers you can invoke:
```bash
zrb project start-myapp-container --myapp-run-mode microservices
```
> __Note:__ You can also run the container as a monolith if you want to. Just invoke `zrb project start-myapp-container --myapp-run-mode monolith`
Now let's see how things look like by invoking:
```bash
docker ps
```
```
CONTAINER ID IMAGE ... PORTS NAMES
868a7e089983 gofrendi/myapp:latest ... 0.0.0.0:3003->3003/tcp myapp_library_service
022bd4d3c86c rabbitmq:3-management ... 4369/tcp, 5671/tcp ... myapp_rabbitmq
afcdbface5b0 adminer ... 0.0.0.0:9001->8080/tcp myapp_adminer
cd5fa960db85 gofrendi/myapp:latest ... 0.0.0.0:3002->3002/tcp myapp_log
5ea14febc9a2 gofrendi/myapp:latest ... 0.0.0.0:3001->3001/tcp myapp-auth
94d382af67ed gofrendi/myapp:latest ... 0.0.0.0:3000->3000/tcp myapp_gateway
20b30ae224d1 postgres:15.2 ... 0.0.0.0:5432->5432/tcp myapp_postgresql
```
You will still able to access [http://localhost:3000](http://localhost:3000) as you previously do. The application is now running as containers.
You can stop the container by pressing `ctrl + c` in your first terminal.
Make sure to also run `zrb project stop-myapp-container` to prevent your containers to be auto-started.
# Deploying Myapp to Kubernetes
For this demo, we are using docker desktop with kubernetes enabled.

Once your kubernetes cluster is running, you can invoke the following command:
```bash
docker login
zrb project deploy-myapp --myapp-deploy-mode microservices
```
Now let's invoke `kubectl get pods` and see how the pods are running
```
NAME READY STATUS RESTARTS AGE
myapp-auth-service-f8ae1d6e-85f4c9546b-snxsg 1/1 Running 0 11m
myapp-gateway-1ef04e6a-675c677c99-cnlj4 1/1 Running 0 11m
myapp-library-service-ea122a8e-cd66f54db-hgkvf 1/1 Running 0 11m
myapp-log-service-2985aef5-6fbcdd8fbd-jfg28 1/1 Running 0 11m
postgresql-0 1/1 Running 0 14m
rabbitmq-0 1/1 Running 0 76s
```
You can also invoke `kubectl get services` to see how the network is configured
```
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 12h
myapp-gateway-e212b722 LoadBalancer 10.106.184.165 localhost 3000:31723/TCP 14m
postgresql ClusterIP 10.108.147.124 <none> 5432/TCP 17m
postgresql-hl ClusterIP None <none> 5432/TCP 17m
rabbitmq ClusterIP 10.104.181.249 <none> 5672/TCP,4369/TCP,... 17m
rabbitmq-headless ClusterIP None <none> 4369/TCP,5672/TCP,... 17m
```
And as always you you can access [http://localhost:3000](http://localhost:3000).
# Conclusion
You have see how you can create, run, and deploy application quickly.
🔖 [Table of Contents](../README.md) / [Tutorials](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/tutorials/development-to-deployment-low-code.md | development-to-deployment-low-code.md |
🔖 [Table of Contents](../README.md) / [Tutorials](README.md)
# Preparing your machine
> This guide has been tested under `debian 12` and `ubuntu 22.0.4`
Zrb is a Python package. So, in order to get started, you will need to have:
- Python
- Pip
- Venv
```bash
sudo apt install python-is-python3 python3-dev python3-distutils python3-openssl python3-pip python3-venv
```
# Creating venv for setup
Once you have install Python, pip, and venv, you can make a directory named `zrb-setup` and create a virtual environment.
```bash
mkdir -p ~/zrb-setup
cd ~/zrb-setup
python -m venv .venv
source .venv/bin/activate
```
Please note that whenever you want to work with the virtual environment, you will have to execute `source .venv/bin/activate`.
Creating different virtual environment for each of your projects make your pip packages more manageable.
For example, you will be able to have two different version of the same python package in the different project.
# Install zrb on venv
After having your virtual environment set up, you can install `zrb` on your virtual environment:
```bash
pip install zrb
```
# Install essential packages for ubuntu/debian
Next you can install essential packages for development.
```bash
zrb ubuntu install packages
```
# Setup zsh
Zsh and oh-my-zsh is highly compatible, yet provide the same experience as `bash`.
Installing zsh is not mandatory, but highly recommended.
To setup and load Zsh, you need to run the following command:
```bash
zrb devtool install zsh
```
The command will help you to setup
- Zsh
- Oh-my-zsh
- Zinit
- Zdharma (will be installed once you reload the terminal)
To load the terminal, you need to `exit` from your current session and re-login.
```bash
exit
```
# Setup Tmux
Tmux is terminal multiplexer. It is useful and highly recommended if you need to spawn multiple terminals in a single panel.
To install tmux, you need to invoke the following command:
```bash
zrb devtool install tmux
```
# Setup pyenv
With pyenv, you can manage multiple python environments.
Installing pyenv is highly recommended.
You can install pyenv by invoking the following command:
```bash
zrb devtool install pyenv
```
# Setup nvm
Nvm allows you to manage multiple node.js environments. Node.js is mandatory if you want to run `fastapp` application.
You can install nvm by invoking the following command:
```bash
zrb devtool install nvm
```
# Setup docker and kubectl
If you are using WSL, the most recommended way is by installing docker desktop and enable wsl-integration

Otherwise, you can invoke the following command:
```bash
zrb devtool install docker
zrb devtool install kubectl
```
# Setup pulumi
To setup pulumi, you can invoke the following command:
```bash
zrb devtool install pulumi
```
You need pulumi for app deployment.
# Setup other tools
There are some other tools you might need to install depending on your needs. For example:
- Helm
- Aws CLI
- GCloud CLI
- GVM
- SDKMAN
- Terraform
You can install those tools by invoking the following commands:
```
zrb devtool install helm
zrb devtool install aws
zrb devtool install gcloud
zrb devtool install gvm
zrb devtool install sdkman
zrb devtool install terraform
```
# Next
Now you are ready. Next, you can check our [low code tutorial](./development-to-deployment-low-code.md) or learn [zrb basic concepts](../concepts/README.md).
🔖 [Table of Contents](../README.md) / [Tutorials](README.md) | zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/tutorials/preparing-your-machine-for-development.md | preparing-your-machine-for-development.md |
🔖 [Table of Contents](../README.md) / [Troubleshooting](README.md)
# Enable shell completion
To enable shell completion, you need to set `_ZRB_COMPLETE` variable.
For `bash`:
```bash
eval $(_ZRB_COMPLETE=bash_source zrb)
```
For `zsh`:
```bash
eval $(_ZRB_COMPLETE=zsh_source zrb)
```
Once set, you will have a shell completion in your session:
```bash
zrb <TAB>
zrb md5 hash -<TAB>
```
Visit [click shell completion](https://click.palletsprojects.com/en/8.1.x/shell-completion/) for more information.
🔖 [Table of Contents](../README.md) / [Troubleshooting](README.md)
| zrb | /zrb-0.0.94.tar.gz/zrb-0.0.94/docs/troubleshooting/enable-shell-completion.md | enable-shell-completion.md |
__author__ = 'Zhang Fan'
import json
import redis
from zretry import retry
_retry_func_list = []
def _except_retry(func):
_retry_func_list.append(func.__name__)
return func
class redis_inst():
def __init__(self, host: str or list, port=6379, cluster=False, collname='test', password=None,
decode_responses=True,
retry_interval=1, max_attempt_count=5,
**kw):
'''
创建一个redis客户端
:param host: 单点服务器ip, 集群设置为list类型, 格式[{"host": "IP地址", "port": "端口"}, {"host": "IP地址", "port": "端口"]
:param port: 单点服务器端口
:param cluster: 是否为集群
:param collname: 文档名
:param password: 密码
:param decode_responses: 是否解码
:param retry_interval: 尝试等待时间
:param max_attempt_count: 最大尝试次数
:param kw: 其他参数
'''
if cluster:
from rediscluster import StrictRedisCluster
self._conn = StrictRedisCluster(
startup_nodes=eval(host) if isinstance(host, str) else host,
password=password, decode_responses=decode_responses, **kw)
else:
rpool = redis.ConnectionPool(host=host, port=port, password=password,
decode_responses=decode_responses, **kw)
self._conn = redis.Redis(connection_pool=rpool)
self.collname = collname
for retry_func_name in _retry_func_list:
func = getattr(self, retry_func_name)
decorator = retry(interval=retry_interval, max_attempt_count=max_attempt_count)(func)
setattr(self, retry_func_name, decorator)
# region 其他操作
def change_coll(self, collname):
self.collname = collname
@_except_retry
def collnames(self):
return [name for name in self._conn.scan_iter()]
@_except_retry
def collnames_iter(self):
return self._conn.scan_iter()
@_except_retry
def save_db(self, bgsave=False):
# 返回是否成功
if bgsave:
return self._conn.bgsave()
return self._conn.save()
@_except_retry
def delete_coll(self, *collname):
return self._conn.delete(*collname)
@_except_retry
def rename(self, newname, collname=None):
return self._conn.rename(collname or self.collname, newname)
@_except_retry
def has_collname(self, collname):
return self._conn.exists(collname)
@_except_retry
def type_collname(self, collname):
# 返回文档类型("hash","list","set","zset","string","none")
return self._conn.type(collname)
@_except_retry
def collnames_count(self):
# 统计有多少个文档, 如果是集群则返回一个dict 如 {'xxx.xxx.xxx.xxx:1234': 100}
return self._conn.dbsize()
# endregion
# region 列表操作
@_except_retry
def list_push(self, text: str, front=True):
# 放入一个字符串, 返回队列总数
if front:
return self._conn.lpush(self.collname, text)
else:
return self._conn.rpush(self.collname, text)
@_except_retry
def list_pop(self, front=True):
# 无数据时返回None
if front:
return self._conn.lpop(self.collname)
else:
return self._conn.rpop(self.collname)
@_except_retry
def list_count(self, collname=None):
return self._conn.llen(collname or self.collname)
def list_push_dict(self, item: dict, front=True, encode_chinese=True):
# 返回队列总数
text = json.dumps(item, ensure_ascii=encode_chinese)
return self.list_push(text, front=front)
def list_pop_dict(self, front=True, default=None):
text = self.list_pop(front=front)
if not text is None:
return json.loads(text)
return default
@_except_retry
def list_get_datas(self, start=0, end=-1):
# 列表切片, 和python不同的是, 包含end位置的元素
return self._conn.lrange(self.collname, start, end)
@_except_retry
def list_iter(self):
range_count = 10
count = self.list_count()
index = 0
while index < count:
datas = self.list_get_datas(start=index, end=index + range_count - 1)
for data in datas:
yield data
index += len(datas)
if not datas or index >= count:
return
# endregion
# region 集合操作
@_except_retry
def set_add(self, data):
# 返回是否成功
return self._conn.sadd(self.collname, data) == 1
@_except_retry
def set_add_values(self, *data):
# 返回添加成功的数量
return self._conn.sadd(self.collname, *data)
@_except_retry
def set_remove(self, *data):
# 删除多个值
return self._conn.srem(self.collname, *data)
@_except_retry
def set_count(self, collname=None):
return self._conn.scard(collname or self.collname)
@_except_retry
def set_has(self, value):
# 是否存在某个值
return self._conn.sismember(self.collname, value)
@_except_retry
def set_get_datas(self):
# 返回一个set, 包含redis中该set的所有数据
return self._conn.smembers(self.collname)
@_except_retry
def set_iter(self):
# 迭代返回一个set中所有的数据
return self._conn.sscan_iter(self.collname)
# endregion
# region 哈希操作
@_except_retry
def hash_set(self, key, value):
# 设置数据, 返回0表示修改,返回1表示创建
return self._conn.hset(self.collname, key, value)
@_except_retry
def hash_set_values(self, mapping: dict):
# 设置多个数据, 返回是否成功
return self._conn.hmset(self.collname, mapping)
@_except_retry
def hash_get(self, key):
# 获取一个key的值, 失败返回None
return self._conn.hget(self.collname, key)
@_except_retry
def hash_remove(self, *keys):
# 删除多个键, 返回实际删除数量
return self._conn.hdel(self.collname, *keys)
@_except_retry
def hash_incrby(self, key, amount=1):
# 自增, 返回自增后的值
return self._conn.hincrby(self.collname, key, amount=amount)
@_except_retry
def hash_count(self, collname=None):
return self._conn.hlen(collname or self.collname)
@_except_retry
def hash_has(self, key):
# 是否存在某个键
return self._conn.hexists(self.collname, key)
@_except_retry
def hash_keys(self):
# 返回字典中所有的key
return self._conn.hkeys(self.collname)
@_except_retry
def hash_get_datas(self):
# 返回一个字典, 包含redis中该hash中的所有数据
return self._conn.hgetall(self.collname)
@_except_retry
def hash_iter(self):
# 迭代返回一个hash中所有的数据, 每次返回的是一个元组(键, 值)
return self._conn.hscan_iter(self.collname)
# endregion | zredis | /zredis-0.1.0-py3-none-any.whl/zredis.py | zredis.py |
# Zrelay HAT
Zrelay HAT is a 3 Relay module without utilizing any of the raspberry pi zero's GPIO pins.
Installation guide for ZRelay
<!-- This is the command to control [4-Relay Stackable Card for Raspberry Pi]-->
<!-- ## Install
Don't forget to enable I2C communication:
```bash
~$ sudo raspi-config
``` -->
## Install
```bash
~$ sudo apt-get update
~$ sudo apt-get install build-essential python-pip python-dev python-smbus git
# ~$ git clone https://github.com/SequentMicrosystems/4relay-rpi.git
~$ sudo python setup.py install
```
## Update
```bash
~$ git pull
~$ sudo python setup.py install
```
## Usage
```bash
~$ python3
Python 3.5.3 (default, Sep 27 2018, 17:25:39)
[GCC 6.3.0 20170516] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import zrelay
>>> zrelay.set_zrelay_state(0, 1, 1)
0
>>>
```
## Functions
### set_zrelay_state(stack_level , relay_num , state)
Set state of any one relay at a time.
stack_level - stack level can be selected from selecting address jumpers. Stack can be between 0 to 7.
relay_num - Can be from 1 to 3 for each stack.
state - 1: turn ON,
0: turn OFF
Function will raise ValueError if stack_level is not between 0 to 7 and relay_num not between 1 to 3.
eg.
<!-- To turn ON relay 2 of stack 1 (i2C address- 19) -->
>>> set_zrelay_state(1, 2, 1)
### set_all_zrelay_state(stack_level, state)
Set all relays state of a stack.
stack_level - stack level can be selected from selecting address jumpers. Stack can be between 0 to 7.
state - 1: turn ON,
0: turn OFF
Function will raise ValueError if stack_level is not between 0 to 7.
eg.
<!-- To turn ON all relays of stack 1 (i2C address- 19) -->
>>> set_zrelay_state(1, 2, 1)
### get_zrelay_state(stack_level, relay_num)
Get state of any one relay at a time.
stack_level - stack level can be selected from selecting address jumpers. Stack can be between 0 to 7.
relay_num - Can be from 1 to 3 for each stack.
Function will return 0 if specified relay is OFF and 1 if relay is ON.
It will raise ValueError if stack_level is not between 0 to 7 and relay_num not between 1 to 3.
eg.
<!-- To get state of relay 2 of stack 1 (i2C address- 19) -->
>>> get_zrelay_state(1, 2)
1
### get_all_zrelay_state(stack_level)
Get all relays state of a stack.
stack_level - stack level can be selected from selecting address jumpers. Stack can be between 0 to 7.
Function will return 3 bit string value, each bit representing ON/OFF state of that particular relay. Relay numbers are from Right to Left, where Relay 1 is the MSB.
It will raise ValueError if stack_level is not between 0 to 7.
eg.
<!-- To get state of stack 1 (i2C address- 19) -->
>>> get_all_zrelay_state(1)
'101'
| zrelay | /zrelay-0.0.2.tar.gz/zrelay-0.0.2/README.md | README.md |
import copy
import os
import socketserver
import UDP
from core import Tools, Router, ThreadPool
from core.Router import ROOT_ROUTER_PATH
if not os.path.exists(ROOT_ROUTER_PATH):
os.mkdir(ROOT_ROUTER_PATH)
def add_router(**params):
# 创建一个路由器实例
router = Router.Router(name=params['name'], ip_address=params['ip'])
# 向此路由表添加一条新路由信息
router.get_router_list().add_router_info(data={'target': params['target'],
'distance': params['distance'],
'next': params['next']})
# 保存此路由表
router.get_router_list().save_router_list()
# 日志显示
Tools.show_log(log_data=params['log_show'],
data=Tools.get_now_time(0) + '新建路由\n')
# 路由信息显示
show_data = Tools.get_now_time(0) + '新建路由\n' + ' 路由器名称:' + \
router.get_router_name() + ' 路由器地址:' + router.get_router_ip_address()[0] + ':' + \
str(router.get_router_ip_address()[1]) + '\n' + ' ' + '路由表:\n' + ' ' + \
'\n '.join([router_data['target'] + ' ' + router_data['distance'] +
' ' + router_data['next'] for router_data
in router.get_router_list().get_router_info()]) + '\n'
Tools.show_router_info(route_data=params['router_show'], data=show_data)
def update_router(**params):
# 更新全部路由表
# 获得相邻路由器表
all_near_router = Tools.get_all_near_router(Router.ROOT_ROUTER_PATH)
# 创建线程池
pool = ThreadPool.Pool(max_workers=2)
# 设置连接列表
socket_list = []
# 全部路由器
all_router = Tools.get_all_router(Router.ROOT_ROUTER_PATH)
copy_all_router = copy.deepcopy(all_router)
# 更新提示
Tools.show_router_info(route_data=params['router_show'],
data=Tools.get_now_time(0) + '\n' +
'==============更新路由==============\n')
# 开始发送
for router in all_router:
ip_port = router.get_router_ip_address()
# 建立多线程UDP服务端
print("IP_PORT:" + str(ip_port))
udp_sever = socketserver.ThreadingUDPServer(ip_port, UDP.Sever.Server)
socket_list.append(udp_sever)
for near_router in router.get_near_router(all_near_router):
# 添加子线程
pool.add_method(method=near_router.send_router_list, sever=udp_sever,
host=ip_port[0], port=ip_port[1],
path=router.get_router_path() + '/' + near_router.get_router_name() + '.rl')
Tools.show_router_info(route_data=params['router_show'],
data=Tools.get_now_time(0) +
near_router.get_router_name() + '向' +
router.get_router_name() + '发送了路由表\n')
# 等待子线程全部运行结束
pool.wait()
# 关闭所有连接
for soc in socket_list:
soc.server_close()
# 进行路由表更新
for router in copy_all_router:
router.update_router_list()
# 日志显示
Tools.show_log(log_data=params['log_show'],
data=Tools.get_now_time(0) + '更新路由\n')
# 路由信息显示
data1 = Tools.get_now_time(0) + '更新后路由表\n'
data2 = ''
for router in copy_all_router:
data2 = data2 + ' 路由器名称:' + \
router.get_router_name() + ' 路由器地址:' + router.get_router_ip_address()[0] + ':' + \
str(router.get_router_ip_address()[1]) + '\n' + ' ' + '路由表:\n' + ' ' + \
'\n '.join([router_data['target'] + ' ' + str(router_data['distance']) +
' ' + router_data['next'] for router_data
in router.get_router_list().get_router_info()]) + '\n'
Tools.show_router_info(route_data=params['router_show'],
data=data1 + data2)
def update_step_router(**params):
# 更新全部路由表
# # 获得相邻路由器表
# all_near_router = Tools.get_all_near_router(Router.ROOT_ROUTER_PATH)
# # 创建线程池
pool = ThreadPool.Pool(max_workers=1)
# # 设置连接列表
socket_list = []
# # 全部路由器
# all_router = Tools.get_all_router(Router.ROOT_ROUTER_PATH)
# copy_all_router = copy.deepcopy(all_router)
# 更新提示
Tools.show_router_info(route_data=params['router_show'],
data=Tools.get_now_time(0) + '\n' +
'==============更新路由==============\n')
# 开始发送
# for router in Step.all_router:
Step.update()
if (Step.step_index >= Step.all_router.__len__()):
Step.step_index=0
router = Step.all_router[Step.step_index]
ip_port = router.get_router_ip_address()
# 建立多线程UDP服务端
print("IP_PORT:" + str(ip_port))
udp_sever = socketserver.ThreadingUDPServer(ip_port, UDP.Sever.Server)
socket_list.append(udp_sever)
for near_router in router.get_near_router(Step.all_near_router):
# 添加子线程
pool.add_method(method=near_router.send_router_list, sever=udp_sever,
host=ip_port[0], port=ip_port[1],
path=router.get_router_path() + '/' + near_router.get_router_name() + '.rl')
Tools.show_router_info(route_data=params['router_show'],
data=Tools.get_now_time(0) +
near_router.get_router_name() + '向' +
router.get_router_name() + '发送了路由表\n')
# 等待子线程全部运行结束
pool.wait()
# 关闭所有连接
for soc in socket_list:
soc.server_close()
# 进行路由表更新
for router in Step.copy_all_router:
router.update_router_list()
# 日志显示
Tools.show_log(log_data=params['log_show'],
data=Tools.get_now_time(0) + '更新路由\n')
# 路由信息显示
data1 = Tools.get_now_time(0) + '更新后路由表\n'
data2 = ''
for router in Step.copy_all_router:
data2 = data2 + ' 路由器名称:' + \
router.get_router_name() + ' 路由器地址:' + router.get_router_ip_address()[0] + ':' + \
str(router.get_router_ip_address()[1]) + '\n' + ' ' + '路由表:\n' + ' ' + \
'\n '.join([router_data['target'] + ' ' + str(router_data['distance']) +
' ' + router_data['next'] for router_data
in router.get_router_list().get_router_info()]) + '\n'
Step.step_index = Step.step_index + 1
Tools.show_router_info(route_data=params['router_show'],
data=data1 + data2)
def fault_test(**params):
Tools.show_router_info(route_data=params['router_show'],
data=Tools.get_now_time(0) +
'\n==============网络故障==============\n')
Tools.show_router_info(route_data=params['router_show'],
data=' 故障网络: ' + params['fault'] + '\n')
# 使指定网络故障
for router in Tools.get_all_router(Router.ROOT_ROUTER_PATH):
sign = 0 # 标志位
for router_info in [router_info for router_info in router.get_router_list().get_router_info()
if (router_info['target'] == params['fault'])
and (int(router_info['distance']) == 1)]:
router_info['distance'] = '16'
# 路由信息显示
data = ' 路由器名称:' + router.get_router_name() + \
' 路由器地址:' + router.get_router_ip_address()[0] + ':' + str(router.get_router_ip_address()[1]) + \
'\n' + ' ' + ' 路由表:\n' + \
' ' + '\n '.join([router_data['target'] +
' ' + router_data['distance']
+ ' ' + router_data['next']
for router_data in router.get_router_list().get_router_info()]) + '\n'
Tools.show_router_info(route_data=params['router_show'], data=data)
sign = 1
break
if sign == 1:
# 保存路由表
router.get_router_list().save_router_list()
# 日志显示
Tools.show_log(log_data=params['log_show'], data=Tools.get_now_time(0) + '网络故障\n')
class Step:
# 获得相邻路由器表
all_near_router = Tools.get_all_near_router(Router.ROOT_ROUTER_PATH)
# 创建线程池
pool = ThreadPool.Pool(max_workers=2)
# 设置连接列表
socket_list = []
# 全部路由器
all_router = Tools.get_all_router(Router.ROOT_ROUTER_PATH)
copy_all_router = copy.deepcopy(all_router)
step_index = 0
@classmethod
def update(cls):
# 获得相邻路由器表
cls.all_near_router = Tools.get_all_near_router(Router.ROOT_ROUTER_PATH)
# 设置连接列表
cls.socket_list = []
# 全部路由器
cls.all_router = Tools.get_all_router(Router.ROOT_ROUTER_PATH)
cls.copy_all_router = copy.deepcopy(cls.all_router)
pass | zrip | /zrip-1.4-py3-none-any.whl/core/Event.py | Event.py |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.