Dataset columns (type and observed length or value range):

body_hash: string, length 64 to 64
body: string, length 23 to 109k
docstring: string, length 1 to 57k
path: string, length 4 to 198
name: string, length 1 to 115
repository_name: string, length 7 to 111
repository_stars: float64, 0 to 191k
lang: string, 1 distinct value (python)
body_without_docstring: string, length 14 to 108k
unified: string, length 45 to 133k
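The listing above is the column summary for this dump; the entries that follow are individual records. As a minimal, hypothetical consumption sketch (the dataset's real Hub id is not given in this dump, so the id below is a placeholder), the columns can be streamed and inspected like this:

```python
# Sketch only: stream a few records and print selected columns.
# "user/python-docstring-corpus" is a placeholder id, not the real dataset name.
from itertools import islice
from datasets import load_dataset

ds = load_dataset("user/python-docstring-corpus", split="train", streaming=True)
for row in islice(ds, 2):
    print(row["name"], row["path"], int(row["repository_stars"]))
    print(row["docstring"][:80])
```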
b6c137c7b44ca3bf231f95025762f26405e517ef1c07fd81da07be6c5edacd5e
def ToMixedPrecision(mixed_precision_type='float16', missing_op_mode=1): '\n Automatic mixed precision rewriter. Rewrite an FP32 relay graph into a version\n where as many operations as possible are in the target mixed_precision_type.\n\n Parameters\n ----------\n mixed_precision_type: str\n The target datatype to transform operations in the graph to use.\n\n missing_op_mode: int\n Determines how to handle ops not registered with FTVMMixedPrecisionConversionType\n 0: Does not allow any missing ops. Will throw errors when encountering any.\n 1: Allow missing ops but emit warnings.\n 2: Allow missing ops and silently ignore them.\n\n Returns\n -------\n ret : tvm.transform.Pass\n The registered pass.\n ' if ((missing_op_mode < 0) or (missing_op_mode > 2)): raise ValueError('Missing op mode is either 0, 1, or 2') return _ffi_api.ToMixedPrecision(mixed_precision_type, missing_op_mode)
Automatic mixed precision rewriter. Rewrite an FP32 relay graph into a version where as many operations as possible are in the target mixed_precision_type. Parameters ---------- mixed_precision_type: str The target datatype to transform operations in the graph to use. missing_op_mode: int Determines how to handle ops not registered with FTVMMixedPrecisionConversionType 0: Does not allow any missing ops. Will throw errors when encountering any. 1: Allow missing ops but emit warnings. 2: Allow missing ops and silently ignore them. Returns ------- ret : tvm.transform.Pass The registered pass.
python/tvm/relay/transform/transform.py
ToMixedPrecision
ryanstout/tvm
2,084
python
def ToMixedPrecision(mixed_precision_type='float16', missing_op_mode=1): '\n Automatic mixed precision rewriter. Rewrite an FP32 relay graph into a version\n where as many operations as possible are in the target mixed_precision_type.\n\n Parameters\n ----------\n mixed_precision_type: str\n The target datatype to transform operations in the graph to use.\n\n missing_op_mode: int\n Determines how to handle ops not registered with FTVMMixedPrecisionConversionType\n 0: Does not allow any missing ops. Will throw errors when encountering any.\n 1: Allow missing ops but emit warnings.\n 2: Allow missing ops and silently ignore them.\n\n Returns\n -------\n ret : tvm.transform.Pass\n The registered pass.\n ' if ((missing_op_mode < 0) or (missing_op_mode > 2)): raise ValueError('Missing op mode is either 0, 1, or 2') return _ffi_api.ToMixedPrecision(mixed_precision_type, missing_op_mode)
def ToMixedPrecision(mixed_precision_type='float16', missing_op_mode=1): '\n Automatic mixed precision rewriter. Rewrite an FP32 relay graph into a version\n where as many operations as possible are in the target mixed_precision_type.\n\n Parameters\n ----------\n mixed_precision_type: str\n The target datatype to transform operations in the graph to use.\n\n missing_op_mode: int\n Determines how to handle ops not registered with FTVMMixedPrecisionConversionType\n 0: Does not allow any missing ops. Will throw errors when encountering any.\n 1: Allow missing ops but emit warnings.\n 2: Allow missing ops and silently ignore them.\n\n Returns\n -------\n ret : tvm.transform.Pass\n The registered pass.\n ' if ((missing_op_mode < 0) or (missing_op_mode > 2)): raise ValueError('Missing op mode is either 0, 1, or 2') return _ffi_api.ToMixedPrecision(mixed_precision_type, missing_op_mode)<|docstring|>Automatic mixed precision rewriter. Rewrite an FP32 relay graph into a version where as many operations as possible are in the target mixed_precision_type. Parameters ---------- mixed_precision_type: str The target datatype to transform operations in the graph to use. missing_op_mode: int Determines how to handle ops not registered with FTVMMixedPrecisionConversionType 0: Does not allow any missing ops. Will throw errors when encountering any. 1: Allow missing ops but emit warnings. 2: Allow missing ops and silently ignore them. Returns ------- ret : tvm.transform.Pass The registered pass.<|endoftext|>
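A hedged usage sketch for the ToMixedPrecision record above, assuming a TVM build whose relay.transform module exposes this pass; the small conv2d module is illustrative only and not taken from the dataset:

```python
# Sketch only: build a tiny FP32 Relay module and rewrite it to float16
# where supported, emitting warnings for unregistered ops (missing_op_mode=1).
import tvm
from tvm import relay

x = relay.var("x", shape=(1, 3, 32, 32), dtype="float32")
w = relay.var("w", shape=(8, 3, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, kernel_size=(3, 3), channels=8)
mod = tvm.IRModule.from_expr(relay.Function([x, w], y))

mod = relay.transform.InferType()(mod)
mod = relay.transform.ToMixedPrecision("float16", missing_op_mode=1)(mod)
print(mod)
```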
fa650b45e359f6702edf0f56f463969041a6f2b86c9cff893ece195bf3dae3cb
def SplitArgs(max_function_args): 'Split function with huge number of arguments to smaller pieces.\n\n Returns\n -------\n ret : tvm.transform.Pass\n The registered pass for constant folding.\n ' return _ffi_api.SplitArgs(max_function_args)
Split function with huge number of arguments to smaller pieces. Returns ------- ret : tvm.transform.Pass The registered pass for constant folding.
python/tvm/relay/transform/transform.py
SplitArgs
ryanstout/tvm
2,084
python
def SplitArgs(max_function_args): 'Split function with huge number of arguments to smaller pieces.\n\n Returns\n -------\n ret : tvm.transform.Pass\n The registered pass for constant folding.\n ' return _ffi_api.SplitArgs(max_function_args)
def SplitArgs(max_function_args): 'Split function with huge number of arguments to smaller pieces.\n\n Returns\n -------\n ret : tvm.transform.Pass\n The registered pass for constant folding.\n ' return _ffi_api.SplitArgs(max_function_args)<|docstring|>Split function with huge number of arguments to smaller pieces. Returns ------- ret : tvm.transform.Pass The registered pass for constant folding.<|endoftext|>
4e11ba26e6553ad37db21277518d63636c70087e2468fc68b830a997e977f30b
def create_function_pass(pass_arg): 'Internal function that creates a function pass' fname = (name if name else pass_arg.__name__) info = tvm.transform.PassInfo(opt_level, fname, required) if inspect.isclass(pass_arg): return _wrap_class_function_pass(pass_arg, info) if (not isinstance(pass_arg, (types.FunctionType, types.LambdaType))): raise TypeError('pass_func must be a callable for Module pass') return _ffi_api.MakeFunctionPass(pass_arg, info)
Internal function that creates a function pass
python/tvm/relay/transform/transform.py
create_function_pass
ryanstout/tvm
2,084
python
def create_function_pass(pass_arg): fname = (name if name else pass_arg.__name__) info = tvm.transform.PassInfo(opt_level, fname, required) if inspect.isclass(pass_arg): return _wrap_class_function_pass(pass_arg, info) if (not isinstance(pass_arg, (types.FunctionType, types.LambdaType))): raise TypeError('pass_func must be a callable for Module pass') return _ffi_api.MakeFunctionPass(pass_arg, info)
def create_function_pass(pass_arg): fname = (name if name else pass_arg.__name__) info = tvm.transform.PassInfo(opt_level, fname, required) if inspect.isclass(pass_arg): return _wrap_class_function_pass(pass_arg, info) if (not isinstance(pass_arg, (types.FunctionType, types.LambdaType))): raise TypeError('pass_func must be a callable for Module pass') return _ffi_api.MakeFunctionPass(pass_arg, info)<|docstring|>Internal function that creates a function pass<|endoftext|>
ebf49680dc3a9339e745d2d4982fc2310655034f159e5fbb04a8d887eab792be
def makedirs(config): 'Create directories if not exist.' if (not os.path.exists(config.log_dir)): os.makedirs(config.log_dir) if (not os.path.exists(config.model_save_dir)): os.makedirs(config.model_save_dir) if (not os.path.exists(config.sample_dir)): os.makedirs(config.sample_dir) if (not os.path.exists(config.result_dir)): os.makedirs(config.result_dir)
Create directories if not exist.
torchbenchmark/models/pytorch_stargan/main.py
makedirs
ramiro050/benchmark
384
python
def makedirs(config): if (not os.path.exists(config.log_dir)): os.makedirs(config.log_dir) if (not os.path.exists(config.model_save_dir)): os.makedirs(config.model_save_dir) if (not os.path.exists(config.sample_dir)): os.makedirs(config.sample_dir) if (not os.path.exists(config.result_dir)): os.makedirs(config.result_dir)
def makedirs(config): if (not os.path.exists(config.log_dir)): os.makedirs(config.log_dir) if (not os.path.exists(config.model_save_dir)): os.makedirs(config.model_save_dir) if (not os.path.exists(config.sample_dir)): os.makedirs(config.sample_dir) if (not os.path.exists(config.result_dir)): os.makedirs(config.result_dir)<|docstring|>Create directories if not exist.<|endoftext|>
08f99e65fa1fec8ca5b82e9cb35f5f02c05c87ab481de3ee039604f8bcb6796e
def __init__(self, dungeon_type, dungeon_num) -> None: 'コンストラクタ\n lv,hp,atk,skill[0]~[2],exp,moneyはリストとなっており、[0]がプレイヤーの値、[1]が敵の値\n ' self.ans = ['12345', 'abcde'] self.turn_count = [0, 0] self.hist = [[], []] self.max_ans = 16 self.length = 5 self.dungeon_type = dungeon_type self.dungeon_num = dungeon_num self.book = openpyxl.load_workbook('systems/base.xlsx', data_only=True) self.mobname = self.vlookup(self.choose_dungeon(), str(self.dungeon_num), 3) self.mobsheet = self.book[self.mobname] self.mysheet = self.book['プレイヤーステータス'] self.vs_sheet = [self.mysheet, self.mobsheet] self.lv = [0, 0] self.hp = [0, 0] self.atk = [0, 0] self.skill = [[0, 0] for j in range(3)] self.exp = [0, 0] self.money = [0, 0] for (num, sheet) in enumerate(self.vs_sheet): self.lv[num] = self.vlookup(sheet, 'lv', 2) self.hp[num] = self.vlookup(sheet, 'hp', 2) self.atk[num] = self.vlookup(sheet, 'atk', 2) for j in range(3): self.skill[j][num] = self.vlookup(sheet, 'skill{}'.format(j), 2) self.exp[num] = self.vlookup(sheet, 'exp', 2) self.money[num] = self.vlookup(sheet, 'money', 2) self.droplist = [] print('野生の{}が現れた!'.format(self.mobname))
コンストラクタ lv,hp,atk,skill[0]~[2],exp,moneyはリストとなっており、[0]がプレイヤーの値、[1]が敵の値
game_system/systems/boss.py
__init__
TakenokoEmpire/mypj
0
python
def __init__(self, dungeon_type, dungeon_num) -> None: 'コンストラクタ\n lv,hp,atk,skill[0]~[2],exp,moneyはリストとなっており、[0]がプレイヤーの値、[1]が敵の値\n ' self.ans = ['12345', 'abcde'] self.turn_count = [0, 0] self.hist = [[], []] self.max_ans = 16 self.length = 5 self.dungeon_type = dungeon_type self.dungeon_num = dungeon_num self.book = openpyxl.load_workbook('systems/base.xlsx', data_only=True) self.mobname = self.vlookup(self.choose_dungeon(), str(self.dungeon_num), 3) self.mobsheet = self.book[self.mobname] self.mysheet = self.book['プレイヤーステータス'] self.vs_sheet = [self.mysheet, self.mobsheet] self.lv = [0, 0] self.hp = [0, 0] self.atk = [0, 0] self.skill = [[0, 0] for j in range(3)] self.exp = [0, 0] self.money = [0, 0] for (num, sheet) in enumerate(self.vs_sheet): self.lv[num] = self.vlookup(sheet, 'lv', 2) self.hp[num] = self.vlookup(sheet, 'hp', 2) self.atk[num] = self.vlookup(sheet, 'atk', 2) for j in range(3): self.skill[j][num] = self.vlookup(sheet, 'skill{}'.format(j), 2) self.exp[num] = self.vlookup(sheet, 'exp', 2) self.money[num] = self.vlookup(sheet, 'money', 2) self.droplist = [] print('野生の{}が現れた!'.format(self.mobname))
def __init__(self, dungeon_type, dungeon_num) -> None: 'コンストラクタ\n lv,hp,atk,skill[0]~[2],exp,moneyはリストとなっており、[0]がプレイヤーの値、[1]が敵の値\n ' self.ans = ['12345', 'abcde'] self.turn_count = [0, 0] self.hist = [[], []] self.max_ans = 16 self.length = 5 self.dungeon_type = dungeon_type self.dungeon_num = dungeon_num self.book = openpyxl.load_workbook('systems/base.xlsx', data_only=True) self.mobname = self.vlookup(self.choose_dungeon(), str(self.dungeon_num), 3) self.mobsheet = self.book[self.mobname] self.mysheet = self.book['プレイヤーステータス'] self.vs_sheet = [self.mysheet, self.mobsheet] self.lv = [0, 0] self.hp = [0, 0] self.atk = [0, 0] self.skill = [[0, 0] for j in range(3)] self.exp = [0, 0] self.money = [0, 0] for (num, sheet) in enumerate(self.vs_sheet): self.lv[num] = self.vlookup(sheet, 'lv', 2) self.hp[num] = self.vlookup(sheet, 'hp', 2) self.atk[num] = self.vlookup(sheet, 'atk', 2) for j in range(3): self.skill[j][num] = self.vlookup(sheet, 'skill{}'.format(j), 2) self.exp[num] = self.vlookup(sheet, 'exp', 2) self.money[num] = self.vlookup(sheet, 'money', 2) self.droplist = [] print('野生の{}が現れた!'.format(self.mobname))<|docstring|>コンストラクタ lv,hp,atk,skill[0]~[2],exp,moneyはリストとなっており、[0]がプレイヤーの値、[1]が敵の値<|endoftext|>
4964595e05db089d3e229e690c6cfe79fdcc044caccce5ab0ddf10a55070b57d
def vlookup(self, sheet, index, column_order): 'excelのvlookup関数を再現。\n 「完全一致」のみ。\n ' for i in range(500): output = 0 if (str(sheet.cell(row=(i + 1), column=1).value) == index): output = sheet.cell(row=(i + 1), column=column_order).value return output return 'False'
excelのvlookup関数を再現。 「完全一致」のみ。
game_system/systems/boss.py
vlookup
TakenokoEmpire/mypj
0
python
def vlookup(self, sheet, index, column_order): 'excelのvlookup関数を再現。\n 「完全一致」のみ。\n ' for i in range(500): output = 0 if (str(sheet.cell(row=(i + 1), column=1).value) == index): output = sheet.cell(row=(i + 1), column=column_order).value return output return 'False'
def vlookup(self, sheet, index, column_order): 'excelのvlookup関数を再現。\n 「完全一致」のみ。\n ' for i in range(500): output = 0 if (str(sheet.cell(row=(i + 1), column=1).value) == index): output = sheet.cell(row=(i + 1), column=column_order).value return output return 'False'<|docstring|>excelのvlookup関数を再現。 「完全一致」のみ。<|endoftext|>
eaef01d308fa89381d7f8c7b24af79dbb789c025d2c04e502c2fea5408d4d9ce
def x_index(self, sheet, index): 'excelのindex関数もどきを再現\n 該当する文字がある行番号を返す(先頭列に限る)' for i in range(500): output = 0 if (str(sheet.cell(row=(i + 1), column=1).value) == index): return (i + 1) return 'False'
excelのindex関数もどきを再現 該当する文字がある行番号を返す(先頭列に限る)
game_system/systems/boss.py
x_index
TakenokoEmpire/mypj
0
python
def x_index(self, sheet, index): 'excelのindex関数もどきを再現\n 該当する文字がある行番号を返す(先頭列に限る)' for i in range(500): output = 0 if (str(sheet.cell(row=(i + 1), column=1).value) == index): return (i + 1) return 'False'
def x_index(self, sheet, index): 'excelのindex関数もどきを再現\n 該当する文字がある行番号を返す(先頭列に限る)' for i in range(500): output = 0 if (str(sheet.cell(row=(i + 1), column=1).value) == index): return (i + 1) return 'False'<|docstring|>excelのindex関数もどきを再現 該当する文字がある行番号を返す(先頭列に限る)<|endoftext|>
10f946eda5d7eb2677fa236b44f9ffbcbb248a51f2d78dd4a47f9f9e326025e3
def after_battle(self): '戦闘後の処理\n ステータスの更新\n 経験値の更新\n 金の更新\n アイテムの更新\n 以上の情報をエクセルファイルに更新・保存\n ' new_lv = self.lv[0] new_hp = self.vlookup(self.book['level_table'], str(new_lv), 3) new_atk = self.vlookup(self.book['level_table'], str(new_lv), 4) self.mysheet.cell(row=self.x_index(self.mysheet, 'lv'), column=2, value=new_lv) self.mysheet.cell(row=self.x_index(self.mysheet, 'hp'), column=2, value=new_hp) self.mysheet.cell(row=self.x_index(self.mysheet, 'atk'), column=2, value=new_atk) self.mysheet.cell(row=self.x_index(self.mysheet, 'exp'), column=2, value=self.exp[0]) self.mysheet.cell(row=self.x_index(self.mysheet, 'money'), column=2, value=self.money[0]) if (self.x_index(self.book['道具箱'], self.droplist[0]) == 'False'): self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], 'empty'), column=2, value=(self.vlookup(self.book['道具箱'], 'empty', 2) + 1)) self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], 'empty'), column=1, value=self.droplist[0]) else: self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], self.droplist[0]), column=2, value=(self.vlookup(self.book['道具箱'], self.droplist[0], 2) + 1)) print(self.x_index(self.book['道具箱'], 'empty')) '開いてる途中だとエラー出るよ' try: self.book.save('systems/base.xlsx') except PermissionError: print('エクセルファイルを閉じてください') exit()
戦闘後の処理 ステータスの更新 経験値の更新 金の更新 アイテムの更新 以上の情報をエクセルファイルに更新・保存
game_system/systems/boss.py
after_battle
TakenokoEmpire/mypj
0
python
def after_battle(self): '戦闘後の処理\n ステータスの更新\n 経験値の更新\n 金の更新\n アイテムの更新\n 以上の情報をエクセルファイルに更新・保存\n ' new_lv = self.lv[0] new_hp = self.vlookup(self.book['level_table'], str(new_lv), 3) new_atk = self.vlookup(self.book['level_table'], str(new_lv), 4) self.mysheet.cell(row=self.x_index(self.mysheet, 'lv'), column=2, value=new_lv) self.mysheet.cell(row=self.x_index(self.mysheet, 'hp'), column=2, value=new_hp) self.mysheet.cell(row=self.x_index(self.mysheet, 'atk'), column=2, value=new_atk) self.mysheet.cell(row=self.x_index(self.mysheet, 'exp'), column=2, value=self.exp[0]) self.mysheet.cell(row=self.x_index(self.mysheet, 'money'), column=2, value=self.money[0]) if (self.x_index(self.book['道具箱'], self.droplist[0]) == 'False'): self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], 'empty'), column=2, value=(self.vlookup(self.book['道具箱'], 'empty', 2) + 1)) self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], 'empty'), column=1, value=self.droplist[0]) else: self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], self.droplist[0]), column=2, value=(self.vlookup(self.book['道具箱'], self.droplist[0], 2) + 1)) print(self.x_index(self.book['道具箱'], 'empty')) '開いてる途中だとエラー出るよ' try: self.book.save('systems/base.xlsx') except PermissionError: print('エクセルファイルを閉じてください') exit()
def after_battle(self): '戦闘後の処理\n ステータスの更新\n 経験値の更新\n 金の更新\n アイテムの更新\n 以上の情報をエクセルファイルに更新・保存\n ' new_lv = self.lv[0] new_hp = self.vlookup(self.book['level_table'], str(new_lv), 3) new_atk = self.vlookup(self.book['level_table'], str(new_lv), 4) self.mysheet.cell(row=self.x_index(self.mysheet, 'lv'), column=2, value=new_lv) self.mysheet.cell(row=self.x_index(self.mysheet, 'hp'), column=2, value=new_hp) self.mysheet.cell(row=self.x_index(self.mysheet, 'atk'), column=2, value=new_atk) self.mysheet.cell(row=self.x_index(self.mysheet, 'exp'), column=2, value=self.exp[0]) self.mysheet.cell(row=self.x_index(self.mysheet, 'money'), column=2, value=self.money[0]) if (self.x_index(self.book['道具箱'], self.droplist[0]) == 'False'): self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], 'empty'), column=2, value=(self.vlookup(self.book['道具箱'], 'empty', 2) + 1)) self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], 'empty'), column=1, value=self.droplist[0]) else: self.book['道具箱'].cell(row=self.x_index(self.book['道具箱'], self.droplist[0]), column=2, value=(self.vlookup(self.book['道具箱'], self.droplist[0], 2) + 1)) print(self.x_index(self.book['道具箱'], 'empty')) '開いてる途中だとエラー出るよ' try: self.book.save('systems/base.xlsx') except PermissionError: print('エクセルファイルを閉じてください') exit()<|docstring|>戦闘後の処理 ステータスの更新 経験値の更新 金の更新 アイテムの更新 以上の情報をエクセルファイルに更新・保存<|endoftext|>
02df5a0b305666f620331c2ada8ef5f48f683419355754f80d540f56280ef4ba
@pytest.mark.settings(redirect_host='localhost:8081', redirect_behavior='spa', post_login_view='/login-redirect') def test_spa_get(app, client): "\n Test 'single-page-application' style redirects\n This uses json only.\n " with capture_flashes() as flashes: with capture_passwordless_login_requests() as requests: response = client.post('/login', json=dict(email='[email protected]'), headers={'Content-Type': 'application/json'}) assert (response.headers['Content-Type'] == 'application/json') token = requests[0]['login_token'] response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-redirect' == split.path) qparams = dict(parse_qsl(split.query)) assert (qparams['email'] == '[email protected]') assert (len(flashes) == 0)
Test 'single-page-application' style redirects This uses json only.
tests/test_passwordless.py
test_spa_get
biofool/flask-security
317
python
@pytest.mark.settings(redirect_host='localhost:8081', redirect_behavior='spa', post_login_view='/login-redirect') def test_spa_get(app, client): "\n Test 'single-page-application' style redirects\n This uses json only.\n " with capture_flashes() as flashes: with capture_passwordless_login_requests() as requests: response = client.post('/login', json=dict(email='[email protected]'), headers={'Content-Type': 'application/json'}) assert (response.headers['Content-Type'] == 'application/json') token = requests[0]['login_token'] response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-redirect' == split.path) qparams = dict(parse_qsl(split.query)) assert (qparams['email'] == '[email protected]') assert (len(flashes) == 0)
@pytest.mark.settings(redirect_host='localhost:8081', redirect_behavior='spa', post_login_view='/login-redirect') def test_spa_get(app, client): "\n Test 'single-page-application' style redirects\n This uses json only.\n " with capture_flashes() as flashes: with capture_passwordless_login_requests() as requests: response = client.post('/login', json=dict(email='[email protected]'), headers={'Content-Type': 'application/json'}) assert (response.headers['Content-Type'] == 'application/json') token = requests[0]['login_token'] response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-redirect' == split.path) qparams = dict(parse_qsl(split.query)) assert (qparams['email'] == '[email protected]') assert (len(flashes) == 0)<|docstring|>Test 'single-page-application' style redirects This uses json only.<|endoftext|>
910dec77783c9a862ff163c8359ba6a090615f2b8be22d50ab6bd4c67aaff1f4
@pytest.mark.settings(login_within='1 milliseconds', redirect_host='localhost:8081', redirect_behavior='spa', login_error_view='/login-error') def test_spa_get_bad_token(app, client, get_message): 'Test expired and invalid token' with capture_flashes() as flashes: with capture_passwordless_login_requests() as requests: response = client.post('/login', json=dict(email='[email protected]'), headers={'Content-Type': 'application/json'}) assert (response.headers['Content-Type'] == 'application/json') token = requests[0]['login_token'] time.sleep(1) response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-error' == split.path) qparams = dict(parse_qsl(split.query)) assert all(((k in qparams) for k in ['email', 'error', 'identity'])) msg = get_message('LOGIN_EXPIRED', within='1 milliseconds', email='[email protected]') assert (msg == qparams['error'].encode('utf-8')) token = 'WyIxNjQ2MzYiLCIxMzQ1YzBlZmVhM2VhZjYwODgwMDhhZGU2YzU0MzZjMiJd.BZEw_Q.lQyo3npdPZtcJ_sNHVHP103syjM&url_id=fbb89a8328e58c181ea7d064c2987874bc54a23d' response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-error' == split.path) qparams = dict(parse_qsl(split.query)) assert (len(qparams) == 1) assert all(((k in qparams) for k in ['error'])) msg = get_message('INVALID_LOGIN_TOKEN') assert (msg == qparams['error'].encode('utf-8')) assert (len(flashes) == 0)
Test expired and invalid token
tests/test_passwordless.py
test_spa_get_bad_token
biofool/flask-security
317
python
@pytest.mark.settings(login_within='1 milliseconds', redirect_host='localhost:8081', redirect_behavior='spa', login_error_view='/login-error') def test_spa_get_bad_token(app, client, get_message): with capture_flashes() as flashes: with capture_passwordless_login_requests() as requests: response = client.post('/login', json=dict(email='[email protected]'), headers={'Content-Type': 'application/json'}) assert (response.headers['Content-Type'] == 'application/json') token = requests[0]['login_token'] time.sleep(1) response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-error' == split.path) qparams = dict(parse_qsl(split.query)) assert all(((k in qparams) for k in ['email', 'error', 'identity'])) msg = get_message('LOGIN_EXPIRED', within='1 milliseconds', email='[email protected]') assert (msg == qparams['error'].encode('utf-8')) token = 'WyIxNjQ2MzYiLCIxMzQ1YzBlZmVhM2VhZjYwODgwMDhhZGU2YzU0MzZjMiJd.BZEw_Q.lQyo3npdPZtcJ_sNHVHP103syjM&url_id=fbb89a8328e58c181ea7d064c2987874bc54a23d' response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-error' == split.path) qparams = dict(parse_qsl(split.query)) assert (len(qparams) == 1) assert all(((k in qparams) for k in ['error'])) msg = get_message('INVALID_LOGIN_TOKEN') assert (msg == qparams['error'].encode('utf-8')) assert (len(flashes) == 0)
@pytest.mark.settings(login_within='1 milliseconds', redirect_host='localhost:8081', redirect_behavior='spa', login_error_view='/login-error') def test_spa_get_bad_token(app, client, get_message): with capture_flashes() as flashes: with capture_passwordless_login_requests() as requests: response = client.post('/login', json=dict(email='[email protected]'), headers={'Content-Type': 'application/json'}) assert (response.headers['Content-Type'] == 'application/json') token = requests[0]['login_token'] time.sleep(1) response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-error' == split.path) qparams = dict(parse_qsl(split.query)) assert all(((k in qparams) for k in ['email', 'error', 'identity'])) msg = get_message('LOGIN_EXPIRED', within='1 milliseconds', email='[email protected]') assert (msg == qparams['error'].encode('utf-8')) token = 'WyIxNjQ2MzYiLCIxMzQ1YzBlZmVhM2VhZjYwODgwMDhhZGU2YzU0MzZjMiJd.BZEw_Q.lQyo3npdPZtcJ_sNHVHP103syjM&url_id=fbb89a8328e58c181ea7d064c2987874bc54a23d' response = client.get(('/login/' + token)) assert (response.status_code == 302) split = urlsplit(response.headers['Location']) assert ('localhost:8081' == split.netloc) assert ('/login-error' == split.path) qparams = dict(parse_qsl(split.query)) assert (len(qparams) == 1) assert all(((k in qparams) for k in ['error'])) msg = get_message('INVALID_LOGIN_TOKEN') assert (msg == qparams['error'].encode('utf-8')) assert (len(flashes) == 0)<|docstring|>Test expired and invalid token<|endoftext|>
31569f5b4c66b2d86fd2ba6aec365648f73ea086b201048eecca63267af95f54
def test_pull_as_echo_argument(call_centos7, capstrip): '--pull should only be interpreted as an dog argument if it comes before the first argument' call_centos7('echo', '-n', '--pull') captured = capstrip.get() assert (captured == ('--pull', ''))
--pull should only be interpreted as an dog argument if it comes before the first argument
tests/test_dog_arguments.py
test_pull_as_echo_argument
marqvart/dog
2
python
def test_pull_as_echo_argument(call_centos7, capstrip): call_centos7('echo', '-n', '--pull') captured = capstrip.get() assert (captured == ('--pull', ''))
def test_pull_as_echo_argument(call_centos7, capstrip): call_centos7('echo', '-n', '--pull') captured = capstrip.get() assert (captured == ('--pull', ''))<|docstring|>--pull should only be interpreted as an dog argument if it comes before the first argument<|endoftext|>
5c5fa394fed362d20e559126fc3146e1e74bd91b76b1bcdbdf22df3d946487b8
def test_version(call_dog, capfd): '--version should just return the current dog version' call_dog('--version') captured = capfd.readouterr() assert re.match('dog version [0-9]+', captured.out)
--version should just return the current dog version
tests/test_dog_arguments.py
test_version
marqvart/dog
2
python
def test_version(call_dog, capfd): call_dog('--version') captured = capfd.readouterr() assert re.match('dog version [0-9]+', captured.out)
def test_version(call_dog, capfd): call_dog('--version') captured = capfd.readouterr() assert re.match('dog version [0-9]+', captured.out)<|docstring|>--version should just return the current dog version<|endoftext|>
50da369323770c478dc3039a02b18306b3dc3fe6d6b58d1bcdc521b84b74b7b5
def test_verbose(call_centos7, capfd): '--verbose should report the actual setup' call_centos7('--verbose', 'id') captured = capfd.readouterr() assert ('Dog Config' in captured.out) assert ("'verbose': True" in captured.out)
--verbose should report the actual setup
tests/test_dog_arguments.py
test_verbose
marqvart/dog
2
python
def test_verbose(call_centos7, capfd): call_centos7('--verbose', 'id') captured = capfd.readouterr() assert ('Dog Config' in captured.out) assert ("'verbose': True" in captured.out)
def test_verbose(call_centos7, capfd): call_centos7('--verbose', 'id') captured = capfd.readouterr() assert ('Dog Config' in captured.out) assert ("'verbose': True" in captured.out)<|docstring|>--verbose should report the actual setup<|endoftext|>
8ebc17e622c7753cb3100a78c58cf6da7e7803c0201b3a9caf75f14d0b5a8d2d
def test_stdin_testing_works(call_shell, capstrip): 'Just verifying that my stdin testing works before testing it with dog.' call_shell('echo hello world | cat -') captured = capstrip.get() assert (captured == ('hello world', ''))
Just verifying that my stdin testing works before testing it with dog.
tests/test_dog_arguments.py
test_stdin_testing_works
marqvart/dog
2
python
def test_stdin_testing_works(call_shell, capstrip): call_shell('echo hello world | cat -') captured = capstrip.get() assert (captured == ('hello world', ''))
def test_stdin_testing_works(call_shell, capstrip): call_shell('echo hello world | cat -') captured = capstrip.get() assert (captured == ('hello world', ''))<|docstring|>Just verifying that my stdin testing works before testing it with dog.<|endoftext|>
f9bd7a233fe5b61f96c2a9c8da605bd7fb48d5274b48c976d16dcbbf1c8e507e
def test_stdin(call_shell, capstrip, dog_env): 'stdin should be available from inside dog.' call_shell(f'echo hello world | {dog_env} cat') captured = capstrip.get() assert (captured == ('hello world', ''))
stdin should be available from inside dog.
tests/test_dog_arguments.py
test_stdin
marqvart/dog
2
python
def test_stdin(call_shell, capstrip, dog_env): call_shell(f'echo hello world | {dog_env} cat') captured = capstrip.get() assert (captured == ('hello world', ''))
def test_stdin(call_shell, capstrip, dog_env): call_shell(f'echo hello world | {dog_env} cat') captured = capstrip.get() assert (captured == ('hello world', ''))<|docstring|>stdin should be available from inside dog.<|endoftext|>
37e105858a366815c67e468f69766734e99bc0d9615b588931a29dbb2c6689cd
def test_auto_mount_works(call_centos7, capstrip): 'auto-mount is on by default, and should therefore show the files in the current directory.' call_centos7('ls') captured = capstrip.get() assert (captured == ('dog.config', ''))
auto-mount is on by default, and should therefore show the files in the current directory.
tests/test_dog_arguments.py
test_auto_mount_works
marqvart/dog
2
python
def test_auto_mount_works(call_centos7, capstrip): call_centos7('ls') captured = capstrip.get() assert (captured == ('dog.config', ''))
def test_auto_mount_works(call_centos7, capstrip): call_centos7('ls') captured = capstrip.get() assert (captured == ('dog.config', ''))<|docstring|>auto-mount is on by default, and should therefore show the files in the current directory.<|endoftext|>
5345e776e6fc9dc2efa17385b3cc71bf18dc9ec042c445c47d6dad43e2123040
def test_disabled_auto_mount(call_centos7, capstrip, tmp_path): 'disable auto-mount and make sure that we do not see the files in the current directory.' append_to_dog_config(tmp_path, ['auto-mount=False']) call_centos7('ls') captured = capstrip.get() assert (captured == ('', ''))
disable auto-mount and make sure that we do not see the files in the current directory.
tests/test_dog_arguments.py
test_disabled_auto_mount
marqvart/dog
2
python
def test_disabled_auto_mount(call_centos7, capstrip, tmp_path): append_to_dog_config(tmp_path, ['auto-mount=False']) call_centos7('ls') captured = capstrip.get() assert (captured == ('', ''))
def test_disabled_auto_mount(call_centos7, capstrip, tmp_path): append_to_dog_config(tmp_path, ['auto-mount=False']) call_centos7('ls') captured = capstrip.get() assert (captured == ('', ''))<|docstring|>disable auto-mount and make sure that we do not see the files in the current directory.<|endoftext|>
04b1c42c7ace50cea7999a366252bc4d7a735d14f0b6e5cc0cac882b613fce3c
def test_volumes(call_centos7, capstrip, tmp_path, system_temp_dir): 'Try adding the "system temp dir" as a volume in the dog.config.' append_to_dog_config(tmp_path, ['[volumes]', f'/dog_test_of_system_temp={system_temp_dir}']) call_centos7('mountpoint', '/dog_test_of_system_temp') captured = capstrip.get() assert (captured == ('/dog_test_of_system_temp is a mountpoint', ''))
Try adding the "system temp dir" as a volume in the dog.config.
tests/test_dog_arguments.py
test_volumes
marqvart/dog
2
python
def test_volumes(call_centos7, capstrip, tmp_path, system_temp_dir): append_to_dog_config(tmp_path, ['[volumes]', f'/dog_test_of_system_temp={system_temp_dir}']) call_centos7('mountpoint', '/dog_test_of_system_temp') captured = capstrip.get() assert (captured == ('/dog_test_of_system_temp is a mountpoint', ''))
def test_volumes(call_centos7, capstrip, tmp_path, system_temp_dir): append_to_dog_config(tmp_path, ['[volumes]', f'/dog_test_of_system_temp={system_temp_dir}']) call_centos7('mountpoint', '/dog_test_of_system_temp') captured = capstrip.get() assert (captured == ('/dog_test_of_system_temp is a mountpoint', ''))<|docstring|>Try adding the "system temp dir" as a volume in the dog.config.<|endoftext|>
4cf38f5edf20f59a5fd5f5e5483dc9a490af04eab3d19b3e44128ba59977fff6
def test_dog_config_not_found(my_dog, system_temp_dir, capfd): 'If no dog.config is found, report an error.\n\n We run this test in the system_temp_dir to be more sure that no-one has a dog.config file somewhere in the parent directories.' for parent in Path(system_temp_dir).parents: assert (not (parent / 'dog.config').exists()), f'Pre-conditions for this test failed - we expect no dog.config files in the parent directories of {system_temp_dir}' cmd_line = [DOG_PYTHON_UNDER_TEST, str(my_dog), 'echo', 'ok'] subprocess.run(cmd_line, cwd=system_temp_dir) assert ('ERROR' in capfd.readouterr().err)
If no dog.config is found, report an error. We run this test in the system_temp_dir to be more sure that no-one has a dog.config file somewhere in the parent directories.
tests/test_dog_arguments.py
test_dog_config_not_found
marqvart/dog
2
python
def test_dog_config_not_found(my_dog, system_temp_dir, capfd): 'If no dog.config is found, report an error.\n\n We run this test in the system_temp_dir to be more sure that no-one has a dog.config file somewhere in the parent directories.' for parent in Path(system_temp_dir).parents: assert (not (parent / 'dog.config').exists()), f'Pre-conditions for this test failed - we expect no dog.config files in the parent directories of {system_temp_dir}' cmd_line = [DOG_PYTHON_UNDER_TEST, str(my_dog), 'echo', 'ok'] subprocess.run(cmd_line, cwd=system_temp_dir) assert ('ERROR' in capfd.readouterr().err)
def test_dog_config_not_found(my_dog, system_temp_dir, capfd): 'If no dog.config is found, report an error.\n\n We run this test in the system_temp_dir to be more sure that no-one has a dog.config file somewhere in the parent directories.' for parent in Path(system_temp_dir).parents: assert (not (parent / 'dog.config').exists()), f'Pre-conditions for this test failed - we expect no dog.config files in the parent directories of {system_temp_dir}' cmd_line = [DOG_PYTHON_UNDER_TEST, str(my_dog), 'echo', 'ok'] subprocess.run(cmd_line, cwd=system_temp_dir) assert ('ERROR' in capfd.readouterr().err)<|docstring|>If no dog.config is found, report an error. We run this test in the system_temp_dir to be more sure that no-one has a dog.config file somewhere in the parent directories.<|endoftext|>
15d1da9abf72d8606165eeae38b732159a7f531ed84515c004f5ff0fd7a98241
@pytest.mark.skipif(('TEAMCITY_PROJECT_NAME' not in os.environ), reason='This test only works inside Demant (sorry!)') def test_pull_latest_demant_debian(basic_dog_config, call_dog, tmp_path, capstrip): 'This test only exists to pull the latest image before the next test - this is to make sure that the test does not fail\n due to lots of "pullling layer ....blah.balh... output.' append_to_dog_config(tmp_path, ['registry=artifactory.oticon.com/demant-docker-local', 'image=demant-debian:buster-20211011-slim']) call_dog('echo', 'ok')
This test only exists to pull the latest image before the next test - this is to make sure that the test does not fail due to lots of "pullling layer ....blah.balh... output.
tests/test_dog_arguments.py
test_pull_latest_demant_debian
marqvart/dog
2
python
@pytest.mark.skipif(('TEAMCITY_PROJECT_NAME' not in os.environ), reason='This test only works inside Demant (sorry!)') def test_pull_latest_demant_debian(basic_dog_config, call_dog, tmp_path, capstrip): 'This test only exists to pull the latest image before the next test - this is to make sure that the test does not fail\n due to lots of "pullling layer ....blah.balh... output.' append_to_dog_config(tmp_path, ['registry=artifactory.oticon.com/demant-docker-local', 'image=demant-debian:buster-20211011-slim']) call_dog('echo', 'ok')
@pytest.mark.skipif(('TEAMCITY_PROJECT_NAME' not in os.environ), reason='This test only works inside Demant (sorry!)') def test_pull_latest_demant_debian(basic_dog_config, call_dog, tmp_path, capstrip): 'This test only exists to pull the latest image before the next test - this is to make sure that the test does not fail\n due to lots of "pullling layer ....blah.balh... output.' append_to_dog_config(tmp_path, ['registry=artifactory.oticon.com/demant-docker-local', 'image=demant-debian:buster-20211011-slim']) call_dog('echo', 'ok')<|docstring|>This test only exists to pull the latest image before the next test - this is to make sure that the test does not fail due to lots of "pullling layer ....blah.balh... output.<|endoftext|>
df79446cbcd1a5ee485195b8e5db7bc8af18c55ad34d461d6b6bde380c560f9b
def run_experiment(): '\n Experiment I: ELIZA --> https://en.wikipedia.org/wiki/ELIZA\n ' eliza_context = 'ELIZA is an early natural language processing computer program created from 1964 to 1966 at the MIT Artificial Intelligence Laboratory by Joseph Weizenbaum. Created to demonstrate the superficiality of communication between humans and machines, Eliza simulated conversation by using a "pattern matching" and substitution methodology that gave users an illusion of understanding on the part of the program, but had no built in framework for contextualizing events. Directives on how to interact were provided by "scripts", written originally in MAD-Slip, which allowed ELIZA to process user inputs and engage in discourse following the rules and directions of the script. The most famous script, DOCTOR, simulated a Rogerian psychotherapist (in particular, Carl Rogers, who was well-known for simply parroting back at patients what they would just said), and used rules, dictated in the script, to respond with non-directional questions to user inputs. As such, ELIZA was one of the first chatterbots and one of the first programs capable of attempting the Turing test.' question_1 = 'What programming language was ELIZA written in?' print('question #1: {} --> answer: {}'.format(question_1, utility.get_answer(question_1, eliza_context))) question_2 = 'Who invented ELIZA?' print('question #2: {} --> answer: {}'.format(question_2, utility.get_answer(question_2, eliza_context))) question_3 = 'What is ELIZA?' print('question #3: {} --> answer: {}'.format(question_3, utility.get_answer(question_3, eliza_context))) question_4 = 'Is ELIZA a human?' print('question #4: {} --> answer: {}'.format(question_4, utility.get_answer(question_4, eliza_context))) question_5 = 'Where was ELIZA created at?' print('question #5: {} --> answer: {}'.format(question_5, utility.get_answer(question_5, eliza_context))) question_6 = 'Did ELIZA pass Turing test?' print('question #6: {} --> answer: {}'.format(question_6, utility.get_answer(question_6, eliza_context)))
Experiment I: ELIZA --> https://en.wikipedia.org/wiki/ELIZA
experiments/eliza.py
run_experiment
kjahan/bert_squad_demo
0
python
def run_experiment(): '\n \n ' eliza_context = 'ELIZA is an early natural language processing computer program created from 1964 to 1966 at the MIT Artificial Intelligence Laboratory by Joseph Weizenbaum. Created to demonstrate the superficiality of communication between humans and machines, Eliza simulated conversation by using a "pattern matching" and substitution methodology that gave users an illusion of understanding on the part of the program, but had no built in framework for contextualizing events. Directives on how to interact were provided by "scripts", written originally in MAD-Slip, which allowed ELIZA to process user inputs and engage in discourse following the rules and directions of the script. The most famous script, DOCTOR, simulated a Rogerian psychotherapist (in particular, Carl Rogers, who was well-known for simply parroting back at patients what they would just said), and used rules, dictated in the script, to respond with non-directional questions to user inputs. As such, ELIZA was one of the first chatterbots and one of the first programs capable of attempting the Turing test.' question_1 = 'What programming language was ELIZA written in?' print('question #1: {} --> answer: {}'.format(question_1, utility.get_answer(question_1, eliza_context))) question_2 = 'Who invented ELIZA?' print('question #2: {} --> answer: {}'.format(question_2, utility.get_answer(question_2, eliza_context))) question_3 = 'What is ELIZA?' print('question #3: {} --> answer: {}'.format(question_3, utility.get_answer(question_3, eliza_context))) question_4 = 'Is ELIZA a human?' print('question #4: {} --> answer: {}'.format(question_4, utility.get_answer(question_4, eliza_context))) question_5 = 'Where was ELIZA created at?' print('question #5: {} --> answer: {}'.format(question_5, utility.get_answer(question_5, eliza_context))) question_6 = 'Did ELIZA pass Turing test?' print('question #6: {} --> answer: {}'.format(question_6, utility.get_answer(question_6, eliza_context)))
def run_experiment(): '\n \n ' eliza_context = 'ELIZA is an early natural language processing computer program created from 1964 to 1966 at the MIT Artificial Intelligence Laboratory by Joseph Weizenbaum. Created to demonstrate the superficiality of communication between humans and machines, Eliza simulated conversation by using a "pattern matching" and substitution methodology that gave users an illusion of understanding on the part of the program, but had no built in framework for contextualizing events. Directives on how to interact were provided by "scripts", written originally in MAD-Slip, which allowed ELIZA to process user inputs and engage in discourse following the rules and directions of the script. The most famous script, DOCTOR, simulated a Rogerian psychotherapist (in particular, Carl Rogers, who was well-known for simply parroting back at patients what they would just said), and used rules, dictated in the script, to respond with non-directional questions to user inputs. As such, ELIZA was one of the first chatterbots and one of the first programs capable of attempting the Turing test.' question_1 = 'What programming language was ELIZA written in?' print('question #1: {} --> answer: {}'.format(question_1, utility.get_answer(question_1, eliza_context))) question_2 = 'Who invented ELIZA?' print('question #2: {} --> answer: {}'.format(question_2, utility.get_answer(question_2, eliza_context))) question_3 = 'What is ELIZA?' print('question #3: {} --> answer: {}'.format(question_3, utility.get_answer(question_3, eliza_context))) question_4 = 'Is ELIZA a human?' print('question #4: {} --> answer: {}'.format(question_4, utility.get_answer(question_4, eliza_context))) question_5 = 'Where was ELIZA created at?' print('question #5: {} --> answer: {}'.format(question_5, utility.get_answer(question_5, eliza_context))) question_6 = 'Did ELIZA pass Turing test?' print('question #6: {} --> answer: {}'.format(question_6, utility.get_answer(question_6, eliza_context)))<|docstring|>Experiment I: ELIZA --> https://en.wikipedia.org/wiki/ELIZA<|endoftext|>
16cb26783e999b62506d915f3f157f11c651ecb4ec9a6513dc6b0036fa025ee5
def get_manager(): '\n gets the cli core manager instance.\n\n :rtype: CLICoreManager\n ' return CLICoreManager()
gets the cli core manager instance. :rtype: CLICoreManager
src/pyrin/cli/core/services.py
get_manager
wilsonGmn/pyrin
0
python
def get_manager(): '\n gets the cli core manager instance.\n\n :rtype: CLICoreManager\n ' return CLICoreManager()
def get_manager(): '\n gets the cli core manager instance.\n\n :rtype: CLICoreManager\n ' return CLICoreManager()<|docstring|>gets the cli core manager instance. :rtype: CLICoreManager<|endoftext|>
e688b8d6d1e393938c322ce5b0cf3e7a18ba727971ee97a0ecfa8d0132523ebb
def create(handler_name): '\n creates the required templates using relevant handlers.\n\n :param str handler_name: handler name to be used.\n\n :raises CLICoreTemplateHandlerNotFoundError: cli core template handler not found error.\n ' return get_manager().create(handler_name)
creates the required templates using relevant handlers. :param str handler_name: handler name to be used. :raises CLICoreTemplateHandlerNotFoundError: cli core template handler not found error.
src/pyrin/cli/core/services.py
create
wilsonGmn/pyrin
0
python
def create(handler_name): '\n creates the required templates using relevant handlers.\n\n :param str handler_name: handler name to be used.\n\n :raises CLICoreTemplateHandlerNotFoundError: cli core template handler not found error.\n ' return get_manager().create(handler_name)
def create(handler_name): '\n creates the required templates using relevant handlers.\n\n :param str handler_name: handler name to be used.\n\n :raises CLICoreTemplateHandlerNotFoundError: cli core template handler not found error.\n ' return get_manager().create(handler_name)<|docstring|>creates the required templates using relevant handlers. :param str handler_name: handler name to be used. :raises CLICoreTemplateHandlerNotFoundError: cli core template handler not found error.<|endoftext|>
1eb8066eb5fffc7c9234c3c3e905c29cd2077c0ba73be18f645322c49c26f969
def show_result(self, img, result, thickness=1, font_scale=0.5, show=False, out_file=None, wait_time=0, backend='cv2', **kwargs): "Visualize tracking results.\n\n Args:\n img (str | ndarray): Filename of loaded image.\n result (dict): Tracking result.\n The value of key 'track_results' is ndarray with shape (n, 6)\n in [id, tl_x, tl_y, br_x, br_y, score] format.\n The value of key 'bbox_results' is ndarray with shape (n, 5)\n in [tl_x, tl_y, br_x, br_y, score] format.\n thickness (int, optional): Thickness of lines. Defaults to 1.\n font_scale (float, optional): Font scales of texts. Defaults\n to 0.5.\n show (bool, optional): Whether show the visualizations on the\n fly. Defaults to False.\n out_file (str | None, optional): Output filename. Defaults to None.\n backend (str, optional): Backend to draw the bounding boxes,\n options are `cv2` and `plt`. Defaults to 'cv2'.\n\n Returns:\n ndarray: Visualized image.\n " assert isinstance(result, dict) track_result = result.get('track_results', None) (bboxes, labels, ids) = restore_result(track_result, return_ids=True) img = imshow_tracks(img, bboxes, labels, ids, classes=self.CLASSES, thickness=thickness, font_scale=font_scale, show=show, out_file=out_file, wait_time=wait_time, backend=backend) return img
Visualize tracking results. Args: img (str | ndarray): Filename of loaded image. result (dict): Tracking result. The value of key 'track_results' is ndarray with shape (n, 6) in [id, tl_x, tl_y, br_x, br_y, score] format. The value of key 'bbox_results' is ndarray with shape (n, 5) in [tl_x, tl_y, br_x, br_y, score] format. thickness (int, optional): Thickness of lines. Defaults to 1. font_scale (float, optional): Font scales of texts. Defaults to 0.5. show (bool, optional): Whether show the visualizations on the fly. Defaults to False. out_file (str | None, optional): Output filename. Defaults to None. backend (str, optional): Backend to draw the bounding boxes, options are `cv2` and `plt`. Defaults to 'cv2'. Returns: ndarray: Visualized image.
tutorials/qdtrack/qdtrack.py
show_result
namtranase/ByteTrack
1,039
python
def show_result(self, img, result, thickness=1, font_scale=0.5, show=False, out_file=None, wait_time=0, backend='cv2', **kwargs): "Visualize tracking results.\n\n Args:\n img (str | ndarray): Filename of loaded image.\n result (dict): Tracking result.\n The value of key 'track_results' is ndarray with shape (n, 6)\n in [id, tl_x, tl_y, br_x, br_y, score] format.\n The value of key 'bbox_results' is ndarray with shape (n, 5)\n in [tl_x, tl_y, br_x, br_y, score] format.\n thickness (int, optional): Thickness of lines. Defaults to 1.\n font_scale (float, optional): Font scales of texts. Defaults\n to 0.5.\n show (bool, optional): Whether show the visualizations on the\n fly. Defaults to False.\n out_file (str | None, optional): Output filename. Defaults to None.\n backend (str, optional): Backend to draw the bounding boxes,\n options are `cv2` and `plt`. Defaults to 'cv2'.\n\n Returns:\n ndarray: Visualized image.\n " assert isinstance(result, dict) track_result = result.get('track_results', None) (bboxes, labels, ids) = restore_result(track_result, return_ids=True) img = imshow_tracks(img, bboxes, labels, ids, classes=self.CLASSES, thickness=thickness, font_scale=font_scale, show=show, out_file=out_file, wait_time=wait_time, backend=backend) return img
def show_result(self, img, result, thickness=1, font_scale=0.5, show=False, out_file=None, wait_time=0, backend='cv2', **kwargs): "Visualize tracking results.\n\n Args:\n img (str | ndarray): Filename of loaded image.\n result (dict): Tracking result.\n The value of key 'track_results' is ndarray with shape (n, 6)\n in [id, tl_x, tl_y, br_x, br_y, score] format.\n The value of key 'bbox_results' is ndarray with shape (n, 5)\n in [tl_x, tl_y, br_x, br_y, score] format.\n thickness (int, optional): Thickness of lines. Defaults to 1.\n font_scale (float, optional): Font scales of texts. Defaults\n to 0.5.\n show (bool, optional): Whether show the visualizations on the\n fly. Defaults to False.\n out_file (str | None, optional): Output filename. Defaults to None.\n backend (str, optional): Backend to draw the bounding boxes,\n options are `cv2` and `plt`. Defaults to 'cv2'.\n\n Returns:\n ndarray: Visualized image.\n " assert isinstance(result, dict) track_result = result.get('track_results', None) (bboxes, labels, ids) = restore_result(track_result, return_ids=True) img = imshow_tracks(img, bboxes, labels, ids, classes=self.CLASSES, thickness=thickness, font_scale=font_scale, show=show, out_file=out_file, wait_time=wait_time, backend=backend) return img<|docstring|>Visualize tracking results. Args: img (str | ndarray): Filename of loaded image. result (dict): Tracking result. The value of key 'track_results' is ndarray with shape (n, 6) in [id, tl_x, tl_y, br_x, br_y, score] format. The value of key 'bbox_results' is ndarray with shape (n, 5) in [tl_x, tl_y, br_x, br_y, score] format. thickness (int, optional): Thickness of lines. Defaults to 1. font_scale (float, optional): Font scales of texts. Defaults to 0.5. show (bool, optional): Whether show the visualizations on the fly. Defaults to False. out_file (str | None, optional): Output filename. Defaults to None. backend (str, optional): Backend to draw the bounding boxes, options are `cv2` and `plt`. Defaults to 'cv2'. Returns: ndarray: Visualized image.<|endoftext|>
67a4dcbbee31589b52c2d091761c6bc2056c27b38fde80928cbfa34600131451
def __init__(self, predictions, labels, desc, probabilities=None): 'Initialize attributes: predictions-vector of predicted values for Y, labels-vector of labels for Y' 'probabilities-optional, probability that Y is equal to True' self.probabilities = probabilities self.performance_df = pd.concat([pd.DataFrame(predictions), pd.DataFrame(labels)], axis=1) self.performance_df.columns = ['preds', 'labls'] self.desc = desc self.performance_measures = {} self.image_indices = {}
Initialize attributes: predictions-vector of predicted values for Y, labels-vector of labels for Y
data/my_measures.py
__init__
3milychu/notesonperspective_journal
0
python
def __init__(self, predictions, labels, desc, probabilities=None): 'probabilities-optional, probability that Y is equal to True' self.probabilities = probabilities self.performance_df = pd.concat([pd.DataFrame(predictions), pd.DataFrame(labels)], axis=1) self.performance_df.columns = ['preds', 'labls'] self.desc = desc self.performance_measures = {} self.image_indices = {}
def __init__(self, predictions, labels, desc, probabilities=None): 'probabilities-optional, probability that Y is equal to True' self.probabilities = probabilities self.performance_df = pd.concat([pd.DataFrame(predictions), pd.DataFrame(labels)], axis=1) self.performance_df.columns = ['preds', 'labls'] self.desc = desc self.performance_measures = {} self.image_indices = {}<|docstring|>Initialize attributes: predictions-vector of predicted values for Y, labels-vector of labels for Y<|endoftext|>
0023475cc3c5d71dd39216eacbc075e8d7d1d5616c8073ada7a79394931900bf
def compute_measures(self): 'Compute performance measures defined by Flach p. 57' self.performance_measures['Pos'] = self.performance_df['labls'].sum() self.performance_measures['Neg'] = (self.performance_df.shape[0] - self.performance_df['labls'].sum()) self.performance_measures['TP'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == True)).sum() self.performance_measures['TN'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == False)).sum() self.performance_measures['FP'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == False)).sum() self.performance_measures['FN'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == True)).sum() self.performance_measures['Accuracy'] = ((self.performance_measures['TP'] + self.performance_measures['TN']) / (self.performance_measures['Pos'] + self.performance_measures['Neg'])) self.performance_measures['Precision'] = (self.performance_measures['TP'] / (self.performance_measures['TP'] + self.performance_measures['FP'])) self.performance_measures['Recall'] = (self.performance_measures['TP'] / self.performance_measures['Pos']) self.performance_measures['desc'] = self.desc
Compute performance measures defined by Flach p. 57
data/my_measures.py
compute_measures
3milychu/notesonperspective_journal
0
python
def compute_measures(self): self.performance_measures['Pos'] = self.performance_df['labls'].sum() self.performance_measures['Neg'] = (self.performance_df.shape[0] - self.performance_df['labls'].sum()) self.performance_measures['TP'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == True)).sum() self.performance_measures['TN'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == False)).sum() self.performance_measures['FP'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == False)).sum() self.performance_measures['FN'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == True)).sum() self.performance_measures['Accuracy'] = ((self.performance_measures['TP'] + self.performance_measures['TN']) / (self.performance_measures['Pos'] + self.performance_measures['Neg'])) self.performance_measures['Precision'] = (self.performance_measures['TP'] / (self.performance_measures['TP'] + self.performance_measures['FP'])) self.performance_measures['Recall'] = (self.performance_measures['TP'] / self.performance_measures['Pos']) self.performance_measures['desc'] = self.desc
def compute_measures(self): self.performance_measures['Pos'] = self.performance_df['labls'].sum() self.performance_measures['Neg'] = (self.performance_df.shape[0] - self.performance_df['labls'].sum()) self.performance_measures['TP'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == True)).sum() self.performance_measures['TN'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == False)).sum() self.performance_measures['FP'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == False)).sum() self.performance_measures['FN'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == True)).sum() self.performance_measures['Accuracy'] = ((self.performance_measures['TP'] + self.performance_measures['TN']) / (self.performance_measures['Pos'] + self.performance_measures['Neg'])) self.performance_measures['Precision'] = (self.performance_measures['TP'] / (self.performance_measures['TP'] + self.performance_measures['FP'])) self.performance_measures['Recall'] = (self.performance_measures['TP'] / self.performance_measures['Pos']) self.performance_measures['desc'] = self.desc<|docstring|>Compute performance measures defined by Flach p. 57<|endoftext|>
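The compute_measures record above is plain confusion-matrix arithmetic. As a self-contained worked example (illustrative only, not part of the dataset), the same formulas applied to a hand-made prediction/label pair give:

```python
# Tiny worked example of the confusion-matrix arithmetic used above.
import pandas as pd

preds = pd.Series([True, True, False, True, False])
labls = pd.Series([True, False, False, True, True])

tp = ((preds == True) & (labls == True)).sum()    # 2
fp = ((preds == True) & (labls == False)).sum()   # 1
fn = ((preds == False) & (labls == True)).sum()   # 1
pos = labls.sum()                                 # 3 positives

precision = tp / (tp + fp)          # 2 / 3 ≈ 0.667
recall = tp / pos                   # 2 / 3 ≈ 0.667
accuracy = (preds == labls).mean()  # 3 correct out of 5 = 0.6
print(precision, recall, accuracy)
```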
09190562f9b48657bdf5b509d6d05ddfd660e80fe7ea6e527017680496bd966a
def img_indices(self): 'Get the indices of true and false positives to be able to locate the corresponding images in a list of image names' self.performance_df['tp_ind'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == True)) self.performance_df['fp_ind'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == False)) self.performance_df['fn_ind'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == True)) self.image_indices['TP_indices'] = np.where((self.performance_df['tp_ind'] == True))[0].tolist() self.image_indices['FP_indices'] = np.where((self.performance_df['fp_ind'] == True))[0].tolist() self.image_indices['FN_indices'] = np.where((self.performance_df['fn_ind'] == True))[0].tolist()
Get the indices of true and false positives to be able to locate the corresponding images in a list of image names
data/my_measures.py
img_indices
3milychu/notesonperspective_journal
0
python
def img_indices(self): self.performance_df['tp_ind'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == True)) self.performance_df['fp_ind'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == False)) self.performance_df['fn_ind'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == True)) self.image_indices['TP_indices'] = np.where((self.performance_df['tp_ind'] == True))[0].tolist() self.image_indices['FP_indices'] = np.where((self.performance_df['fp_ind'] == True))[0].tolist() self.image_indices['FN_indices'] = np.where((self.performance_df['fn_ind'] == True))[0].tolist()
def img_indices(self): self.performance_df['tp_ind'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == True)) self.performance_df['fp_ind'] = ((self.performance_df['preds'] == True) & (self.performance_df['labls'] == False)) self.performance_df['fn_ind'] = ((self.performance_df['preds'] == False) & (self.performance_df['labls'] == True)) self.image_indices['TP_indices'] = np.where((self.performance_df['tp_ind'] == True))[0].tolist() self.image_indices['FP_indices'] = np.where((self.performance_df['fp_ind'] == True))[0].tolist() self.image_indices['FN_indices'] = np.where((self.performance_df['fn_ind'] == True))[0].tolist()<|docstring|>Get the indices of true and false positives to be able to locate the corresponding images in a list of image names<|endoftext|>
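The same np.where pattern used by img_indices above, on toy data (invented values; numpy and pandas assumed):

import numpy as np
import pandas as pd

preds = pd.Series([True, True, False, True])
labls = pd.Series([True, False, True, True])
tp_ind = preds & labls                  # true-positive mask
print(np.where(tp_ind)[0].tolist())     # [0, 3] -> positions of TP images in the image list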
4693f5b4fbf7f5501594a39e5788857cc52e4da4fdc7bc94c7be0a7966e66901
def wrapper_function_header(fn: FuncIR, names: NameGenerator) -> str: 'Return header of a vectorcall wrapper function.\n\n See comment above for a summary of the arguments.\n ' return 'PyObject *{prefix}{name}(PyObject *self, PyObject *const *args, size_t nargs, PyObject *kwnames)'.format(prefix=PREFIX, name=fn.cname(names))
Return header of a vectorcall wrapper function. See comment above for a summary of the arguments.
mypyc/codegen/emitwrapper.py
wrapper_function_header
srittau/mypy
12,496
python
def wrapper_function_header(fn: FuncIR, names: NameGenerator) -> str: 'Return header of a vectorcall wrapper function.\n\n See comment above for a summary of the arguments.\n ' return 'PyObject *{prefix}{name}(PyObject *self, PyObject *const *args, size_t nargs, PyObject *kwnames)'.format(prefix=PREFIX, name=fn.cname(names))
def wrapper_function_header(fn: FuncIR, names: NameGenerator) -> str: 'Return header of a vectorcall wrapper function.\n\n See comment above for a summary of the arguments.\n ' return 'PyObject *{prefix}{name}(PyObject *self, PyObject *const *args, size_t nargs, PyObject *kwnames)'.format(prefix=PREFIX, name=fn.cname(names))<|docstring|>Return header of a vectorcall wrapper function. See comment above for a summary of the arguments.<|endoftext|>
002c90ffedba1253008b03c1147b3203e4da74b20cd913fedb4a45a04520793e
def make_arg_groups(args: List[RuntimeArg]) -> Dict[(ArgKind, List[RuntimeArg])]: 'Group arguments by kind.' return {k: [arg for arg in args if (arg.kind == k)] for k in ArgKind}
Group arguments by kind.
mypyc/codegen/emitwrapper.py
make_arg_groups
srittau/mypy
12,496
python
def make_arg_groups(args: List[RuntimeArg]) -> Dict[(ArgKind, List[RuntimeArg])]: return {k: [arg for arg in args if (arg.kind == k)] for k in ArgKind}
def make_arg_groups(args: List[RuntimeArg]) -> Dict[(ArgKind, List[RuntimeArg])]: return {k: [arg for arg in args if (arg.kind == k)] for k in ArgKind}<|docstring|>Group arguments by kind.<|endoftext|>
95c823f05edff54e564a7b9528804dce7f098b45499f123c114a08fb19b42069
def reorder_arg_groups(groups: Dict[(ArgKind, List[RuntimeArg])]) -> List[RuntimeArg]: 'Reorder argument groups to match their order in a format string.' return (((groups[ARG_POS] + groups[ARG_OPT]) + groups[ARG_NAMED_OPT]) + groups[ARG_NAMED])
Reorder argument groups to match their order in a format string.
mypyc/codegen/emitwrapper.py
reorder_arg_groups
srittau/mypy
12,496
python
def reorder_arg_groups(groups: Dict[(ArgKind, List[RuntimeArg])]) -> List[RuntimeArg]: return (((groups[ARG_POS] + groups[ARG_OPT]) + groups[ARG_NAMED_OPT]) + groups[ARG_NAMED])
def reorder_arg_groups(groups: Dict[(ArgKind, List[RuntimeArg])]) -> List[RuntimeArg]: return (((groups[ARG_POS] + groups[ARG_OPT]) + groups[ARG_NAMED_OPT]) + groups[ARG_NAMED])<|docstring|>Reorder argument groups to match their order in a format string.<|endoftext|>
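A self-contained sketch of how make_arg_groups and reorder_arg_groups above behave. ArgKind and RuntimeArg here are simplified stand-ins for the real mypy/mypyc classes, not the actual definitions.

from dataclasses import dataclass
from enum import Enum
from typing import Dict, List

class ArgKind(Enum):          # stand-in; the real enum lives in mypy
    ARG_POS = 0
    ARG_OPT = 1
    ARG_STAR = 2
    ARG_NAMED = 3
    ARG_STAR2 = 4
    ARG_NAMED_OPT = 5

@dataclass
class RuntimeArg:             # stand-in; the real class lives in mypyc
    name: str
    kind: ArgKind

def make_arg_groups(args: List[RuntimeArg]) -> Dict[ArgKind, List[RuntimeArg]]:
    # Group arguments by kind, exactly as in the record above.
    return {k: [a for a in args if a.kind == k] for k in ArgKind}

args = [RuntimeArg('x', ArgKind.ARG_POS),
        RuntimeArg('y', ArgKind.ARG_NAMED),
        RuntimeArg('z', ArgKind.ARG_OPT)]
g = make_arg_groups(args)
# Reorder: positional, optional, named-optional, named (format-string order).
reordered = (g[ArgKind.ARG_POS] + g[ArgKind.ARG_OPT]
             + g[ArgKind.ARG_NAMED_OPT] + g[ArgKind.ARG_NAMED])
print([a.name for a in reordered])  # ['x', 'z', 'y']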
07c5b1f79a41e3444f968c4468eea1df8459b199d97e176d37239491368f6f63
def make_format_string(func_name: Optional[str], groups: Dict[(ArgKind, List[RuntimeArg])]) -> str: "Return a format string that specifies the accepted arguments.\n\n The format string is an extended subset of what is supported by\n PyArg_ParseTupleAndKeywords(). Only the type 'O' is used, and we\n also support some extensions:\n\n - Required keyword-only arguments are introduced after '@'\n - If the function receives *args or **kwargs, we add a '%' prefix\n\n Each group requires the previous groups' delimiters to be present\n first.\n\n These are used by both vectorcall and legacy wrapper functions.\n " format = '' if (groups[ARG_STAR] or groups[ARG_STAR2]): format += '%' format += ('O' * len(groups[ARG_POS])) if (groups[ARG_OPT] or groups[ARG_NAMED_OPT] or groups[ARG_NAMED]): format += ('|' + ('O' * len(groups[ARG_OPT]))) if (groups[ARG_NAMED_OPT] or groups[ARG_NAMED]): format += ('$' + ('O' * len(groups[ARG_NAMED_OPT]))) if groups[ARG_NAMED]: format += ('@' + ('O' * len(groups[ARG_NAMED]))) if (func_name is not None): format += ':{}'.format(func_name) return format
Return a format string that specifies the accepted arguments. The format string is an extended subset of what is supported by PyArg_ParseTupleAndKeywords(). Only the type 'O' is used, and we also support some extensions: - Required keyword-only arguments are introduced after '@' - If the function receives *args or **kwargs, we add a '%' prefix Each group requires the previous groups' delimiters to be present first. These are used by both vectorcall and legacy wrapper functions.
mypyc/codegen/emitwrapper.py
make_format_string
srittau/mypy
12,496
python
def make_format_string(func_name: Optional[str], groups: Dict[(ArgKind, List[RuntimeArg])]) -> str: "Return a format string that specifies the accepted arguments.\n\n The format string is an extended subset of what is supported by\n PyArg_ParseTupleAndKeywords(). Only the type 'O' is used, and we\n also support some extensions:\n\n - Required keyword-only arguments are introduced after '@'\n - If the function receives *args or **kwargs, we add a '%' prefix\n\n Each group requires the previous groups' delimiters to be present\n first.\n\n These are used by both vectorcall and legacy wrapper functions.\n " format = '' if (groups[ARG_STAR] or groups[ARG_STAR2]): format += '%' format += ('O' * len(groups[ARG_POS])) if (groups[ARG_OPT] or groups[ARG_NAMED_OPT] or groups[ARG_NAMED]): format += ('|' + ('O' * len(groups[ARG_OPT]))) if (groups[ARG_NAMED_OPT] or groups[ARG_NAMED]): format += ('$' + ('O' * len(groups[ARG_NAMED_OPT]))) if groups[ARG_NAMED]: format += ('@' + ('O' * len(groups[ARG_NAMED]))) if (func_name is not None): format += ':{}'.format(func_name) return format
def make_format_string(func_name: Optional[str], groups: Dict[(ArgKind, List[RuntimeArg])]) -> str: "Return a format string that specifies the accepted arguments.\n\n The format string is an extended subset of what is supported by\n PyArg_ParseTupleAndKeywords(). Only the type 'O' is used, and we\n also support some extensions:\n\n - Required keyword-only arguments are introduced after '@'\n - If the function receives *args or **kwargs, we add a '%' prefix\n\n Each group requires the previous groups' delimiters to be present\n first.\n\n These are used by both vectorcall and legacy wrapper functions.\n " format = '' if (groups[ARG_STAR] or groups[ARG_STAR2]): format += '%' format += ('O' * len(groups[ARG_POS])) if (groups[ARG_OPT] or groups[ARG_NAMED_OPT] or groups[ARG_NAMED]): format += ('|' + ('O' * len(groups[ARG_OPT]))) if (groups[ARG_NAMED_OPT] or groups[ARG_NAMED]): format += ('$' + ('O' * len(groups[ARG_NAMED_OPT]))) if groups[ARG_NAMED]: format += ('@' + ('O' * len(groups[ARG_NAMED]))) if (func_name is not None): format += ':{}'.format(func_name) return format<|docstring|>Return a format string that specifies the accepted arguments. The format string is an extended subset of what is supported by PyArg_ParseTupleAndKeywords(). Only the type 'O' is used, and we also support some extensions: - Required keyword-only arguments are introduced after '@' - If the function receives *args or **kwargs, we add a '%' prefix Each group requires the previous groups' delimiters to be present first. These are used by both vectorcall and legacy wrapper functions.<|endoftext|>
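A count-based sketch of the format-string rules implemented by make_format_string above (sketch_format_string is a hypothetical helper name; the real function takes ArgKind -> List[RuntimeArg] groups rather than plain counts):

def sketch_format_string(func_name, n_pos, n_opt, n_named_opt, n_named,
                         has_star=False, has_star2=False):
    fmt = ''
    if has_star or has_star2:
        fmt += '%'                        # *args / **kwargs prefix
    fmt += 'O' * n_pos
    if n_opt or n_named_opt or n_named:
        fmt += '|' + 'O' * n_opt
    if n_named_opt or n_named:
        fmt += '$' + 'O' * n_named_opt
    if n_named:
        fmt += '@' + 'O' * n_named        # required keyword-only args
    if func_name is not None:
        fmt += ':{}'.format(func_name)
    return fmt

print(sketch_format_string('f', 2, 1, 0, 0))                 # OO|O:f
print(sketch_format_string('f', 2, 1, 0, 1, has_star=True))  # %OO|O$@O:f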
cd82012252399b4a2448d939efb402929896e3729b13c4f2023d0c0835c7078c
def generate_wrapper_function(fn: FuncIR, emitter: Emitter, source_path: str, module_name: str) -> None: 'Generate a CPython-compatible vectorcall wrapper for a native function.\n\n In particular, this handles unboxing the arguments, calling the native function, and\n then boxing the return value.\n ' emitter.emit_line('{} {{'.format(wrapper_function_header(fn, emitter.names))) real_args = list(fn.args) if (fn.class_name and (not (fn.decl.kind == FUNC_STATICMETHOD))): arg = real_args.pop(0) emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name)) groups = make_arg_groups(real_args) reordered_args = reorder_arg_groups(groups) emitter.emit_line(make_static_kwlist(reordered_args)) fmt = make_format_string(fn.name, groups) emitter.emit_line('static CPyArg_Parser parser = {{"{}", kwlist, 0}};'.format(fmt)) for arg in real_args: emitter.emit_line('PyObject *obj_{}{};'.format(arg.name, (' = NULL' if arg.optional else ''))) cleanups = ['CPy_DECREF(obj_{});'.format(arg.name) for arg in (groups[ARG_STAR] + groups[ARG_STAR2])] arg_ptrs: List[str] = [] if (groups[ARG_STAR] or groups[ARG_STAR2]): arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL')] arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL')] arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args] if ((fn.name == '__call__') and use_vectorcall(emitter.capi_version)): nargs = 'PyVectorcall_NARGS(nargs)' else: nargs = 'nargs' parse_fn = 'CPyArg_ParseStackAndKeywords' if (len(real_args) == 0): parse_fn = 'CPyArg_ParseStackAndKeywordsNoArgs' elif ((len(real_args) == 1) and (len(groups[ARG_POS]) == 1)): parse_fn = 'CPyArg_ParseStackAndKeywordsOneArg' elif (len(real_args) == (len(groups[ARG_POS]) + len(groups[ARG_OPT]))): parse_fn = 'CPyArg_ParseStackAndKeywordsSimple' emitter.emit_lines('if (!{}(args, {}, kwnames, &parser{})) {{'.format(parse_fn, nargs, ''.join(((', ' + n) for n in arg_ptrs))), 'return NULL;', '}') traceback_code = generate_traceback_code(fn, emitter, source_path, module_name) generate_wrapper_core(fn, emitter, (groups[ARG_OPT] + groups[ARG_NAMED_OPT]), cleanups=cleanups, traceback_code=traceback_code) emitter.emit_line('}')
Generate a CPython-compatible vectorcall wrapper for a native function. In particular, this handles unboxing the arguments, calling the native function, and then boxing the return value.
mypyc/codegen/emitwrapper.py
generate_wrapper_function
srittau/mypy
12,496
python
def generate_wrapper_function(fn: FuncIR, emitter: Emitter, source_path: str, module_name: str) -> None: 'Generate a CPython-compatible vectorcall wrapper for a native function.\n\n In particular, this handles unboxing the arguments, calling the native function, and\n then boxing the return value.\n ' emitter.emit_line('{} {{'.format(wrapper_function_header(fn, emitter.names))) real_args = list(fn.args) if (fn.class_name and (not (fn.decl.kind == FUNC_STATICMETHOD))): arg = real_args.pop(0) emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name)) groups = make_arg_groups(real_args) reordered_args = reorder_arg_groups(groups) emitter.emit_line(make_static_kwlist(reordered_args)) fmt = make_format_string(fn.name, groups) emitter.emit_line('static CPyArg_Parser parser = {{"{}", kwlist, 0}};'.format(fmt)) for arg in real_args: emitter.emit_line('PyObject *obj_{}{};'.format(arg.name, (' = NULL' if arg.optional else ''))) cleanups = ['CPy_DECREF(obj_{});'.format(arg.name) for arg in (groups[ARG_STAR] + groups[ARG_STAR2])] arg_ptrs: List[str] = [] if (groups[ARG_STAR] or groups[ARG_STAR2]): arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL')] arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL')] arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args] if ((fn.name == '__call__') and use_vectorcall(emitter.capi_version)): nargs = 'PyVectorcall_NARGS(nargs)' else: nargs = 'nargs' parse_fn = 'CPyArg_ParseStackAndKeywords' if (len(real_args) == 0): parse_fn = 'CPyArg_ParseStackAndKeywordsNoArgs' elif ((len(real_args) == 1) and (len(groups[ARG_POS]) == 1)): parse_fn = 'CPyArg_ParseStackAndKeywordsOneArg' elif (len(real_args) == (len(groups[ARG_POS]) + len(groups[ARG_OPT]))): parse_fn = 'CPyArg_ParseStackAndKeywordsSimple' emitter.emit_lines('if (!{}(args, {}, kwnames, &parser{})) {{'.format(parse_fn, nargs, ''.join(((', ' + n) for n in arg_ptrs))), 'return NULL;', '}') traceback_code = generate_traceback_code(fn, emitter, source_path, module_name) generate_wrapper_core(fn, emitter, (groups[ARG_OPT] + groups[ARG_NAMED_OPT]), cleanups=cleanups, traceback_code=traceback_code) emitter.emit_line('}')
def generate_wrapper_function(fn: FuncIR, emitter: Emitter, source_path: str, module_name: str) -> None: 'Generate a CPython-compatible vectorcall wrapper for a native function.\n\n In particular, this handles unboxing the arguments, calling the native function, and\n then boxing the return value.\n ' emitter.emit_line('{} {{'.format(wrapper_function_header(fn, emitter.names))) real_args = list(fn.args) if (fn.class_name and (not (fn.decl.kind == FUNC_STATICMETHOD))): arg = real_args.pop(0) emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name)) groups = make_arg_groups(real_args) reordered_args = reorder_arg_groups(groups) emitter.emit_line(make_static_kwlist(reordered_args)) fmt = make_format_string(fn.name, groups) emitter.emit_line('static CPyArg_Parser parser = {{"{}", kwlist, 0}};'.format(fmt)) for arg in real_args: emitter.emit_line('PyObject *obj_{}{};'.format(arg.name, (' = NULL' if arg.optional else ''))) cleanups = ['CPy_DECREF(obj_{});'.format(arg.name) for arg in (groups[ARG_STAR] + groups[ARG_STAR2])] arg_ptrs: List[str] = [] if (groups[ARG_STAR] or groups[ARG_STAR2]): arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL')] arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL')] arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args] if ((fn.name == '__call__') and use_vectorcall(emitter.capi_version)): nargs = 'PyVectorcall_NARGS(nargs)' else: nargs = 'nargs' parse_fn = 'CPyArg_ParseStackAndKeywords' if (len(real_args) == 0): parse_fn = 'CPyArg_ParseStackAndKeywordsNoArgs' elif ((len(real_args) == 1) and (len(groups[ARG_POS]) == 1)): parse_fn = 'CPyArg_ParseStackAndKeywordsOneArg' elif (len(real_args) == (len(groups[ARG_POS]) + len(groups[ARG_OPT]))): parse_fn = 'CPyArg_ParseStackAndKeywordsSimple' emitter.emit_lines('if (!{}(args, {}, kwnames, &parser{})) {{'.format(parse_fn, nargs, ''.join(((', ' + n) for n in arg_ptrs))), 'return NULL;', '}') traceback_code = generate_traceback_code(fn, emitter, source_path, module_name) generate_wrapper_core(fn, emitter, (groups[ARG_OPT] + groups[ARG_NAMED_OPT]), cleanups=cleanups, traceback_code=traceback_code) emitter.emit_line('}')<|docstring|>Generate a CPython-compatible vectorcall wrapper for a native function. In particular, this handles unboxing the arguments, calling the native function, and then boxing the return value.<|endoftext|>
b67bdc7643fa029bb4766f8d0fb7d858a36b93855cb4eeef18551ed4f9cec07c
def generate_legacy_wrapper_function(fn: FuncIR, emitter: Emitter, source_path: str, module_name: str) -> None: 'Generates a CPython-compatible legacy wrapper for a native function.\n\n In particular, this handles unboxing the arguments, calling the native function, and\n then boxing the return value.\n ' emitter.emit_line('{} {{'.format(legacy_wrapper_function_header(fn, emitter.names))) real_args = list(fn.args) if (fn.class_name and (not (fn.decl.kind == FUNC_STATICMETHOD))): arg = real_args.pop(0) emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name)) groups = make_arg_groups(real_args) reordered_args = reorder_arg_groups(groups) emitter.emit_line(make_static_kwlist(reordered_args)) for arg in real_args: emitter.emit_line('PyObject *obj_{}{};'.format(arg.name, (' = NULL' if arg.optional else ''))) cleanups = ['CPy_DECREF(obj_{});'.format(arg.name) for arg in (groups[ARG_STAR] + groups[ARG_STAR2])] arg_ptrs: List[str] = [] if (groups[ARG_STAR] or groups[ARG_STAR2]): arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL')] arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL')] arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args] emitter.emit_lines('if (!CPyArg_ParseTupleAndKeywords(args, kw, "{}", "{}", kwlist{})) {{'.format(make_format_string(None, groups), fn.name, ''.join(((', ' + n) for n in arg_ptrs))), 'return NULL;', '}') traceback_code = generate_traceback_code(fn, emitter, source_path, module_name) generate_wrapper_core(fn, emitter, (groups[ARG_OPT] + groups[ARG_NAMED_OPT]), cleanups=cleanups, traceback_code=traceback_code) emitter.emit_line('}')
Generates a CPython-compatible legacy wrapper for a native function. In particular, this handles unboxing the arguments, calling the native function, and then boxing the return value.
mypyc/codegen/emitwrapper.py
generate_legacy_wrapper_function
srittau/mypy
12,496
python
def generate_legacy_wrapper_function(fn: FuncIR, emitter: Emitter, source_path: str, module_name: str) -> None: 'Generates a CPython-compatible legacy wrapper for a native function.\n\n In particular, this handles unboxing the arguments, calling the native function, and\n then boxing the return value.\n ' emitter.emit_line('{} {{'.format(legacy_wrapper_function_header(fn, emitter.names))) real_args = list(fn.args) if (fn.class_name and (not (fn.decl.kind == FUNC_STATICMETHOD))): arg = real_args.pop(0) emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name)) groups = make_arg_groups(real_args) reordered_args = reorder_arg_groups(groups) emitter.emit_line(make_static_kwlist(reordered_args)) for arg in real_args: emitter.emit_line('PyObject *obj_{}{};'.format(arg.name, (' = NULL' if arg.optional else ''))) cleanups = ['CPy_DECREF(obj_{});'.format(arg.name) for arg in (groups[ARG_STAR] + groups[ARG_STAR2])] arg_ptrs: List[str] = [] if (groups[ARG_STAR] or groups[ARG_STAR2]): arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL')] arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL')] arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args] emitter.emit_lines('if (!CPyArg_ParseTupleAndKeywords(args, kw, "{}", "{}", kwlist{})) {{'.format(make_format_string(None, groups), fn.name, ''.join(((', ' + n) for n in arg_ptrs))), 'return NULL;', '}') traceback_code = generate_traceback_code(fn, emitter, source_path, module_name) generate_wrapper_core(fn, emitter, (groups[ARG_OPT] + groups[ARG_NAMED_OPT]), cleanups=cleanups, traceback_code=traceback_code) emitter.emit_line('}')
def generate_legacy_wrapper_function(fn: FuncIR, emitter: Emitter, source_path: str, module_name: str) -> None: 'Generates a CPython-compatible legacy wrapper for a native function.\n\n In particular, this handles unboxing the arguments, calling the native function, and\n then boxing the return value.\n ' emitter.emit_line('{} {{'.format(legacy_wrapper_function_header(fn, emitter.names))) real_args = list(fn.args) if (fn.class_name and (not (fn.decl.kind == FUNC_STATICMETHOD))): arg = real_args.pop(0) emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name)) groups = make_arg_groups(real_args) reordered_args = reorder_arg_groups(groups) emitter.emit_line(make_static_kwlist(reordered_args)) for arg in real_args: emitter.emit_line('PyObject *obj_{}{};'.format(arg.name, (' = NULL' if arg.optional else ''))) cleanups = ['CPy_DECREF(obj_{});'.format(arg.name) for arg in (groups[ARG_STAR] + groups[ARG_STAR2])] arg_ptrs: List[str] = [] if (groups[ARG_STAR] or groups[ARG_STAR2]): arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL')] arg_ptrs += [('&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL')] arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args] emitter.emit_lines('if (!CPyArg_ParseTupleAndKeywords(args, kw, "{}", "{}", kwlist{})) {{'.format(make_format_string(None, groups), fn.name, ''.join(((', ' + n) for n in arg_ptrs))), 'return NULL;', '}') traceback_code = generate_traceback_code(fn, emitter, source_path, module_name) generate_wrapper_core(fn, emitter, (groups[ARG_OPT] + groups[ARG_NAMED_OPT]), cleanups=cleanups, traceback_code=traceback_code) emitter.emit_line('}')<|docstring|>Generates a CPython-compatible legacy wrapper for a native function. In particular, this handles unboxing the arguments, calling the native function, and then boxing the return value.<|endoftext|>
2d895a160411363017b72f33097e393d7de985f3f33942ad3aa696428db2e92b
def generate_dunder_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __dunder__ methods to be able to fit into the mapping\n protocol slot. This specifically means that the arguments are taken as *PyObjects and returned\n as *PyObjects.\n ' gen = WrapperGenerator(cl, emitter) gen.set_target(fn) gen.emit_header() gen.emit_arg_processing() gen.emit_call() gen.finish() return gen.wrapper_name()
Generates a wrapper for native __dunder__ methods to be able to fit into the mapping protocol slot. This specifically means that the arguments are taken as *PyObjects and returned as *PyObjects.
mypyc/codegen/emitwrapper.py
generate_dunder_wrapper
srittau/mypy
12,496
python
def generate_dunder_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __dunder__ methods to be able to fit into the mapping\n protocol slot. This specifically means that the arguments are taken as *PyObjects and returned\n as *PyObjects.\n ' gen = WrapperGenerator(cl, emitter) gen.set_target(fn) gen.emit_header() gen.emit_arg_processing() gen.emit_call() gen.finish() return gen.wrapper_name()
def generate_dunder_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __dunder__ methods to be able to fit into the mapping\n protocol slot. This specifically means that the arguments are taken as *PyObjects and returned\n as *PyObjects.\n ' gen = WrapperGenerator(cl, emitter) gen.set_target(fn) gen.emit_header() gen.emit_arg_processing() gen.emit_call() gen.finish() return gen.wrapper_name()<|docstring|>Generates a wrapper for native __dunder__ methods to be able to fit into the mapping protocol slot. This specifically means that the arguments are taken as *PyObjects and returned as *PyObjects.<|endoftext|>
27612451da5533d7f9b93e8db6f3bd695eeb21bc846b9d2a680b0e28464854b6
def generate_bin_op_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for a native binary dunder method.\n\n The same wrapper that handles the forward method (e.g. __add__) also handles\n the corresponding reverse method (e.g. __radd__), if defined.\n\n Both arguments and the return value are PyObject *.\n ' gen = WrapperGenerator(cl, emitter) gen.set_target(fn) gen.arg_names = ['left', 'right'] wrapper_name = gen.wrapper_name() gen.emit_header() if ((fn.name not in reverse_op_methods) and (fn.name in reverse_op_method_names)): generate_bin_op_reverse_only_wrapper(cl, fn, emitter, gen) else: rmethod = reverse_op_methods[fn.name] fn_rev = cl.get_method(rmethod) if (fn_rev is None): generate_bin_op_forward_only_wrapper(cl, fn, emitter, gen) else: generate_bin_op_both_wrappers(cl, fn, fn_rev, emitter, gen) return wrapper_name
Generates a wrapper for a native binary dunder method. The same wrapper that handles the forward method (e.g. __add__) also handles the corresponding reverse method (e.g. __radd__), if defined. Both arguments and the return value are PyObject *.
mypyc/codegen/emitwrapper.py
generate_bin_op_wrapper
srittau/mypy
12,496
python
def generate_bin_op_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for a native binary dunder method.\n\n The same wrapper that handles the forward method (e.g. __add__) also handles\n the corresponding reverse method (e.g. __radd__), if defined.\n\n Both arguments and the return value are PyObject *.\n ' gen = WrapperGenerator(cl, emitter) gen.set_target(fn) gen.arg_names = ['left', 'right'] wrapper_name = gen.wrapper_name() gen.emit_header() if ((fn.name not in reverse_op_methods) and (fn.name in reverse_op_method_names)): generate_bin_op_reverse_only_wrapper(cl, fn, emitter, gen) else: rmethod = reverse_op_methods[fn.name] fn_rev = cl.get_method(rmethod) if (fn_rev is None): generate_bin_op_forward_only_wrapper(cl, fn, emitter, gen) else: generate_bin_op_both_wrappers(cl, fn, fn_rev, emitter, gen) return wrapper_name
def generate_bin_op_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for a native binary dunder method.\n\n The same wrapper that handles the forward method (e.g. __add__) also handles\n the corresponding reverse method (e.g. __radd__), if defined.\n\n Both arguments and the return value are PyObject *.\n ' gen = WrapperGenerator(cl, emitter) gen.set_target(fn) gen.arg_names = ['left', 'right'] wrapper_name = gen.wrapper_name() gen.emit_header() if ((fn.name not in reverse_op_methods) and (fn.name in reverse_op_method_names)): generate_bin_op_reverse_only_wrapper(cl, fn, emitter, gen) else: rmethod = reverse_op_methods[fn.name] fn_rev = cl.get_method(rmethod) if (fn_rev is None): generate_bin_op_forward_only_wrapper(cl, fn, emitter, gen) else: generate_bin_op_both_wrappers(cl, fn, fn_rev, emitter, gen) return wrapper_name<|docstring|>Generates a wrapper for a native binary dunder method. The same wrapper that handles the forward method (e.g. __add__) also handles the corresponding reverse method (e.g. __radd__), if defined. Both arguments and the return value are PyObject *.<|endoftext|>
c4e83fc8934d9caef85a4db9cc0a2a7495ffe9c4dab71e9841651fd0e9f3bbdf
def generate_richcompare_wrapper(cl: ClassIR, emitter: Emitter) -> Optional[str]: 'Generates a wrapper for richcompare dunder methods.' matches = sorted([name for name in RICHCOMPARE_OPS if cl.has_method(name)]) if (not matches): return None name = '{}_RichCompare_{}'.format(DUNDER_PREFIX, cl.name_prefix(emitter.names)) emitter.emit_line('static PyObject *{name}(PyObject *obj_lhs, PyObject *obj_rhs, int op) {{'.format(name=name)) emitter.emit_line('switch (op) {') for func in matches: emitter.emit_line('case {}: {{'.format(RICHCOMPARE_OPS[func])) method = cl.get_method(func) assert (method is not None) generate_wrapper_core(method, emitter, arg_names=['lhs', 'rhs']) emitter.emit_line('}') emitter.emit_line('}') emitter.emit_line('Py_INCREF(Py_NotImplemented);') emitter.emit_line('return Py_NotImplemented;') emitter.emit_line('}') return name
Generates a wrapper for richcompare dunder methods.
mypyc/codegen/emitwrapper.py
generate_richcompare_wrapper
srittau/mypy
12,496
python
def generate_richcompare_wrapper(cl: ClassIR, emitter: Emitter) -> Optional[str]: matches = sorted([name for name in RICHCOMPARE_OPS if cl.has_method(name)]) if (not matches): return None name = '{}_RichCompare_{}'.format(DUNDER_PREFIX, cl.name_prefix(emitter.names)) emitter.emit_line('static PyObject *{name}(PyObject *obj_lhs, PyObject *obj_rhs, int op) {{'.format(name=name)) emitter.emit_line('switch (op) {') for func in matches: emitter.emit_line('case {}: {{'.format(RICHCOMPARE_OPS[func])) method = cl.get_method(func) assert (method is not None) generate_wrapper_core(method, emitter, arg_names=['lhs', 'rhs']) emitter.emit_line('}') emitter.emit_line('}') emitter.emit_line('Py_INCREF(Py_NotImplemented);') emitter.emit_line('return Py_NotImplemented;') emitter.emit_line('}') return name
def generate_richcompare_wrapper(cl: ClassIR, emitter: Emitter) -> Optional[str]: matches = sorted([name for name in RICHCOMPARE_OPS if cl.has_method(name)]) if (not matches): return None name = '{}_RichCompare_{}'.format(DUNDER_PREFIX, cl.name_prefix(emitter.names)) emitter.emit_line('static PyObject *{name}(PyObject *obj_lhs, PyObject *obj_rhs, int op) {{'.format(name=name)) emitter.emit_line('switch (op) {') for func in matches: emitter.emit_line('case {}: {{'.format(RICHCOMPARE_OPS[func])) method = cl.get_method(func) assert (method is not None) generate_wrapper_core(method, emitter, arg_names=['lhs', 'rhs']) emitter.emit_line('}') emitter.emit_line('}') emitter.emit_line('Py_INCREF(Py_NotImplemented);') emitter.emit_line('return Py_NotImplemented;') emitter.emit_line('}') return name<|docstring|>Generates a wrapper for richcompare dunder methods.<|endoftext|>
00169aa8929fa7b1b49108a51025bc20a3d31869435312a5dcecf63dce2fe479
def generate_get_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __get__ methods.' name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static PyObject *{name}(PyObject *self, PyObject *instance, PyObject *owner) {{'.format(name=name)) emitter.emit_line('instance = instance ? instance : Py_None;') emitter.emit_line('return {}{}(self, instance, owner);'.format(NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_line('}') return name
Generates a wrapper for native __get__ methods.
mypyc/codegen/emitwrapper.py
generate_get_wrapper
srittau/mypy
12,496
python
def generate_get_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static PyObject *{name}(PyObject *self, PyObject *instance, PyObject *owner) {{'.format(name=name)) emitter.emit_line('instance = instance ? instance : Py_None;') emitter.emit_line('return {}{}(self, instance, owner);'.format(NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_line('}') return name
def generate_get_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static PyObject *{name}(PyObject *self, PyObject *instance, PyObject *owner) {{'.format(name=name)) emitter.emit_line('instance = instance ? instance : Py_None;') emitter.emit_line('return {}{}(self, instance, owner);'.format(NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_line('}') return name<|docstring|>Generates a wrapper for native __get__ methods.<|endoftext|>
34f31bd1e1efa052359411c64fa33b2c5cdc2d480d1a9072a64791e237ccdd10
def generate_hash_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __hash__ methods.' name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), emitter.get_group_prefix(fn.decl), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('retval', fn.ret_type, 'return -1;') if is_int_rprimitive(fn.ret_type): emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);') else: emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);') emitter.emit_dec_ref('retval', fn.ret_type) emitter.emit_line('if (PyErr_Occurred()) return -1;') emitter.emit_line('if (val == -1) return -2;') emitter.emit_line('return val;') emitter.emit_line('}') return name
Generates a wrapper for native __hash__ methods.
mypyc/codegen/emitwrapper.py
generate_hash_wrapper
srittau/mypy
12,496
python
def generate_hash_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), emitter.get_group_prefix(fn.decl), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('retval', fn.ret_type, 'return -1;') if is_int_rprimitive(fn.ret_type): emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);') else: emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);') emitter.emit_dec_ref('retval', fn.ret_type) emitter.emit_line('if (PyErr_Occurred()) return -1;') emitter.emit_line('if (val == -1) return -2;') emitter.emit_line('return val;') emitter.emit_line('}') return name
def generate_hash_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), emitter.get_group_prefix(fn.decl), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('retval', fn.ret_type, 'return -1;') if is_int_rprimitive(fn.ret_type): emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);') else: emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);') emitter.emit_dec_ref('retval', fn.ret_type) emitter.emit_line('if (PyErr_Occurred()) return -1;') emitter.emit_line('if (val == -1) return -2;') emitter.emit_line('return val;') emitter.emit_line('}') return name<|docstring|>Generates a wrapper for native __hash__ methods.<|endoftext|>
a2bde8f3fb42a1f6735e7322d4675b2f588d06103f8b00ef8981f3f397e0ef27
def generate_len_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __len__ methods.' name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), emitter.get_group_prefix(fn.decl), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('retval', fn.ret_type, 'return -1;') if is_int_rprimitive(fn.ret_type): emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);') else: emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);') emitter.emit_dec_ref('retval', fn.ret_type) emitter.emit_line('if (PyErr_Occurred()) return -1;') emitter.emit_line('return val;') emitter.emit_line('}') return name
Generates a wrapper for native __len__ methods.
mypyc/codegen/emitwrapper.py
generate_len_wrapper
srittau/mypy
12,496
python
def generate_len_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), emitter.get_group_prefix(fn.decl), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('retval', fn.ret_type, 'return -1;') if is_int_rprimitive(fn.ret_type): emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);') else: emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);') emitter.emit_dec_ref('retval', fn.ret_type) emitter.emit_line('if (PyErr_Occurred()) return -1;') emitter.emit_line('return val;') emitter.emit_line('}') return name
def generate_len_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), emitter.get_group_prefix(fn.decl), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('retval', fn.ret_type, 'return -1;') if is_int_rprimitive(fn.ret_type): emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);') else: emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);') emitter.emit_dec_ref('retval', fn.ret_type) emitter.emit_line('if (PyErr_Occurred()) return -1;') emitter.emit_line('return val;') emitter.emit_line('}') return name<|docstring|>Generates a wrapper for native __len__ methods.<|endoftext|>
4d7a4a56f7379787520d545943a29af58c77b3b53a2c1aed896dde80a90bf4ad
def generate_bool_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __bool__ methods.' name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static int {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}val = {}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('val', fn.ret_type, 'return -1;') assert is_bool_rprimitive(fn.ret_type), 'Only bool return supported for __bool__' emitter.emit_line('return val;') emitter.emit_line('}') return name
Generates a wrapper for native __bool__ methods.
mypyc/codegen/emitwrapper.py
generate_bool_wrapper
srittau/mypy
12,496
python
def generate_bool_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static int {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}val = {}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('val', fn.ret_type, 'return -1;') assert is_bool_rprimitive(fn.ret_type), 'Only bool return supported for __bool__' emitter.emit_line('return val;') emitter.emit_line('}') return name
def generate_bool_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static int {name}(PyObject *self) {{'.format(name=name)) emitter.emit_line('{}val = {}{}(self);'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('val', fn.ret_type, 'return -1;') assert is_bool_rprimitive(fn.ret_type), 'Only bool return supported for __bool__' emitter.emit_line('return val;') emitter.emit_line('}') return name<|docstring|>Generates a wrapper for native __bool__ methods.<|endoftext|>
08c5c468c6eac458ee584587eff3aa943c12dad24cceaa6477aa2d51dca3f0b9
def generate_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __delitem__.\n\n This is only called from a combined __delitem__/__setitem__ wrapper.\n ' name = '{}{}{}'.format(DUNDER_PREFIX, '__delitem__', cl.name_prefix(emitter.names)) input_args = ', '.join(('PyObject *obj_{}'.format(arg.name) for arg in fn.args)) emitter.emit_line('static int {name}({input_args}) {{'.format(name=name, input_args=input_args)) generate_set_del_item_wrapper_inner(fn, emitter, fn.args) return name
Generates a wrapper for native __delitem__. This is only called from a combined __delitem__/__setitem__ wrapper.
mypyc/codegen/emitwrapper.py
generate_del_item_wrapper
srittau/mypy
12,496
python
def generate_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __delitem__.\n\n This is only called from a combined __delitem__/__setitem__ wrapper.\n ' name = '{}{}{}'.format(DUNDER_PREFIX, '__delitem__', cl.name_prefix(emitter.names)) input_args = ', '.join(('PyObject *obj_{}'.format(arg.name) for arg in fn.args)) emitter.emit_line('static int {name}({input_args}) {{'.format(name=name, input_args=input_args)) generate_set_del_item_wrapper_inner(fn, emitter, fn.args) return name
def generate_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __delitem__.\n\n This is only called from a combined __delitem__/__setitem__ wrapper.\n ' name = '{}{}{}'.format(DUNDER_PREFIX, '__delitem__', cl.name_prefix(emitter.names)) input_args = ', '.join(('PyObject *obj_{}'.format(arg.name) for arg in fn.args)) emitter.emit_line('static int {name}({input_args}) {{'.format(name=name, input_args=input_args)) generate_set_del_item_wrapper_inner(fn, emitter, fn.args) return name<|docstring|>Generates a wrapper for native __delitem__. This is only called from a combined __delitem__/__setitem__ wrapper.<|endoftext|>
25cc8c58e729867b447717f8dd7d305674974f8fbff9bcdf83dc539e346a5a0c
def generate_set_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __setitem__ method (also works for __delitem__).\n\n This is used with the mapping protocol slot. Arguments are taken as *PyObjects and we\n return a negative C int on error.\n\n Create a separate wrapper function for __delitem__ as needed and have the\n __setitem__ wrapper call it if the value is NULL. Return the name\n of the outer (__setitem__) wrapper.\n ' method_cls = cl.get_method_and_class('__delitem__') del_name = None if (method_cls and (method_cls[1] == cl)): del_name = generate_del_item_wrapper(cl, method_cls[0], emitter) args = fn.args if (fn.name == '__delitem__'): args = (list(args) + [RuntimeArg('___value', object_rprimitive, ARG_POS)]) name = '{}{}{}'.format(DUNDER_PREFIX, '__setitem__', cl.name_prefix(emitter.names)) input_args = ', '.join(('PyObject *obj_{}'.format(arg.name) for arg in args)) emitter.emit_line('static int {name}({input_args}) {{'.format(name=name, input_args=input_args)) emitter.emit_line('if (obj_{} == NULL) {{'.format(args[2].name)) if (del_name is not None): emitter.emit_line('return {}(obj_{}, obj_{});'.format(del_name, args[0].name, args[1].name)) else: emitter.emit_line('PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name)) emitter.emit_line('if (super == NULL) return -1;') emitter.emit_line('PyObject *result = PyObject_CallMethod(super, "__delitem__", "O", obj_{});'.format(args[1].name)) emitter.emit_line('Py_DECREF(super);') emitter.emit_line('Py_XDECREF(result);') emitter.emit_line('return result == NULL ? -1 : 0;') emitter.emit_line('}') method_cls = cl.get_method_and_class('__setitem__') if (method_cls and (method_cls[1] == cl)): generate_set_del_item_wrapper_inner(fn, emitter, args) else: emitter.emit_line('PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name)) emitter.emit_line('if (super == NULL) return -1;') emitter.emit_line('PyObject *result;') if ((method_cls is None) and (cl.builtin_base is None)): msg = "'{}' object does not support item assignment".format(cl.name) emitter.emit_line('PyErr_SetString(PyExc_TypeError, "{}");'.format(msg)) emitter.emit_line('result = NULL;') else: emitter.emit_line('result = PyObject_CallMethod(super, "__setitem__", "OO", obj_{}, obj_{});'.format(args[1].name, args[2].name)) emitter.emit_line('Py_DECREF(super);') emitter.emit_line('Py_XDECREF(result);') emitter.emit_line('return result == NULL ? -1 : 0;') emitter.emit_line('}') return name
Generates a wrapper for native __setitem__ method (also works for __delitem__). This is used with the mapping protocol slot. Arguments are taken as *PyObjects and we return a negative C int on error. Create a separate wrapper function for __delitem__ as needed and have the __setitem__ wrapper call it if the value is NULL. Return the name of the outer (__setitem__) wrapper.
mypyc/codegen/emitwrapper.py
generate_set_del_item_wrapper
srittau/mypy
12,496
python
def generate_set_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __setitem__ method (also works for __delitem__).\n\n This is used with the mapping protocol slot. Arguments are taken as *PyObjects and we\n return a negative C int on error.\n\n Create a separate wrapper function for __delitem__ as needed and have the\n __setitem__ wrapper call it if the value is NULL. Return the name\n of the outer (__setitem__) wrapper.\n ' method_cls = cl.get_method_and_class('__delitem__') del_name = None if (method_cls and (method_cls[1] == cl)): del_name = generate_del_item_wrapper(cl, method_cls[0], emitter) args = fn.args if (fn.name == '__delitem__'): args = (list(args) + [RuntimeArg('___value', object_rprimitive, ARG_POS)]) name = '{}{}{}'.format(DUNDER_PREFIX, '__setitem__', cl.name_prefix(emitter.names)) input_args = ', '.join(('PyObject *obj_{}'.format(arg.name) for arg in args)) emitter.emit_line('static int {name}({input_args}) {{'.format(name=name, input_args=input_args)) emitter.emit_line('if (obj_{} == NULL) {{'.format(args[2].name)) if (del_name is not None): emitter.emit_line('return {}(obj_{}, obj_{});'.format(del_name, args[0].name, args[1].name)) else: emitter.emit_line('PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name)) emitter.emit_line('if (super == NULL) return -1;') emitter.emit_line('PyObject *result = PyObject_CallMethod(super, "__delitem__", "O", obj_{});'.format(args[1].name)) emitter.emit_line('Py_DECREF(super);') emitter.emit_line('Py_XDECREF(result);') emitter.emit_line('return result == NULL ? -1 : 0;') emitter.emit_line('}') method_cls = cl.get_method_and_class('__setitem__') if (method_cls and (method_cls[1] == cl)): generate_set_del_item_wrapper_inner(fn, emitter, args) else: emitter.emit_line('PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name)) emitter.emit_line('if (super == NULL) return -1;') emitter.emit_line('PyObject *result;') if ((method_cls is None) and (cl.builtin_base is None)): msg = "'{}' object does not support item assignment".format(cl.name) emitter.emit_line('PyErr_SetString(PyExc_TypeError, "{}");'.format(msg)) emitter.emit_line('result = NULL;') else: emitter.emit_line('result = PyObject_CallMethod(super, "__setitem__", "OO", obj_{}, obj_{});'.format(args[1].name, args[2].name)) emitter.emit_line('Py_DECREF(super);') emitter.emit_line('Py_XDECREF(result);') emitter.emit_line('return result == NULL ? -1 : 0;') emitter.emit_line('}') return name
def generate_set_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for native __setitem__ method (also works for __delitem__).\n\n This is used with the mapping protocol slot. Arguments are taken as *PyObjects and we\n return a negative C int on error.\n\n Create a separate wrapper function for __delitem__ as needed and have the\n __setitem__ wrapper call it if the value is NULL. Return the name\n of the outer (__setitem__) wrapper.\n ' method_cls = cl.get_method_and_class('__delitem__') del_name = None if (method_cls and (method_cls[1] == cl)): del_name = generate_del_item_wrapper(cl, method_cls[0], emitter) args = fn.args if (fn.name == '__delitem__'): args = (list(args) + [RuntimeArg('___value', object_rprimitive, ARG_POS)]) name = '{}{}{}'.format(DUNDER_PREFIX, '__setitem__', cl.name_prefix(emitter.names)) input_args = ', '.join(('PyObject *obj_{}'.format(arg.name) for arg in args)) emitter.emit_line('static int {name}({input_args}) {{'.format(name=name, input_args=input_args)) emitter.emit_line('if (obj_{} == NULL) {{'.format(args[2].name)) if (del_name is not None): emitter.emit_line('return {}(obj_{}, obj_{});'.format(del_name, args[0].name, args[1].name)) else: emitter.emit_line('PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name)) emitter.emit_line('if (super == NULL) return -1;') emitter.emit_line('PyObject *result = PyObject_CallMethod(super, "__delitem__", "O", obj_{});'.format(args[1].name)) emitter.emit_line('Py_DECREF(super);') emitter.emit_line('Py_XDECREF(result);') emitter.emit_line('return result == NULL ? -1 : 0;') emitter.emit_line('}') method_cls = cl.get_method_and_class('__setitem__') if (method_cls and (method_cls[1] == cl)): generate_set_del_item_wrapper_inner(fn, emitter, args) else: emitter.emit_line('PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name)) emitter.emit_line('if (super == NULL) return -1;') emitter.emit_line('PyObject *result;') if ((method_cls is None) and (cl.builtin_base is None)): msg = "'{}' object does not support item assignment".format(cl.name) emitter.emit_line('PyErr_SetString(PyExc_TypeError, "{}");'.format(msg)) emitter.emit_line('result = NULL;') else: emitter.emit_line('result = PyObject_CallMethod(super, "__setitem__", "OO", obj_{}, obj_{});'.format(args[1].name, args[2].name)) emitter.emit_line('Py_DECREF(super);') emitter.emit_line('Py_XDECREF(result);') emitter.emit_line('return result == NULL ? -1 : 0;') emitter.emit_line('}') return name<|docstring|>Generates a wrapper for native __setitem__ method (also works for __delitem__). This is used with the mapping protocol slot. Arguments are taken as *PyObjects and we return a negative C int on error. Create a separate wrapper function for __delitem__ as needed and have the __setitem__ wrapper call it if the value is NULL. Return the name of the outer (__setitem__) wrapper.<|endoftext|>
4a1de36dffe13bcb4c49106970fc582b825c1694b415891651c42bbecdee9732
def generate_contains_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: 'Generates a wrapper for a native __contains__ method.' name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static int {name}(PyObject *self, PyObject *obj_item) {{'.format(name=name)) generate_arg_check('item', fn.args[1].type, emitter, ReturnHandler('-1')) emitter.emit_line('{}val = {}{}(self, arg_item);'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('val', fn.ret_type, 'return -1;') if is_bool_rprimitive(fn.ret_type): emitter.emit_line('return val;') else: emitter.emit_line('int boolval = PyObject_IsTrue(val);') emitter.emit_dec_ref('val', fn.ret_type) emitter.emit_line('return boolval;') emitter.emit_line('}') return name
Generates a wrapper for a native __contains__ method.
mypyc/codegen/emitwrapper.py
generate_contains_wrapper
srittau/mypy
12,496
python
def generate_contains_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static int {name}(PyObject *self, PyObject *obj_item) {{'.format(name=name)) generate_arg_check('item', fn.args[1].type, emitter, ReturnHandler('-1')) emitter.emit_line('{}val = {}{}(self, arg_item);'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('val', fn.ret_type, 'return -1;') if is_bool_rprimitive(fn.ret_type): emitter.emit_line('return val;') else: emitter.emit_line('int boolval = PyObject_IsTrue(val);') emitter.emit_dec_ref('val', fn.ret_type) emitter.emit_line('return boolval;') emitter.emit_line('}') return name
def generate_contains_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str: name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names)) emitter.emit_line('static int {name}(PyObject *self, PyObject *obj_item) {{'.format(name=name)) generate_arg_check('item', fn.args[1].type, emitter, ReturnHandler('-1')) emitter.emit_line('{}val = {}{}(self, arg_item);'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names))) emitter.emit_error_check('val', fn.ret_type, 'return -1;') if is_bool_rprimitive(fn.ret_type): emitter.emit_line('return val;') else: emitter.emit_line('int boolval = PyObject_IsTrue(val);') emitter.emit_dec_ref('val', fn.ret_type) emitter.emit_line('return boolval;') emitter.emit_line('}') return name<|docstring|>Generates a wrapper for a native __contains__ method.<|endoftext|>
1862172396140f810f8c1d8c8256b58200a00644cd361d57bd35965665813455
def generate_wrapper_core(fn: FuncIR, emitter: Emitter, optional_args: Optional[List[RuntimeArg]]=None, arg_names: Optional[List[str]]=None, cleanups: Optional[List[str]]=None, traceback_code: Optional[str]=None) -> None: 'Generates the core part of a wrapper function for a native function.\n\n This expects each argument as a PyObject * named obj_{arg} as a precondition.\n It converts the PyObject *s to the necessary types, checking and unboxing if necessary,\n makes the call, then boxes the result if necessary and returns it.\n ' optional_args = (optional_args or []) cleanups = (cleanups or []) use_goto = bool((cleanups or traceback_code)) error = (ReturnHandler('NULL') if (not use_goto) else GotoHandler('fail')) arg_names = (arg_names or [arg.name for arg in fn.args]) for (arg_name, arg) in zip(arg_names, fn.args): typ = (arg.type if (arg.kind not in (ARG_STAR, ARG_STAR2)) else object_rprimitive) generate_arg_check(arg_name, typ, emitter, error, optional=(arg in optional_args)) native_args = ', '.join(('arg_{}'.format(arg) for arg in arg_names)) if (fn.ret_type.is_unboxed or use_goto): emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names), native_args)) emitter.emit_lines(*cleanups) if fn.ret_type.is_unboxed: emitter.emit_error_check('retval', fn.ret_type, 'return NULL;') emitter.emit_box('retval', 'retbox', fn.ret_type, declare_dest=True) emitter.emit_line('return {};'.format(('retbox' if fn.ret_type.is_unboxed else 'retval'))) else: emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX, fn.cname(emitter.names), native_args)) if use_goto: emitter.emit_label('fail') emitter.emit_lines(*cleanups) if traceback_code: emitter.emit_lines(traceback_code) emitter.emit_lines('return NULL;')
Generates the core part of a wrapper function for a native function. This expects each argument as a PyObject * named obj_{arg} as a precondition. It converts the PyObject *s to the necessary types, checking and unboxing if necessary, makes the call, then boxes the result if necessary and returns it.
mypyc/codegen/emitwrapper.py
generate_wrapper_core
srittau/mypy
12,496
python
def generate_wrapper_core(fn: FuncIR, emitter: Emitter, optional_args: Optional[List[RuntimeArg]]=None, arg_names: Optional[List[str]]=None, cleanups: Optional[List[str]]=None, traceback_code: Optional[str]=None) -> None: 'Generates the core part of a wrapper function for a native function.\n\n This expects each argument as a PyObject * named obj_{arg} as a precondition.\n It converts the PyObject *s to the necessary types, checking and unboxing if necessary,\n makes the call, then boxes the result if necessary and returns it.\n ' optional_args = (optional_args or []) cleanups = (cleanups or []) use_goto = bool((cleanups or traceback_code)) error = (ReturnHandler('NULL') if (not use_goto) else GotoHandler('fail')) arg_names = (arg_names or [arg.name for arg in fn.args]) for (arg_name, arg) in zip(arg_names, fn.args): typ = (arg.type if (arg.kind not in (ARG_STAR, ARG_STAR2)) else object_rprimitive) generate_arg_check(arg_name, typ, emitter, error, optional=(arg in optional_args)) native_args = ', '.join(('arg_{}'.format(arg) for arg in arg_names)) if (fn.ret_type.is_unboxed or use_goto): emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names), native_args)) emitter.emit_lines(*cleanups) if fn.ret_type.is_unboxed: emitter.emit_error_check('retval', fn.ret_type, 'return NULL;') emitter.emit_box('retval', 'retbox', fn.ret_type, declare_dest=True) emitter.emit_line('return {};'.format(('retbox' if fn.ret_type.is_unboxed else 'retval'))) else: emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX, fn.cname(emitter.names), native_args)) if use_goto: emitter.emit_label('fail') emitter.emit_lines(*cleanups) if traceback_code: emitter.emit_lines(traceback_code) emitter.emit_lines('return NULL;')
def generate_wrapper_core(fn: FuncIR, emitter: Emitter, optional_args: Optional[List[RuntimeArg]]=None, arg_names: Optional[List[str]]=None, cleanups: Optional[List[str]]=None, traceback_code: Optional[str]=None) -> None: 'Generates the core part of a wrapper function for a native function.\n\n This expects each argument as a PyObject * named obj_{arg} as a precondition.\n It converts the PyObject *s to the necessary types, checking and unboxing if necessary,\n makes the call, then boxes the result if necessary and returns it.\n ' optional_args = (optional_args or []) cleanups = (cleanups or []) use_goto = bool((cleanups or traceback_code)) error = (ReturnHandler('NULL') if (not use_goto) else GotoHandler('fail')) arg_names = (arg_names or [arg.name for arg in fn.args]) for (arg_name, arg) in zip(arg_names, fn.args): typ = (arg.type if (arg.kind not in (ARG_STAR, ARG_STAR2)) else object_rprimitive) generate_arg_check(arg_name, typ, emitter, error, optional=(arg in optional_args)) native_args = ', '.join(('arg_{}'.format(arg) for arg in arg_names)) if (fn.ret_type.is_unboxed or use_goto): emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(fn.ret_type), NATIVE_PREFIX, fn.cname(emitter.names), native_args)) emitter.emit_lines(*cleanups) if fn.ret_type.is_unboxed: emitter.emit_error_check('retval', fn.ret_type, 'return NULL;') emitter.emit_box('retval', 'retbox', fn.ret_type, declare_dest=True) emitter.emit_line('return {};'.format(('retbox' if fn.ret_type.is_unboxed else 'retval'))) else: emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX, fn.cname(emitter.names), native_args)) if use_goto: emitter.emit_label('fail') emitter.emit_lines(*cleanups) if traceback_code: emitter.emit_lines(traceback_code) emitter.emit_lines('return NULL;')<|docstring|>Generates the core part of a wrapper function for a native function. This expects each argument as a PyObject * named obj_{arg} as a precondition. It converts the PyObject *s to the necessary types, checking and unboxing if necessary, makes the call, then boxes the result if necessary and returns it.<|endoftext|>
5452df23ef39c1bf343d01db1bcd457f797697a3eab91ca58c672c93ae1506e5
def generate_arg_check(name: str, typ: RType, emitter: Emitter, error: Optional[ErrorHandler]=None, *, optional: bool=False, raise_exception: bool=True) -> None: 'Insert a runtime check for argument and unbox if necessary.\n\n The object is named PyObject *obj_{}. This is expected to generate\n a value of name arg_{} (unboxed if necessary). For each primitive a runtime\n check ensures the correct type.\n ' error = (error or AssignHandler()) if typ.is_unboxed: emitter.emit_unbox('obj_{}'.format(name), 'arg_{}'.format(name), typ, declare_dest=True, raise_exception=raise_exception, error=error, borrow=True, optional=optional) elif is_object_rprimitive(typ): if optional: emitter.emit_line('PyObject *arg_{};'.format(name)) emitter.emit_line('if (obj_{} == NULL) {{'.format(name)) emitter.emit_line('arg_{} = {};'.format(name, emitter.c_error_value(typ))) emitter.emit_lines('} else {', 'arg_{} = obj_{}; '.format(name, name), '}') else: emitter.emit_line('PyObject *arg_{} = obj_{};'.format(name, name)) else: emitter.emit_cast('obj_{}'.format(name), 'arg_{}'.format(name), typ, declare_dest=True, raise_exception=raise_exception, error=error, optional=optional)
Insert a runtime check for argument and unbox if necessary. The object is named PyObject *obj_{}. This is expected to generate a value of name arg_{} (unboxed if necessary). For each primitive a runtime check ensures the correct type.
mypyc/codegen/emitwrapper.py
generate_arg_check
srittau/mypy
12,496
python
def generate_arg_check(name: str, typ: RType, emitter: Emitter, error: Optional[ErrorHandler]=None, *, optional: bool=False, raise_exception: bool=True) -> None: 'Insert a runtime check for argument and unbox if necessary.\n\n The object is named PyObject *obj_{}. This is expected to generate\n a value of name arg_{} (unboxed if necessary). For each primitive a runtime\n check ensures the correct type.\n ' error = (error or AssignHandler()) if typ.is_unboxed: emitter.emit_unbox('obj_{}'.format(name), 'arg_{}'.format(name), typ, declare_dest=True, raise_exception=raise_exception, error=error, borrow=True, optional=optional) elif is_object_rprimitive(typ): if optional: emitter.emit_line('PyObject *arg_{};'.format(name)) emitter.emit_line('if (obj_{} == NULL) {{'.format(name)) emitter.emit_line('arg_{} = {};'.format(name, emitter.c_error_value(typ))) emitter.emit_lines('} else {', 'arg_{} = obj_{}; '.format(name, name), '}') else: emitter.emit_line('PyObject *arg_{} = obj_{};'.format(name, name)) else: emitter.emit_cast('obj_{}'.format(name), 'arg_{}'.format(name), typ, declare_dest=True, raise_exception=raise_exception, error=error, optional=optional)
def generate_arg_check(name: str, typ: RType, emitter: Emitter, error: Optional[ErrorHandler]=None, *, optional: bool=False, raise_exception: bool=True) -> None: 'Insert a runtime check for argument and unbox if necessary.\n\n The object is named PyObject *obj_{}. This is expected to generate\n a value of name arg_{} (unboxed if necessary). For each primitive a runtime\n check ensures the correct type.\n ' error = (error or AssignHandler()) if typ.is_unboxed: emitter.emit_unbox('obj_{}'.format(name), 'arg_{}'.format(name), typ, declare_dest=True, raise_exception=raise_exception, error=error, borrow=True, optional=optional) elif is_object_rprimitive(typ): if optional: emitter.emit_line('PyObject *arg_{};'.format(name)) emitter.emit_line('if (obj_{} == NULL) {{'.format(name)) emitter.emit_line('arg_{} = {};'.format(name, emitter.c_error_value(typ))) emitter.emit_lines('} else {', 'arg_{} = obj_{}; '.format(name, name), '}') else: emitter.emit_line('PyObject *arg_{} = obj_{};'.format(name, name)) else: emitter.emit_cast('obj_{}'.format(name), 'arg_{}'.format(name), typ, declare_dest=True, raise_exception=raise_exception, error=error, optional=optional)<|docstring|>Insert a runtime check for argument and unbox if necessary. The object is named PyObject *obj_{}. This is expected to generate a value of name arg_{} (unboxed if necessary). For each primitive a runtime check ensures the correct type.<|endoftext|>
8d6797d9118a61e18fee9ac22e8f3bb84012ef310bdd0a50811c65d84951416c
def set_target(self, fn: FuncIR) -> None: "Set the wrapped function.\n\n It's fine to modify the attributes initialized here later to customize\n the wrapper function.\n " self.target_name = fn.name self.target_cname = fn.cname(self.emitter.names) self.arg_names = [arg.name for arg in fn.args] self.args = fn.args[:] self.ret_type = fn.ret_type
Set the wrapped function. It's fine to modify the attributes initialized here later to customize the wrapper function.
mypyc/codegen/emitwrapper.py
set_target
srittau/mypy
12,496
python
def set_target(self, fn: FuncIR) -> None: "Set the wrapped function.\n\n It's fine to modify the attributes initialized here later to customize\n the wrapper function.\n " self.target_name = fn.name self.target_cname = fn.cname(self.emitter.names) self.arg_names = [arg.name for arg in fn.args] self.args = fn.args[:] self.ret_type = fn.ret_type
def set_target(self, fn: FuncIR) -> None: "Set the wrapped function.\n\n It's fine to modify the attributes initialized here later to customize\n the wrapper function.\n " self.target_name = fn.name self.target_cname = fn.cname(self.emitter.names) self.arg_names = [arg.name for arg in fn.args] self.args = fn.args[:] self.ret_type = fn.ret_type<|docstring|>Set the wrapped function. It's fine to modify the attributes initialized here later to customize the wrapper function.<|endoftext|>
53e5039341b87f219c240f467d46f0102a3846de7e4ad94010abfc6c46c2ba5a
def wrapper_name(self) -> str: 'Return the name of the wrapper function.' return '{}{}{}'.format(DUNDER_PREFIX, self.target_name, self.cl.name_prefix(self.emitter.names))
Return the name of the wrapper function.
mypyc/codegen/emitwrapper.py
wrapper_name
srittau/mypy
12,496
python
def wrapper_name(self) -> str: return '{}{}{}'.format(DUNDER_PREFIX, self.target_name, self.cl.name_prefix(self.emitter.names))
def wrapper_name(self) -> str: return '{}{}{}'.format(DUNDER_PREFIX, self.target_name, self.cl.name_prefix(self.emitter.names))<|docstring|>Return the name of the wrapper function.<|endoftext|>
a847f3d78060ddd9a59edcae45b9c58b05cbc763249266602997c6b040e2d49e
def use_goto(self) -> bool: 'Do we use a goto for error handling (instead of straight return)?' return bool((self.cleanups or self.traceback_code))
Do we use a goto for error handling (instead of straight return)?
mypyc/codegen/emitwrapper.py
use_goto
srittau/mypy
12,496
python
def use_goto(self) -> bool: return bool((self.cleanups or self.traceback_code))
def use_goto(self) -> bool: return bool((self.cleanups or self.traceback_code))<|docstring|>Do we use a goto for error handling (instead of straight return)?<|endoftext|>
476b93f6e6f55f869c7d9fb13127b8f21f900cf98da766aaa78b6868555628f1
def emit_header(self) -> None: 'Emit the function header of the wrapper implementation.' input_args = ', '.join(('PyObject *obj_{}'.format(arg) for arg in self.arg_names)) self.emitter.emit_line('static PyObject *{name}({input_args}) {{'.format(name=self.wrapper_name(), input_args=input_args))
Emit the function header of the wrapper implementation.
mypyc/codegen/emitwrapper.py
emit_header
srittau/mypy
12,496
python
def emit_header(self) -> None: input_args = ', '.join(('PyObject *obj_{}'.format(arg) for arg in self.arg_names)) self.emitter.emit_line('static PyObject *{name}({input_args}) {{'.format(name=self.wrapper_name(), input_args=input_args))
def emit_header(self) -> None: input_args = ', '.join(('PyObject *obj_{}'.format(arg) for arg in self.arg_names)) self.emitter.emit_line('static PyObject *{name}({input_args}) {{'.format(name=self.wrapper_name(), input_args=input_args))<|docstring|>Emit the function header of the wrapper implementation.<|endoftext|>
7f4cd9b38e627332de5c6b207437838ceec5319b1cb6686ee9d63e956e01d836
def emit_arg_processing(self, error: Optional[ErrorHandler]=None, raise_exception: bool=True) -> None: 'Emit validation and unboxing of arguments.' error = (error or self.error()) for (arg_name, arg) in zip(self.arg_names, self.args): typ = (arg.type if (arg.kind not in (ARG_STAR, ARG_STAR2)) else object_rprimitive) generate_arg_check(arg_name, typ, self.emitter, error, raise_exception=raise_exception, optional=(arg in self.optional_args))
Emit validation and unboxing of arguments.
mypyc/codegen/emitwrapper.py
emit_arg_processing
srittau/mypy
12,496
python
def emit_arg_processing(self, error: Optional[ErrorHandler]=None, raise_exception: bool=True) -> None: error = (error or self.error()) for (arg_name, arg) in zip(self.arg_names, self.args): typ = (arg.type if (arg.kind not in (ARG_STAR, ARG_STAR2)) else object_rprimitive) generate_arg_check(arg_name, typ, self.emitter, error, raise_exception=raise_exception, optional=(arg in self.optional_args))
def emit_arg_processing(self, error: Optional[ErrorHandler]=None, raise_exception: bool=True) -> None: error = (error or self.error()) for (arg_name, arg) in zip(self.arg_names, self.args): typ = (arg.type if (arg.kind not in (ARG_STAR, ARG_STAR2)) else object_rprimitive) generate_arg_check(arg_name, typ, self.emitter, error, raise_exception=raise_exception, optional=(arg in self.optional_args))<|docstring|>Emit validation and unboxing of arguments.<|endoftext|>
a2edc369503f3529f5f936ae90dec8218d51bed871ae1f3e24b0b3f578417439
def emit_call(self, not_implemented_handler: str='') -> None: "Emit call to the wrapper function.\n\n If not_implemented_handler is non-empty, use this C code to handle\n a NotImplemented return value (if it's possible based on the return type).\n " native_args = ', '.join(('arg_{}'.format(arg) for arg in self.arg_names)) ret_type = self.ret_type emitter = self.emitter if (ret_type.is_unboxed or self.use_goto()): emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(ret_type), NATIVE_PREFIX, self.target_cname, native_args)) emitter.emit_lines(*self.cleanups) if ret_type.is_unboxed: emitter.emit_error_check('retval', ret_type, 'return NULL;') emitter.emit_box('retval', 'retbox', ret_type, declare_dest=True) emitter.emit_line('return {};'.format(('retbox' if ret_type.is_unboxed else 'retval'))) elif (not_implemented_handler and (not isinstance(ret_type, RInstance))): emitter.emit_line('PyObject *retbox = {}{}({});'.format(NATIVE_PREFIX, self.target_cname, native_args)) emitter.emit_lines('if (retbox == Py_NotImplemented) {', not_implemented_handler, '}', 'return retbox;') else: emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX, self.target_cname, native_args))
Emit call to the wrapper function. If not_implemented_handler is non-empty, use this C code to handle a NotImplemented return value (if it's possible based on the return type).
mypyc/codegen/emitwrapper.py
emit_call
srittau/mypy
12,496
python
def emit_call(self, not_implemented_handler: str='') -> None: "Emit call to the wrapper function.\n\n If not_implemented_handler is non-empty, use this C code to handle\n a NotImplemented return value (if it's possible based on the return type).\n " native_args = ', '.join(('arg_{}'.format(arg) for arg in self.arg_names)) ret_type = self.ret_type emitter = self.emitter if (ret_type.is_unboxed or self.use_goto()): emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(ret_type), NATIVE_PREFIX, self.target_cname, native_args)) emitter.emit_lines(*self.cleanups) if ret_type.is_unboxed: emitter.emit_error_check('retval', ret_type, 'return NULL;') emitter.emit_box('retval', 'retbox', ret_type, declare_dest=True) emitter.emit_line('return {};'.format(('retbox' if ret_type.is_unboxed else 'retval'))) elif (not_implemented_handler and (not isinstance(ret_type, RInstance))): emitter.emit_line('PyObject *retbox = {}{}({});'.format(NATIVE_PREFIX, self.target_cname, native_args)) emitter.emit_lines('if (retbox == Py_NotImplemented) {', not_implemented_handler, '}', 'return retbox;') else: emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX, self.target_cname, native_args))
def emit_call(self, not_implemented_handler: str='') -> None: "Emit call to the wrapper function.\n\n If not_implemented_handler is non-empty, use this C code to handle\n a NotImplemented return value (if it's possible based on the return type).\n " native_args = ', '.join(('arg_{}'.format(arg) for arg in self.arg_names)) ret_type = self.ret_type emitter = self.emitter if (ret_type.is_unboxed or self.use_goto()): emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(ret_type), NATIVE_PREFIX, self.target_cname, native_args)) emitter.emit_lines(*self.cleanups) if ret_type.is_unboxed: emitter.emit_error_check('retval', ret_type, 'return NULL;') emitter.emit_box('retval', 'retbox', ret_type, declare_dest=True) emitter.emit_line('return {};'.format(('retbox' if ret_type.is_unboxed else 'retval'))) elif (not_implemented_handler and (not isinstance(ret_type, RInstance))): emitter.emit_line('PyObject *retbox = {}{}({});'.format(NATIVE_PREFIX, self.target_cname, native_args)) emitter.emit_lines('if (retbox == Py_NotImplemented) {', not_implemented_handler, '}', 'return retbox;') else: emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX, self.target_cname, native_args))<|docstring|>Emit call to the wrapper function. If not_implemented_handler is non-empty, use this C code to handle a NotImplemented return value (if it's possible based on the return type).<|endoftext|>
12eb4f3f3fa68cf1f17c1d85526c0f6cfbb824eb3bb4038d605451aebd25d83c
def error(self) -> ErrorHandler: 'Figure out how to deal with errors in the wrapper.' if (self.cleanups or self.traceback_code): return GotoHandler('fail') else: return ReturnHandler('NULL')
Figure out how to deal with errors in the wrapper.
mypyc/codegen/emitwrapper.py
error
srittau/mypy
12,496
python
def error(self) -> ErrorHandler: if (self.cleanups or self.traceback_code): return GotoHandler('fail') else: return ReturnHandler('NULL')
def error(self) -> ErrorHandler: if (self.cleanups or self.traceback_code): return GotoHandler('fail') else: return ReturnHandler('NULL')<|docstring|>Figure out how to deal with errors in the wrapper.<|endoftext|>
220c6d9997885388f5cbfb5cea70a450400821223a163663942cf1203b48f89d
def emit_error_handling(self) -> None: 'Emit error handling block at the end of the wrapper, if needed.' emitter = self.emitter if self.use_goto(): emitter.emit_label('fail') emitter.emit_lines(*self.cleanups) if self.traceback_code: emitter.emit_line(self.traceback_code) emitter.emit_line('return NULL;')
Emit error handling block at the end of the wrapper, if needed.
mypyc/codegen/emitwrapper.py
emit_error_handling
srittau/mypy
12,496
python
def emit_error_handling(self) -> None: emitter = self.emitter if self.use_goto(): emitter.emit_label('fail') emitter.emit_lines(*self.cleanups) if self.traceback_code: emitter.emit_line(self.traceback_code) emitter.emit_line('return NULL;')
def emit_error_handling(self) -> None: emitter = self.emitter if self.use_goto(): emitter.emit_label('fail') emitter.emit_lines(*self.cleanups) if self.traceback_code: emitter.emit_line(self.traceback_code) emitter.emit_line('return NULL;')<|docstring|>Emit error handling block at the end of the wrapper, if needed.<|endoftext|>
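Taken together, the set_target, emit_header, emit_arg_processing, emit_call and emit_error_handling records above describe one pass of wrapper generation. A minimal sketch of how a caller might drive them, assuming a WrapperGenerator-style class that exposes exactly these methods; the constructor arguments and the closing brace are assumptions, not verbatim mypyc code.

# Hedged sketch: cl, fn and emitter are assumed to be an existing ClassIR, FuncIR and Emitter.
gen = WrapperGenerator(cl, emitter)   # hypothetical constructor
gen.set_target(fn)                    # record name, cname, args and return type
gen.emit_header()                     # static PyObject *<wrapper>(...) {
gen.emit_arg_processing()             # check/unbox each PyObject *obj_<arg>
gen.emit_call()                       # call the native function and box the result
gen.emit_error_handling()             # optional 'fail:' label, cleanups, return NULL
emitter.emit_line('}')                # close the wrapper body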
6d76747af264ddfcda136ba2579155d115193b2bbe698bda6a4f87f52c16a184
def add_target(graph_path, target_name, target_list, sep=' '): "Add a target to all the graphs contains in hdf5 files\n\n Args:\n graph_path (str, list(str)): either a directory containing all the hdf5 files,\n or a single hdf5 filename\n or a list of hdf5 filenames\n target_name (str): the name of the new target\n target_list (str): name of the file containing the data\n sep (str, optional): separator in target list. Defaults to ' '.\n\n Notes:\n The input target list should respect the following format :\n 1ATN_xxx-1 0\n 1ATN_xxx-2 1\n 1ATN_xxx-3 0\n 1ATN_xxx-4 0\n " target_dict = {} labels = np.loadtxt(target_list, delimiter=sep, usecols=[0], dtype=str) values = np.loadtxt(target_list, delimiter=sep, usecols=[1]) for (label, value) in zip(labels, values): target_dict[label] = value if os.path.isdir(graph_path): graphs = glob.glob(f'{graph_path}/*.hdf5') elif os.path.isfile(graph_path): graphs = [graph_path] else: assert isinstance(graph_path, list) assert os.path.isfile(graph_path[0]) for hdf5 in graphs: print(hdf5) try: f5 = h5py.File(hdf5, 'a') for (model, _) in target_dict.items(): if (model not in f5): raise ValueError(f'{hdf5} does not contain an entry named {model}') try: model_gp = f5[model] if ('score' not in model_gp): model_gp.create_group('score') group = f5[f'{model}/score/'] if (target_name in group.keys()): del group[target_name] group.create_dataset(target_name, data=target_dict[model]) except BaseException: print(f'no graph for {model}') f5.close() except BaseException: print(f'no graph for {hdf5}')
Add a target to all the graphs contained in hdf5 files Args: graph_path (str, list(str)): either a directory containing all the hdf5 files, or a single hdf5 filename or a list of hdf5 filenames target_name (str): the name of the new target target_list (str): name of the file containing the data sep (str, optional): separator in target list. Defaults to ' '. Notes: The input target list should respect the following format: 1ATN_xxx-1 0 1ATN_xxx-2 1 1ATN_xxx-3 0 1ATN_xxx-4 0
deeprankcore/tools/CustomizeGraph.py
add_target
DeepRank/deeprank-gnn-2
0
python
def add_target(graph_path, target_name, target_list, sep=' '): "Add a target to all the graphs contains in hdf5 files\n\n Args:\n graph_path (str, list(str)): either a directory containing all the hdf5 files,\n or a single hdf5 filename\n or a list of hdf5 filenames\n target_name (str): the name of the new target\n target_list (str): name of the file containing the data\n sep (str, optional): separator in target list. Defaults to ' '.\n\n Notes:\n The input target list should respect the following format :\n 1ATN_xxx-1 0\n 1ATN_xxx-2 1\n 1ATN_xxx-3 0\n 1ATN_xxx-4 0\n " target_dict = {} labels = np.loadtxt(target_list, delimiter=sep, usecols=[0], dtype=str) values = np.loadtxt(target_list, delimiter=sep, usecols=[1]) for (label, value) in zip(labels, values): target_dict[label] = value if os.path.isdir(graph_path): graphs = glob.glob(f'{graph_path}/*.hdf5') elif os.path.isfile(graph_path): graphs = [graph_path] else: assert isinstance(graph_path, list) assert os.path.isfile(graph_path[0]) for hdf5 in graphs: print(hdf5) try: f5 = h5py.File(hdf5, 'a') for (model, _) in target_dict.items(): if (model not in f5): raise ValueError(f'{hdf5} does not contain an entry named {model}') try: model_gp = f5[model] if ('score' not in model_gp): model_gp.create_group('score') group = f5[f'{model}/score/'] if (target_name in group.keys()): del group[target_name] group.create_dataset(target_name, data=target_dict[model]) except BaseException: print(f'no graph for {model}') f5.close() except BaseException: print(f'no graph for {hdf5}')
def add_target(graph_path, target_name, target_list, sep=' '): "Add a target to all the graphs contains in hdf5 files\n\n Args:\n graph_path (str, list(str)): either a directory containing all the hdf5 files,\n or a single hdf5 filename\n or a list of hdf5 filenames\n target_name (str): the name of the new target\n target_list (str): name of the file containing the data\n sep (str, optional): separator in target list. Defaults to ' '.\n\n Notes:\n The input target list should respect the following format :\n 1ATN_xxx-1 0\n 1ATN_xxx-2 1\n 1ATN_xxx-3 0\n 1ATN_xxx-4 0\n " target_dict = {} labels = np.loadtxt(target_list, delimiter=sep, usecols=[0], dtype=str) values = np.loadtxt(target_list, delimiter=sep, usecols=[1]) for (label, value) in zip(labels, values): target_dict[label] = value if os.path.isdir(graph_path): graphs = glob.glob(f'{graph_path}/*.hdf5') elif os.path.isfile(graph_path): graphs = [graph_path] else: assert isinstance(graph_path, list) assert os.path.isfile(graph_path[0]) for hdf5 in graphs: print(hdf5) try: f5 = h5py.File(hdf5, 'a') for (model, _) in target_dict.items(): if (model not in f5): raise ValueError(f'{hdf5} does not contain an entry named {model}') try: model_gp = f5[model] if ('score' not in model_gp): model_gp.create_group('score') group = f5[f'{model}/score/'] if (target_name in group.keys()): del group[target_name] group.create_dataset(target_name, data=target_dict[model]) except BaseException: print(f'no graph for {model}') f5.close() except BaseException: print(f'no graph for {hdf5}')<|docstring|>Add a target to all the graphs contains in hdf5 files Args: graph_path (str, list(str)): either a directory containing all the hdf5 files, or a single hdf5 filename or a list of hdf5 filenames target_name (str): the name of the new target target_list (str): name of the file containing the data sep (str, optional): separator in target list. Defaults to ' '. Notes: The input target list should respect the following format : 1ATN_xxx-1 0 1ATN_xxx-2 1 1ATN_xxx-3 0 1ATN_xxx-4 0<|endoftext|>
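A minimal usage sketch for add_target; the directory, target name and model labels below are illustrative only, and each hdf5 file is assumed to already contain one group per model name.

# targets.txt follows the documented '<model> <value>' format
with open('targets.txt', 'w') as f:
    f.write('1ATN_xxx-1 0\n1ATN_xxx-2 1\n1ATN_xxx-3 0\n')

# writes each value to <model>/score/binary_label in every *.hdf5 found under graphs/
add_target('graphs/', 'binary_label', 'targets.txt', sep=' ')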
5cb1c0b3d4515a4a43d7e7e69f6e2548d63c9feb01095787dea8e181230ea47d
def start_payment(request, pk): 'Start payment for a form' api = PolyBanking(settings.POLYBANKING_SERVER, settings.POLYBANKING_ID, settings.POLYBANKING_KEY_REQUEST, settings.POLYBANKING_KEY_IPN, settings.POLYBANKING_KEY_API) entry = get_object_or_404(FormEntry, pk=pk) if (not entry.form.need_payment): raise Http404 payment = entry.get_payment() error = '' if (not entry.form.can_start_payment()): error = 'form_full' elif (payment.started and (not payment.is_valid)): request.session['current_payment'] = payment.id return HttpResponseRedirect(payment.redirect_url) if payment.is_valid: error = 'payment_already_ok' if (not error): for field in entry.form.fields.all(): if (field.field_type == EMAIL): email_to = payment.entry.fields.filter(field_id=field.id).first().value (error, url) = api.new_transaction(str((entry.form.amount * 100)), payment.reference(), ((unicode(entry.form) + ' - ') + unicode(email_to))) if (error != 'OK'): return render_to_response('forms/error.html', {'error': error}, context_instance=RequestContext(request)) else: payment.redirect_url = url payment.started = True payment.save() request.session['current_payment'] = payment.id return HttpResponseRedirect(url)
Start payment for a form
mezzanine/forms/views.py
start_payment
agepoly/mezzanine
0
python
def start_payment(request, pk): api = PolyBanking(settings.POLYBANKING_SERVER, settings.POLYBANKING_ID, settings.POLYBANKING_KEY_REQUEST, settings.POLYBANKING_KEY_IPN, settings.POLYBANKING_KEY_API) entry = get_object_or_404(FormEntry, pk=pk) if (not entry.form.need_payment): raise Http404 payment = entry.get_payment() error = '' if (not entry.form.can_start_payment()): error = 'form_full' elif (payment.started and (not payment.is_valid)): request.session['current_payment'] = payment.id return HttpResponseRedirect(payment.redirect_url) if payment.is_valid: error = 'payment_already_ok' if (not error): for field in entry.form.fields.all(): if (field.field_type == EMAIL): email_to = payment.entry.fields.filter(field_id=field.id).first().value (error, url) = api.new_transaction(str((entry.form.amount * 100)), payment.reference(), ((unicode(entry.form) + ' - ') + unicode(email_to))) if (error != 'OK'): return render_to_response('forms/error.html', {'error': error}, context_instance=RequestContext(request)) else: payment.redirect_url = url payment.started = True payment.save() request.session['current_payment'] = payment.id return HttpResponseRedirect(url)
def start_payment(request, pk): api = PolyBanking(settings.POLYBANKING_SERVER, settings.POLYBANKING_ID, settings.POLYBANKING_KEY_REQUEST, settings.POLYBANKING_KEY_IPN, settings.POLYBANKING_KEY_API) entry = get_object_or_404(FormEntry, pk=pk) if (not entry.form.need_payment): raise Http404 payment = entry.get_payment() error = '' if (not entry.form.can_start_payment()): error = 'form_full' elif (payment.started and (not payment.is_valid)): request.session['current_payment'] = payment.id return HttpResponseRedirect(payment.redirect_url) if payment.is_valid: error = 'payment_already_ok' if (not error): for field in entry.form.fields.all(): if (field.field_type == EMAIL): email_to = payment.entry.fields.filter(field_id=field.id).first().value (error, url) = api.new_transaction(str((entry.form.amount * 100)), payment.reference(), ((unicode(entry.form) + ' - ') + unicode(email_to))) if (error != 'OK'): return render_to_response('forms/error.html', {'error': error}, context_instance=RequestContext(request)) else: payment.redirect_url = url payment.started = True payment.save() request.session['current_payment'] = payment.id return HttpResponseRedirect(url)<|docstring|>Start payment for a form<|endoftext|>
9d3d4215975006c13edb83eda3db04eaa9bbcfa1d9b5e3089c28444afe280da0
def partial_schema(schema, filtered_fields): '\n Validator for part of a schema, ignoring some fields\n\n :param schema: the Schema\n :param filtered_fields: fields to filter out\n ' return Schema({k: v for (k, v) in schema.schema.items() if (getattr(k, 'schema', k) not in filtered_fields)}, extra=ALLOW_EXTRA)
Validator for part of a schema, ignoring some fields :param schema: the Schema :param filtered_fields: fields to filter out
perch/validators.py
partial_schema
OpenPermissions/perch
3
python
def partial_schema(schema, filtered_fields): '\n Validator for part of a schema, ignoring some fields\n\n :param schema: the Schema\n :param filtered_fields: fields to filter out\n ' return Schema({k: v for (k, v) in schema.schema.items() if (getattr(k, 'schema', k) not in filtered_fields)}, extra=ALLOW_EXTRA)
def partial_schema(schema, filtered_fields): '\n Validator for part of a schema, ignoring some fields\n\n :param schema: the Schema\n :param filtered_fields: fields to filter out\n ' return Schema({k: v for (k, v) in schema.schema.items() if (getattr(k, 'schema', k) not in filtered_fields)}, extra=ALLOW_EXTRA)<|docstring|>Validator for part of a schema, ignoring some fields :param schema: the Schema :param filtered_fields: fields to filter out<|endoftext|>
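A hedged usage sketch for partial_schema with voluptuous; user_schema is an invented example, not part of the original module.

from voluptuous import Schema, Required

user_schema = Schema({Required('name'): str, Required('email'): valid_email})
# Drop the 'email' requirement, e.g. for partial updates; extra keys are allowed.
update_schema = partial_schema(user_schema, ['email'])
update_schema({'name': 'Alice', 'state': 'approved'})  # validates without 'email'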
3266c3cc9c1a9b5a642e880a49eb5d55e0e94d9b6f60bc2c249e477a916e9f11
def valid_email(email): 'Validate email.' if ('@' not in email): raise Invalid('This email is invalid.') return email
Validate email.
perch/validators.py
valid_email
OpenPermissions/perch
3
python
def valid_email(email): if ('@' not in email): raise Invalid('This email is invalid.') return email
def valid_email(email): if ('@' not in email): raise Invalid('This email is invalid.') return email<|docstring|>Validate email.<|endoftext|>
7f50d557f8ba04a7852d6346197a70f4e92924dbd66b89dfcd9655d1e79533c7
def validate_hex(color): '\n Validate string is a hex color code\n ' hex_re = '^#(?:[0-9a-fA-F]{3}){1,2}$' if (not re.match(hex_re, color)): raise Invalid('Invalid Hex Color') return color
Validate string is a hex color code
perch/validators.py
validate_hex
OpenPermissions/perch
3
python
def validate_hex(color): '\n \n ' hex_re = '^#(?:[0-9a-fA-F]{3}){1,2}$' if (not re.match(hex_re, color)): raise Invalid('Invalid Hex Color') return color
def validate_hex(color): '\n \n ' hex_re = '^#(?:[0-9a-fA-F]{3}){1,2}$' if (not re.match(hex_re, color)): raise Invalid('Invalid Hex Color') return color<|docstring|>Validate string is a hex color code<|endoftext|>
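Illustrative calls for validate_hex (the colour values are arbitrary examples).

validate_hex('#fff')       # returns '#fff' (3-digit form)
validate_hex('#00ff00')    # returns '#00ff00' (6-digit form)
validate_hex('00ff00')     # raises Invalid('Invalid Hex Color'): missing leading '#'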
bb4c12579594ea012f26a31d999dcd149c7fe856ba19c3bd2fb4211fbe8788fa
def validate_url(url): 'Validate URL is valid\n\n NOTE: only support http & https\n ' schemes = ['http', 'https'] netloc_re = re.compile('^(?:\\S+(?::\\S*)?@)?(?:[a-z0-9]|[a-z0-9][a-z0-9\\-]{0,61}[a-z0-9])(?:\\.(?:[a-z0-9]|[a-z0-9][a-z0-9\\-]{0,61}[a-z0-9]))*(?::[0-9]{2,5})?$', re.IGNORECASE) try: (scheme, netloc, path, query, fragment) = urlsplit(url) except ValueError: raise Invalid('Invalid URL') if (scheme not in schemes): raise Invalid('Missing URL scheme') if (not netloc_re.search(netloc)): raise Invalid('Invalid URL') return url
Validate that a URL is valid NOTE: only supports http & https
perch/validators.py
validate_url
OpenPermissions/perch
3
python
def validate_url(url): 'Validate URL is valid\n\n NOTE: only support http & https\n ' schemes = ['http', 'https'] netloc_re = re.compile('^(?:\\S+(?::\\S*)?@)?(?:[a-z0-9]|[a-z0-9][a-z0-9\\-]{0,61}[a-z0-9])(?:\\.(?:[a-z0-9]|[a-z0-9][a-z0-9\\-]{0,61}[a-z0-9]))*(?::[0-9]{2,5})?$', re.IGNORECASE) try: (scheme, netloc, path, query, fragment) = urlsplit(url) except ValueError: raise Invalid('Invalid URL') if (scheme not in schemes): raise Invalid('Missing URL scheme') if (not netloc_re.search(netloc)): raise Invalid('Invalid URL') return url
def validate_url(url): 'Validate URL is valid\n\n NOTE: only support http & https\n ' schemes = ['http', 'https'] netloc_re = re.compile('^(?:\\S+(?::\\S*)?@)?(?:[a-z0-9]|[a-z0-9][a-z0-9\\-]{0,61}[a-z0-9])(?:\\.(?:[a-z0-9]|[a-z0-9][a-z0-9\\-]{0,61}[a-z0-9]))*(?::[0-9]{2,5})?$', re.IGNORECASE) try: (scheme, netloc, path, query, fragment) = urlsplit(url) except ValueError: raise Invalid('Invalid URL') if (scheme not in schemes): raise Invalid('Missing URL scheme') if (not netloc_re.search(netloc)): raise Invalid('Invalid URL') return url<|docstring|>Validate URL is valid NOTE: only support http & https<|endoftext|>
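Illustrative calls for validate_url (the URLs are arbitrary examples).

validate_url('https://example.com/docs?page=1')   # returned unchanged
validate_url('ftp://example.com/file')            # raises Invalid: only http/https are accepted
validate_url('example.com')                       # raises Invalid('Missing URL scheme')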
eaf0a9e4a6296a8be66547737dafa3846c93e7ca9adb837b1555779699d8d414
def validate_reference_links(reference_links): '\n Vaidate reference links data structure\n\n Expected data structure:\n {\n "links": {\n id_type1: url1,\n id_type2: url2\n },\n "redirect_id_type": id_type1 | id1_type2\n }\n\n where links is an optional key but must be a dictionary with id types to\n URLs if it exists, and redirect_id_type is optional but if it exists,\n it must point to one of the existing id types in the links object. It is\n used to set a default redirect URL that is used by the resolution service.\n ' allowed_keys = ['links', 'redirect_id_type'] if (not isinstance(reference_links, dict)): raise Invalid('Expected reference_links to be an object') if (('links' in reference_links) and (not isinstance(reference_links['links'], dict))): raise Invalid('Expected links in reference_links to be an object') links = reference_links.get('links', {}) redirect_id_type = reference_links.get('redirect_id_type') for key in reference_links: if (key not in allowed_keys): raise Invalid('Key {} is not allowed'.format(key)) if (redirect_id_type and (redirect_id_type not in links)): raise Invalid("Redirect ID type must point to one of the links' ID types") [validate_url(url) for url in links.values()] return reference_links
Validate reference links data structure Expected data structure: { "links": { id_type1: url1, id_type2: url2 }, "redirect_id_type": id_type1 | id_type2 } where links is an optional key but must be a dictionary with id types to URLs if it exists, and redirect_id_type is optional but if it exists, it must point to one of the existing id types in the links object. It is used to set a default redirect URL that is used by the resolution service.
perch/validators.py
validate_reference_links
OpenPermissions/perch
3
python
def validate_reference_links(reference_links): '\n Vaidate reference links data structure\n\n Expected data structure:\n {\n "links": {\n id_type1: url1,\n id_type2: url2\n },\n "redirect_id_type": id_type1 | id1_type2\n }\n\n where links is an optional key but must be a dictionary with id types to\n URLs if it exists, and redirect_id_type is optional but if it exists,\n it must point to one of the existing id types in the links object. It is\n used to set a default redirect URL that is used by the resolution service.\n ' allowed_keys = ['links', 'redirect_id_type'] if (not isinstance(reference_links, dict)): raise Invalid('Expected reference_links to be an object') if (('links' in reference_links) and (not isinstance(reference_links['links'], dict))): raise Invalid('Expected links in reference_links to be an object') links = reference_links.get('links', {}) redirect_id_type = reference_links.get('redirect_id_type') for key in reference_links: if (key not in allowed_keys): raise Invalid('Key {} is not allowed'.format(key)) if (redirect_id_type and (redirect_id_type not in links)): raise Invalid("Redirect ID type must point to one of the links' ID types") [validate_url(url) for url in links.values()] return reference_links
def validate_reference_links(reference_links): '\n Vaidate reference links data structure\n\n Expected data structure:\n {\n "links": {\n id_type1: url1,\n id_type2: url2\n },\n "redirect_id_type": id_type1 | id1_type2\n }\n\n where links is an optional key but must be a dictionary with id types to\n URLs if it exists, and redirect_id_type is optional but if it exists,\n it must point to one of the existing id types in the links object. It is\n used to set a default redirect URL that is used by the resolution service.\n ' allowed_keys = ['links', 'redirect_id_type'] if (not isinstance(reference_links, dict)): raise Invalid('Expected reference_links to be an object') if (('links' in reference_links) and (not isinstance(reference_links['links'], dict))): raise Invalid('Expected links in reference_links to be an object') links = reference_links.get('links', {}) redirect_id_type = reference_links.get('redirect_id_type') for key in reference_links: if (key not in allowed_keys): raise Invalid('Key {} is not allowed'.format(key)) if (redirect_id_type and (redirect_id_type not in links)): raise Invalid("Redirect ID type must point to one of the links' ID types") [validate_url(url) for url in links.values()] return reference_links<|docstring|>Vaidate reference links data structure Expected data structure: { "links": { id_type1: url1, id_type2: url2 }, "redirect_id_type": id_type1 | id1_type2 } where links is an optional key but must be a dictionary with id types to URLs if it exists, and redirect_id_type is optional but if it exists, it must point to one of the existing id types in the links object. It is used to set a default redirect URL that is used by the resolution service.<|endoftext|>
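An example payload that passes validate_reference_links; the id types and URLs are illustrative only.

reference_links = {
    'links': {
        'doi': 'https://resolver.example.com/doi',
        'isbn': 'https://resolver.example.com/isbn',
    },
    'redirect_id_type': 'doi',   # must name one of the id types under 'links'
}
validate_reference_links(reference_links)   # returns the dict unchanged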
5e2faa4196553647597c84b354ef59d989cf1f3ace75e4e259c5d515602d57ae
def _validate_state(state, valid_states): 'Validate a state string' if (state in State): return state.name elif (state in valid_states): return state else: raise Invalid('Invalid state')
Validate a state string
perch/validators.py
_validate_state
OpenPermissions/perch
3
python
def _validate_state(state, valid_states): if (state in State): return state.name elif (state in valid_states): return state else: raise Invalid('Invalid state')
def _validate_state(state, valid_states): if (state in State): return state.name elif (state in valid_states): return state else: raise Invalid('Invalid state')<|docstring|>Validate a state string<|endoftext|>
975f4949c126cfdcfcc1c0e8d47bc71796f2a85507c67bb2a5b9ff4a8f299cdd
def __init__(self, key=None): '\n init key variable\n :param key:\n :return:\n ' self.key = key self._set_key_parms(['type']) self._set_prhb_parms(['type'])
init key variable :param key: :return:
skp_edu_docker/code/master/workflow/evalconf/workflow_evalconf.py
__init__
TensorMSA/hoyai_docker
8
python
def __init__(self, key=None): '\n init key variable\n :param key:\n :return:\n ' self.key = key self._set_key_parms(['type']) self._set_prhb_parms(['type'])
def __init__(self, key=None): '\n init key variable\n :param key:\n :return:\n ' self.key = key self._set_key_parms(['type']) self._set_prhb_parms(['type'])<|docstring|>init key variable :param key: :return:<|endoftext|>
0d2e146200072634e06a6735c567e54dd01defb220d106bc0c9c9a650ec1c81d
def get_eval_type(self): '\n get eval type ( regression, classification.. )\n :return:\n ' if ('conf' not in self.__dict__): self.conf = self.get_view_obj(self.key) return self.conf.get('type')
get eval type ( regression, classification.. ) :return:
skp_edu_docker/code/master/workflow/evalconf/workflow_evalconf.py
get_eval_type
TensorMSA/hoyai_docker
8
python
def get_eval_type(self): '\n get eval type ( regression, classification.. )\n :return:\n ' if ('conf' not in self.__dict__): self.conf = self.get_view_obj(self.key) return self.conf.get('type')
def get_eval_type(self): '\n get eval type ( regression, classification.. )\n :return:\n ' if ('conf' not in self.__dict__): self.conf = self.get_view_obj(self.key) return self.conf.get('type')<|docstring|>get eval type ( regression, classification.. ) :return:<|endoftext|>
8a76778d847e4a2a1507e0b69d250a026a22738c4f19ff7f4f6c721969cd9fda
def read_file(path): 'Read a file and return its contents as a string.' with open(path, 'r') as f: return f.read()
Read a file and return its contents as a string.
zazu/style.py
read_file
stopthatcow/zazu
2
python
def read_file(path): with open(path, 'r') as f: return f.read()
def read_file(path): with open(path, 'r') as f: return f.read()<|docstring|>Read a file and return its contents as a string.<|endoftext|>
2ba86b8d49a971e1e7b23e264dca8c3b7ddf238cf8b55097b28e362796afabfd
def write_file(path, _, styled_string): 'Write styled_string string to a file.' with open(path, 'w') as f: return f.write(styled_string)
Write styled_string string to a file.
zazu/style.py
write_file
stopthatcow/zazu
2
python
def write_file(path, _, styled_string): with open(path, 'w') as f: return f.write(styled_string)
def write_file(path, _, styled_string): with open(path, 'w') as f: return f.write(styled_string)<|docstring|>Write styled_string string to a file.<|endoftext|>
67ac19e01b23d602e3c83bdc00d2cbd659495dfa4ccc011e23c0a2b6d7a0d6a8
def stage_patch(path, input_string, styled_string): 'Create a patch between input_string and output_string and add the patch to the git staging area.\n\n Args:\n path: the path of the file being patched.\n input_string: the current state of the file in the git stage.\n styled_string: the properly styled string to stage.\n\n ' if (read_file(path) == input_string): write_file(path, '', styled_string) with git_lock: zazu.util.check_output(['git', 'add', path]) else: input_lines = input_string.splitlines() styled_lines = styled_string.splitlines() patch = difflib.unified_diff(input_lines, styled_lines, ('a/' + path), ('b/' + path), lineterm='') patch_string = ('\n'.join(patch) + '\n') if (input_string[(- 1)] != '\n'): raise click.ClickException('File "{}" must have a trailing newline'.format(path)) with git_lock: zazu.util.check_popen(args=['git', 'apply', '--cached', '--verbose', '-'], stdin_str=patch_string, universal_newlines=True)
Create a patch between input_string and output_string and add the patch to the git staging area. Args: path: the path of the file being patched. input_string: the current state of the file in the git stage. styled_string: the properly styled string to stage.
zazu/style.py
stage_patch
stopthatcow/zazu
2
python
def stage_patch(path, input_string, styled_string): 'Create a patch between input_string and output_string and add the patch to the git staging area.\n\n Args:\n path: the path of the file being patched.\n input_string: the current state of the file in the git stage.\n styled_string: the properly styled string to stage.\n\n ' if (read_file(path) == input_string): write_file(path, '', styled_string) with git_lock: zazu.util.check_output(['git', 'add', path]) else: input_lines = input_string.splitlines() styled_lines = styled_string.splitlines() patch = difflib.unified_diff(input_lines, styled_lines, ('a/' + path), ('b/' + path), lineterm='') patch_string = ('\n'.join(patch) + '\n') if (input_string[(- 1)] != '\n'): raise click.ClickException('File "{}" must have a trailing newline'.format(path)) with git_lock: zazu.util.check_popen(args=['git', 'apply', '--cached', '--verbose', '-'], stdin_str=patch_string, universal_newlines=True)
def stage_patch(path, input_string, styled_string): 'Create a patch between input_string and output_string and add the patch to the git staging area.\n\n Args:\n path: the path of the file being patched.\n input_string: the current state of the file in the git stage.\n styled_string: the properly styled string to stage.\n\n ' if (read_file(path) == input_string): write_file(path, '', styled_string) with git_lock: zazu.util.check_output(['git', 'add', path]) else: input_lines = input_string.splitlines() styled_lines = styled_string.splitlines() patch = difflib.unified_diff(input_lines, styled_lines, ('a/' + path), ('b/' + path), lineterm='') patch_string = ('\n'.join(patch) + '\n') if (input_string[(- 1)] != '\n'): raise click.ClickException('File "{}" must have a trailing newline'.format(path)) with git_lock: zazu.util.check_popen(args=['git', 'apply', '--cached', '--verbose', '-'], stdin_str=patch_string, universal_newlines=True)<|docstring|>Create a patch between input_string and output_string and add the patch to the git staging area. Args: path: the path of the file being patched. input_string: the current state of the file in the git stage. styled_string: the properly styled string to stage.<|endoftext|>
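The patch text that stage_patch feeds to git apply --cached is a plain unified diff; below is a standalone sketch of the same difflib call with throwaway strings (file name and contents are examples only).

import difflib

old = 'a = 1\nb=2\n'
new = 'a = 1\nb = 2\n'
patch = '\n'.join(difflib.unified_diff(
    old.splitlines(), new.splitlines(), 'a/demo.py', 'b/demo.py', lineterm='')) + '\n'
print(patch)   # '---'/'+++' headers, one '@@' hunk, a '-b=2' line and a '+b = 2' line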
e30691a41f7fe98b8937919fbd78538ca19e1f65abf9bf2c3beb40711e81b0a1
def style_file(stylers, path, read_fn, write_fn): 'Style a file.\n\n Args:\n styler: the styler to use to style the file.\n path: the file path.\n read_fn: function used to read in the file contents.\n write_fn: function used to write out the styled file, or None\n\n ' input_string = read_fn(path) styled_string = input_string for styler in stylers: styled_string = styler.style_string(styled_string, path) violation = (styled_string != input_string) if (violation and callable(write_fn)): write_fn(path, input_string, styled_string) return (path, stylers, violation)
Style a file. Args: styler: the styler to use to style the file. path: the file path. read_fn: function used to read in the file contents. write_fn: function used to write out the styled file, or None
zazu/style.py
style_file
stopthatcow/zazu
2
python
def style_file(stylers, path, read_fn, write_fn): 'Style a file.\n\n Args:\n styler: the styler to use to style the file.\n path: the file path.\n read_fn: function used to read in the file contents.\n write_fn: function used to write out the styled file, or None\n\n ' input_string = read_fn(path) styled_string = input_string for styler in stylers: styled_string = styler.style_string(styled_string, path) violation = (styled_string != input_string) if (violation and callable(write_fn)): write_fn(path, input_string, styled_string) return (path, stylers, violation)
def style_file(stylers, path, read_fn, write_fn): 'Style a file.\n\n Args:\n styler: the styler to use to style the file.\n path: the file path.\n read_fn: function used to read in the file contents.\n write_fn: function used to write out the styled file, or None\n\n ' input_string = read_fn(path) styled_string = input_string for styler in stylers: styled_string = styler.style_string(styled_string, path) violation = (styled_string != input_string) if (violation and callable(write_fn)): write_fn(path, input_string, styled_string) return (path, stylers, violation)<|docstring|>Style a file. Args: styler: the styler to use to style the file. path: the file path. read_fn: function used to read in the file contents. write_fn: function used to write out the styled file, or None<|endoftext|>
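A hedged sketch of the styler interface style_file relies on (style_string, plus the name/includes/excludes attributes used by the surrounding command); TrailingWhitespaceStyler and demo.py are hypothetical.

class TrailingWhitespaceStyler:
    includes = ['*.py']
    excludes = []
    def name(self):
        return 'strip-trailing-whitespace'
    def style_string(self, string, path):
        return '\n'.join(line.rstrip() for line in string.split('\n'))

# reads demo.py, restyles it, writes it back, and reports whether it changed
path, used_stylers, violated = style_file([TrailingWhitespaceStyler()], 'demo.py', read_file, write_file)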
2fac8af2f0e0c9dc2d12227bd7968fa71501843d5569b0c79b0828ded5d0d312
def styler_list(file, sets, keys): 'Get the list of stylers to apply to a file based on the file set of each styler.' return [s for s in keys if (file in sets[s])]
Get the list of stylers to apply to a file based on the file set of each styler.
zazu/style.py
styler_list
stopthatcow/zazu
2
python
def styler_list(file, sets, keys): return [s for s in keys if (file in sets[s])]
def styler_list(file, sets, keys): return [s for s in keys if (file in sets[s])]<|docstring|>Get the list of stylers to apply to a file based on the file set of each styler.<|endoftext|>
bd4c0959b8512f83c461f666b61d4019fd43dd45dcabf4e462f354e7a6d5df8a
@click.command() @zazu.config.pass_config @click.option('-v', '--verbose', is_flag=True, help='print files that are dirty') @click.option('--check', is_flag=True, help='only check the repo for style violations, do not correct them') @click.option('--cached', is_flag=True, help='only examine/fix files that are staged for SCM commit') def style(config, verbose, check, cached): 'Style repo files or check that they are valid style.' config.check_repo() violation_count = 0 stylers = config.stylers() fixed_ok_tags = [click.style('FIXED', fg='red', bold=True), click.style(' OK ', fg='green', bold=True)] tags = (zazu.util.FAIL_OK if check else fixed_ok_tags) with zazu.util.cd(config.repo_root): if stylers: if cached: staged_files = zazu.git_helper.get_touched_files(config.repo) read_fn = zazu.git_helper.read_staged write_fn = stage_patch else: read_fn = read_file write_fn = write_file if check: write_fn = None file_sets = {} styler_file_sets = {} all_files = set() for s in stylers: includes = tuple(s.includes) excludes = tuple(s.excludes) if ((includes, excludes) not in file_sets): files = set(zazu.util.scantree(config.repo_root, includes, excludes, exclude_hidden=True)) if cached: files = files.intersection(staged_files) file_sets[(includes, excludes)] = files else: files = file_sets[(includes, excludes)] styler_file_sets[s] = files all_files |= files work = [functools.partial(style_file, styler_list(f, styler_file_sets, stylers), f, read_fn, write_fn) for f in all_files] checked_files = zazu.util.dispatch(work) for (f, stylers, violation) in checked_files: if verbose: click.echo(zazu.util.format_checklist_item((not violation), text='({}) {}'.format(', '.join([s.name() for s in stylers]), f), tag_formats=tags)) violation_count += violation if verbose: file_count = len(all_files) if check: click.echo('{} files with violations in {} files'.format(violation_count, file_count)) else: click.echo('{} files fixed in {} files'.format(violation_count, file_count)) sys.exit(((- 1) if (check and violation_count) else 0)) else: click.echo('no style settings found')
Style repo files or check that they are valid style.
zazu/style.py
style
stopthatcow/zazu
2
python
@click.command() @zazu.config.pass_config @click.option('-v', '--verbose', is_flag=True, help='print files that are dirty') @click.option('--check', is_flag=True, help='only check the repo for style violations, do not correct them') @click.option('--cached', is_flag=True, help='only examine/fix files that are staged for SCM commit') def style(config, verbose, check, cached): config.check_repo() violation_count = 0 stylers = config.stylers() fixed_ok_tags = [click.style('FIXED', fg='red', bold=True), click.style(' OK ', fg='green', bold=True)] tags = (zazu.util.FAIL_OK if check else fixed_ok_tags) with zazu.util.cd(config.repo_root): if stylers: if cached: staged_files = zazu.git_helper.get_touched_files(config.repo) read_fn = zazu.git_helper.read_staged write_fn = stage_patch else: read_fn = read_file write_fn = write_file if check: write_fn = None file_sets = {} styler_file_sets = {} all_files = set() for s in stylers: includes = tuple(s.includes) excludes = tuple(s.excludes) if ((includes, excludes) not in file_sets): files = set(zazu.util.scantree(config.repo_root, includes, excludes, exclude_hidden=True)) if cached: files = files.intersection(staged_files) file_sets[(includes, excludes)] = files else: files = file_sets[(includes, excludes)] styler_file_sets[s] = files all_files |= files work = [functools.partial(style_file, styler_list(f, styler_file_sets, stylers), f, read_fn, write_fn) for f in all_files] checked_files = zazu.util.dispatch(work) for (f, stylers, violation) in checked_files: if verbose: click.echo(zazu.util.format_checklist_item((not violation), text='({}) {}'.format(', '.join([s.name() for s in stylers]), f), tag_formats=tags)) violation_count += violation if verbose: file_count = len(all_files) if check: click.echo('{} files with violations in {} files'.format(violation_count, file_count)) else: click.echo('{} files fixed in {} files'.format(violation_count, file_count)) sys.exit(((- 1) if (check and violation_count) else 0)) else: click.echo('no style settings found')
@click.command() @zazu.config.pass_config @click.option('-v', '--verbose', is_flag=True, help='print files that are dirty') @click.option('--check', is_flag=True, help='only check the repo for style violations, do not correct them') @click.option('--cached', is_flag=True, help='only examine/fix files that are staged for SCM commit') def style(config, verbose, check, cached): config.check_repo() violation_count = 0 stylers = config.stylers() fixed_ok_tags = [click.style('FIXED', fg='red', bold=True), click.style(' OK ', fg='green', bold=True)] tags = (zazu.util.FAIL_OK if check else fixed_ok_tags) with zazu.util.cd(config.repo_root): if stylers: if cached: staged_files = zazu.git_helper.get_touched_files(config.repo) read_fn = zazu.git_helper.read_staged write_fn = stage_patch else: read_fn = read_file write_fn = write_file if check: write_fn = None file_sets = {} styler_file_sets = {} all_files = set() for s in stylers: includes = tuple(s.includes) excludes = tuple(s.excludes) if ((includes, excludes) not in file_sets): files = set(zazu.util.scantree(config.repo_root, includes, excludes, exclude_hidden=True)) if cached: files = files.intersection(staged_files) file_sets[(includes, excludes)] = files else: files = file_sets[(includes, excludes)] styler_file_sets[s] = files all_files |= files work = [functools.partial(style_file, styler_list(f, styler_file_sets, stylers), f, read_fn, write_fn) for f in all_files] checked_files = zazu.util.dispatch(work) for (f, stylers, violation) in checked_files: if verbose: click.echo(zazu.util.format_checklist_item((not violation), text='({}) {}'.format(', '.join([s.name() for s in stylers]), f), tag_formats=tags)) violation_count += violation if verbose: file_count = len(all_files) if check: click.echo('{} files with violations in {} files'.format(violation_count, file_count)) else: click.echo('{} files fixed in {} files'.format(violation_count, file_count)) sys.exit(((- 1) if (check and violation_count) else 0)) else: click.echo('no style settings found')<|docstring|>Style repo files or check that they are valid style.<|endoftext|>
f44753f4c397986d45af048e94a93e39114d8d6d76a560205b6fa328a01753cb
def __init__(self, name: str, parameters: TrainingParameters) -> None: '\n\n :param name: A unique name for this trainer used to create the data on the\n caffe2 workspace\n :param parameters: The set of training parameters\n ' self.optimizer = parameters.optimizer self.learning_rate = parameters.learning_rate self.lr_decay = parameters.lr_decay self.lr_policy = parameters.lr_policy DNN.__init__(self, name, parameters)
:param name: A unique name for this trainer used to create the data on the caffe2 workspace :param parameters: The set of training parameters
Section_6_BlueWhale/ml/rl/training/ml_trainer.py
__init__
PacktPublishing/Hands-On-Deep-Learning-with-Caffe2
7
python
def __init__(self, name: str, parameters: TrainingParameters) -> None: '\n\n :param name: A unique name for this trainer used to create the data on the\n caffe2 workspace\n :param parameters: The set of training parameters\n ' self.optimizer = parameters.optimizer self.learning_rate = parameters.learning_rate self.lr_decay = parameters.lr_decay self.lr_policy = parameters.lr_policy DNN.__init__(self, name, parameters)
def __init__(self, name: str, parameters: TrainingParameters) -> None: '\n\n :param name: A unique name for this trainer used to create the data on the\n caffe2 workspace\n :param parameters: The set of training parameters\n ' self.optimizer = parameters.optimizer self.learning_rate = parameters.learning_rate self.lr_decay = parameters.lr_decay self.lr_policy = parameters.lr_policy DNN.__init__(self, name, parameters)<|docstring|>:param name: A unique name for this trainer used to create the data on the caffe2 workspace :param parameters: The set of training parameters<|endoftext|>
0c5cbb629e9524b8d6c31d313c5177c9512763e04f057435c579f734f83072e5
def generateLossOps(self, model: ModelHelper, output_blob: str, label_blob: str) -> str: '\n Adds loss operators to net. The loss function is computed by a squared L2\n distance, and then averaged over all items in the minibatch.\n\n :param model: ModelHelper object to add loss operators to.\n :param model_id: String identifier.\n :param output_blob: Blob containing output of net.\n :param label_blob: Blob containing labels.\n :param loss_blob: Blob in which to store loss.\n ' dist = model.SquaredL2Distance([label_blob, output_blob], model.net.NextBlob('dist')) loss = model.net.NextBlob('loss') model.AveragedLoss(dist, loss) return loss
Adds loss operators to net. The loss function is computed by a squared L2 distance, and then averaged over all items in the minibatch. :param model: ModelHelper object to add loss operators to. :param model_id: String identifier. :param output_blob: Blob containing output of net. :param label_blob: Blob containing labels. :param loss_blob: Blob in which to store loss.
Section_6_BlueWhale/ml/rl/training/ml_trainer.py
generateLossOps
PacktPublishing/Hands-On-Deep-Learning-with-Caffe2
7
python
def generateLossOps(self, model: ModelHelper, output_blob: str, label_blob: str) -> str: '\n Adds loss operators to net. The loss function is computed by a squared L2\n distance, and then averaged over all items in the minibatch.\n\n :param model: ModelHelper object to add loss operators to.\n :param model_id: String identifier.\n :param output_blob: Blob containing output of net.\n :param label_blob: Blob containing labels.\n :param loss_blob: Blob in which to store loss.\n ' dist = model.SquaredL2Distance([label_blob, output_blob], model.net.NextBlob('dist')) loss = model.net.NextBlob('loss') model.AveragedLoss(dist, loss) return loss
def generateLossOps(self, model: ModelHelper, output_blob: str, label_blob: str) -> str: '\n Adds loss operators to net. The loss function is computed by a squared L2\n distance, and then averaged over all items in the minibatch.\n\n :param model: ModelHelper object to add loss operators to.\n :param model_id: String identifier.\n :param output_blob: Blob containing output of net.\n :param label_blob: Blob containing labels.\n :param loss_blob: Blob in which to store loss.\n ' dist = model.SquaredL2Distance([label_blob, output_blob], model.net.NextBlob('dist')) loss = model.net.NextBlob('loss') model.AveragedLoss(dist, loss) return loss<|docstring|>Adds loss operators to net. The loss function is computed by a squared L2 distance, and then averaged over all items in the minibatch. :param model: ModelHelper object to add loss operators to. :param model_id: String identifier. :param output_blob: Blob containing output of net. :param label_blob: Blob containing labels. :param loss_blob: Blob in which to store loss.<|endoftext|>
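The graph wired up above computes a squared L2 distance per example and averages it over the minibatch; in plain numpy terms the quantity is the following (up to the constant factor caffe2's SquaredL2Distance may apply).

import numpy as np

def minibatch_l2_loss(labels, outputs):
    # sum of squared differences per row, then mean over the minibatch
    return np.mean(np.sum((labels - outputs) ** 2, axis=1))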
2dbedd921022c3e06a7021d6bbd079c84660f949ff4e1cf57c948e236f94427e
def cache_set_max(mx): 'Set the maximum number of operations libvips will cache.' vips_lib.vips_cache_set_max(mx)
Set the maximum number of operations libvips will cache.
pyvips/voperation.py
cache_set_max
timgates42/pyvips
142
python
def cache_set_max(mx): vips_lib.vips_cache_set_max(mx)
def cache_set_max(mx): vips_lib.vips_cache_set_max(mx)<|docstring|>Set the maximum number of operations libvips will cache.<|endoftext|>
f91c8584f6235005caf12b2594e08581528e2a8eae423b4b6a48af219aef94fc
def cache_set_max_mem(mx): 'Limit the operation cache by memory use.' vips_lib.vips_cache_set_max_mem(mx)
Limit the operation cache by memory use.
pyvips/voperation.py
cache_set_max_mem
timgates42/pyvips
142
python
def cache_set_max_mem(mx): vips_lib.vips_cache_set_max_mem(mx)
def cache_set_max_mem(mx): vips_lib.vips_cache_set_max_mem(mx)<|docstring|>Limit the operation cache by memory use.<|endoftext|>
bfd7257da9dd0ca2d30c99db52b1504f15d2f5bb8b23563a60a04ba41676354d
def cache_set_max_files(mx): 'Limit the operation cache by number of open files.' vips_lib.vips_cache_set_max_files(mx)
Limit the operation cache by number of open files.
pyvips/voperation.py
cache_set_max_files
timgates42/pyvips
142
python
def cache_set_max_files(mx): vips_lib.vips_cache_set_max_files(mx)
def cache_set_max_files(mx): vips_lib.vips_cache_set_max_files(mx)<|docstring|>Limit the operation cache by number of open files.<|endoftext|>
219910eebf661a10a1a00c2a89b2c91f110c2c2b8cca0320508aad087140ae9a
def cache_set_trace(trace): 'Turn on libvips cache tracing.' vips_lib.vips_cache_set_trace(trace)
Turn on libvips cache tracing.
pyvips/voperation.py
cache_set_trace
timgates42/pyvips
142
python
def cache_set_trace(trace): vips_lib.vips_cache_set_trace(trace)
def cache_set_trace(trace): vips_lib.vips_cache_set_trace(trace)<|docstring|>Turn on libvips cache tracing.<|endoftext|>
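Typical tuning of the libvips operation cache with the four setters above, assuming they are re-exported at the pyvips module level; the limits are arbitrary examples.

import pyvips

pyvips.cache_set_max(100)                      # keep at most 100 operations
pyvips.cache_set_max_mem(128 * 1024 * 1024)    # cap cache memory at ~128 MB
pyvips.cache_set_max_files(20)                 # cap open files held by the cache
pyvips.cache_set_trace(True)                   # trace cache activity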
ab9f6433a5342ab87e6cce302e8033e311173fc9bff896e130a8b1805b254e98
@staticmethod def call(operation_name, *args, **kwargs): "Call a libvips operation.\n\n Use this method to call any libvips operation. For example::\n\n black_image = pyvips.Operation.call('black', 10, 10)\n\n See the Introduction for notes on how this works.\n\n " logger.debug('VipsOperation.call: operation_name = %s', operation_name) intro = Introspect.get(operation_name) if (len(intro.required_input) != len(args)): raise Error('{0} needs {1} arguments, but {2} given'.format(operation_name, len(intro.required_input), len(args))) op = Operation.new_from_name(operation_name) string_options = kwargs.pop('string_options', '') if (not op.set_string(string_options)): raise Error('unable to call {0}'.format(operation_name)) match_image = _find_inside((lambda x: isinstance(x, pyvips.Image)), args) logger.debug('VipsOperation.call: match_image = %s', match_image) references = [] def add_reference(x): if isinstance(x, pyvips.Image): for i in x._references: if (i not in references): references.append(i) return False for (name, value) in zip(intro.required_input, args): _find_inside(add_reference, value) op.set(name, intro.details[name]['flags'], match_image, value) for name in kwargs: value = kwargs[name] details = intro.details[name] if ((name not in intro.optional_input) and (name not in intro.optional_output)): raise Error('{0} does not support optional argument {1}'.format(operation_name, name)) if ((details['flags'] & _DEPRECATED) != 0): logger.info('{0} argument {1} is deprecated', operation_name, name) _find_inside(add_reference, value) op.set(name, details['flags'], match_image, value) vop = vips_lib.vips_cache_operation_build(op.pointer) if (vop == ffi.NULL): raise Error('unable to call {0}'.format(operation_name)) op = Operation(vop) def set_reference(x): if isinstance(x, pyvips.Image): x._references += references return False result = [] for name in intro.required_output: value = op.get(name) _find_inside(set_reference, value) result.append(value) opts = {} for name in intro.optional_output: if (name in kwargs): value = op.get(name) _find_inside(set_reference, value) opts[name] = value if (len(opts) > 0): result.append(opts) vips_lib.vips_object_unref_outputs(op.vobject) if (len(result) == 0): result = None elif (len(result) == 1): result = result[0] logger.debug('VipsOperation.call: result = %s', result) return result
Call a libvips operation. Use this method to call any libvips operation. For example:: black_image = pyvips.Operation.call('black', 10, 10) See the Introduction for notes on how this works.
pyvips/voperation.py
call
timgates42/pyvips
142
python
@staticmethod def call(operation_name, *args, **kwargs): "Call a libvips operation.\n\n Use this method to call any libvips operation. For example::\n\n black_image = pyvips.Operation.call('black', 10, 10)\n\n See the Introduction for notes on how this works.\n\n " logger.debug('VipsOperation.call: operation_name = %s', operation_name) intro = Introspect.get(operation_name) if (len(intro.required_input) != len(args)): raise Error('{0} needs {1} arguments, but {2} given'.format(operation_name, len(intro.required_input), len(args))) op = Operation.new_from_name(operation_name) string_options = kwargs.pop('string_options', ) if (not op.set_string(string_options)): raise Error('unable to call {0}'.format(operation_name)) match_image = _find_inside((lambda x: isinstance(x, pyvips.Image)), args) logger.debug('VipsOperation.call: match_image = %s', match_image) references = [] def add_reference(x): if isinstance(x, pyvips.Image): for i in x._references: if (i not in references): references.append(i) return False for (name, value) in zip(intro.required_input, args): _find_inside(add_reference, value) op.set(name, intro.details[name]['flags'], match_image, value) for name in kwargs: value = kwargs[name] details = intro.details[name] if ((name not in intro.optional_input) and (name not in intro.optional_output)): raise Error('{0} does not support optional argument {1}'.format(operation_name, name)) if ((details['flags'] & _DEPRECATED) != 0): logger.info('{0} argument {1} is deprecated', operation_name, name) _find_inside(add_reference, value) op.set(name, details['flags'], match_image, value) vop = vips_lib.vips_cache_operation_build(op.pointer) if (vop == ffi.NULL): raise Error('unable to call {0}'.format(operation_name)) op = Operation(vop) def set_reference(x): if isinstance(x, pyvips.Image): x._references += references return False result = [] for name in intro.required_output: value = op.get(name) _find_inside(set_reference, value) result.append(value) opts = {} for name in intro.optional_output: if (name in kwargs): value = op.get(name) _find_inside(set_reference, value) opts[name] = value if (len(opts) > 0): result.append(opts) vips_lib.vips_object_unref_outputs(op.vobject) if (len(result) == 0): result = None elif (len(result) == 1): result = result[0] logger.debug('VipsOperation.call: result = %s', result) return result
@staticmethod def call(operation_name, *args, **kwargs): "Call a libvips operation.\n\n Use this method to call any libvips operation. For example::\n\n black_image = pyvips.Operation.call('black', 10, 10)\n\n See the Introduction for notes on how this works.\n\n " logger.debug('VipsOperation.call: operation_name = %s', operation_name) intro = Introspect.get(operation_name) if (len(intro.required_input) != len(args)): raise Error('{0} needs {1} arguments, but {2} given'.format(operation_name, len(intro.required_input), len(args))) op = Operation.new_from_name(operation_name) string_options = kwargs.pop('string_options', ) if (not op.set_string(string_options)): raise Error('unable to call {0}'.format(operation_name)) match_image = _find_inside((lambda x: isinstance(x, pyvips.Image)), args) logger.debug('VipsOperation.call: match_image = %s', match_image) references = [] def add_reference(x): if isinstance(x, pyvips.Image): for i in x._references: if (i not in references): references.append(i) return False for (name, value) in zip(intro.required_input, args): _find_inside(add_reference, value) op.set(name, intro.details[name]['flags'], match_image, value) for name in kwargs: value = kwargs[name] details = intro.details[name] if ((name not in intro.optional_input) and (name not in intro.optional_output)): raise Error('{0} does not support optional argument {1}'.format(operation_name, name)) if ((details['flags'] & _DEPRECATED) != 0): logger.info('{0} argument {1} is deprecated', operation_name, name) _find_inside(add_reference, value) op.set(name, details['flags'], match_image, value) vop = vips_lib.vips_cache_operation_build(op.pointer) if (vop == ffi.NULL): raise Error('unable to call {0}'.format(operation_name)) op = Operation(vop) def set_reference(x): if isinstance(x, pyvips.Image): x._references += references return False result = [] for name in intro.required_output: value = op.get(name) _find_inside(set_reference, value) result.append(value) opts = {} for name in intro.optional_output: if (name in kwargs): value = op.get(name) _find_inside(set_reference, value) opts[name] = value if (len(opts) > 0): result.append(opts) vips_lib.vips_object_unref_outputs(op.vobject) if (len(result) == 0): result = None elif (len(result) == 1): result = result[0] logger.debug('VipsOperation.call: result = %s', result) return result<|docstring|>Call a libvips operation. Use this method to call any libvips operation. For example:: black_image = pyvips.Operation.call('black', 10, 10) See the Introduction for notes on how this works.<|endoftext|>
b401edba6ec30cbd2373bb29cec1056111503f8d2e3d693363afa0ead263af94
@staticmethod def generate_docstring(operation_name): 'Make a google-style docstring.\n\n This is used to generate help() output.\n\n ' if (operation_name in Operation._docstring_cache): return Operation._docstring_cache[operation_name] intro = Introspect.get(operation_name) if ((intro.flags & _OPERATION_DEPRECATED) != 0): raise Error('No such operator.', 'operator "{0}" is deprecated'.format(operation_name)) result = ((intro.description[0].upper() + intro.description[1:]) + '.\n\n') result += 'Example:\n' result += ((' ' + ', '.join(intro.required_output)) + ' = ') if (intro.member_x is not None): result += (((intro.member_x + '.') + operation_name) + '(') else: result += (('pyvips.Image.' + operation_name) + '(') args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] args += [(x + '=bool') for x in intro.optional_output] result += (', '.join(args) + ')\n') def argstr(name): details = intro.details[name] return u' {0} ({1}): {2}\n'.format(name, GValue.gtype_to_python(details['type']), details['blurb']) result += '\nReturns:\n' for name in intro.required_output: result += argstr(name) result += '\nArgs:\n' if (intro.member_x is not None): result += argstr(intro.member_x) for name in intro.method_args: result += argstr(name) if (len(intro.optional_input) > 0): result += '\nKeyword args:\n' for name in intro.optional_input: result += argstr(name) if (len(intro.optional_output) > 0): result += '\nOther Parameters:\n' for name in intro.optional_output: result += argstr(name) result += '\nRaises:\n :class:`.Error`\n' Operation._docstring_cache[operation_name] = result return result
Make a google-style docstring. This is used to generate help() output.
pyvips/voperation.py
generate_docstring
timgates42/pyvips
142
python
@staticmethod def generate_docstring(operation_name): 'Make a google-style docstring.\n\n This is used to generate help() output.\n\n ' if (operation_name in Operation._docstring_cache): return Operation._docstring_cache[operation_name] intro = Introspect.get(operation_name) if ((intro.flags & _OPERATION_DEPRECATED) != 0): raise Error('No such operator.', 'operator "{0}" is deprecated'.format(operation_name)) result = ((intro.description[0].upper() + intro.description[1:]) + '.\n\n') result += 'Example:\n' result += ((' ' + ', '.join(intro.required_output)) + ' = ') if (intro.member_x is not None): result += (((intro.member_x + '.') + operation_name) + '(') else: result += (('pyvips.Image.' + operation_name) + '(') args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] args += [(x + '=bool') for x in intro.optional_output] result += (', '.join(args) + ')\n') def argstr(name): details = intro.details[name] return u' {0} ({1}): {2}\n'.format(name, GValue.gtype_to_python(details['type']), details['blurb']) result += '\nReturns:\n' for name in intro.required_output: result += argstr(name) result += '\nArgs:\n' if (intro.member_x is not None): result += argstr(intro.member_x) for name in intro.method_args: result += argstr(name) if (len(intro.optional_input) > 0): result += '\nKeyword args:\n' for name in intro.optional_input: result += argstr(name) if (len(intro.optional_output) > 0): result += '\nOther Parameters:\n' for name in intro.optional_output: result += argstr(name) result += '\nRaises:\n :class:`.Error`\n' Operation._docstring_cache[operation_name] = result return result
@staticmethod def generate_docstring(operation_name): 'Make a google-style docstring.\n\n This is used to generate help() output.\n\n ' if (operation_name in Operation._docstring_cache): return Operation._docstring_cache[operation_name] intro = Introspect.get(operation_name) if ((intro.flags & _OPERATION_DEPRECATED) != 0): raise Error('No such operator.', 'operator "{0}" is deprecated'.format(operation_name)) result = ((intro.description[0].upper() + intro.description[1:]) + '.\n\n') result += 'Example:\n' result += ((' ' + ', '.join(intro.required_output)) + ' = ') if (intro.member_x is not None): result += (((intro.member_x + '.') + operation_name) + '(') else: result += (('pyvips.Image.' + operation_name) + '(') args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] args += [(x + '=bool') for x in intro.optional_output] result += (', '.join(args) + ')\n') def argstr(name): details = intro.details[name] return u' {0} ({1}): {2}\n'.format(name, GValue.gtype_to_python(details['type']), details['blurb']) result += '\nReturns:\n' for name in intro.required_output: result += argstr(name) result += '\nArgs:\n' if (intro.member_x is not None): result += argstr(intro.member_x) for name in intro.method_args: result += argstr(name) if (len(intro.optional_input) > 0): result += '\nKeyword args:\n' for name in intro.optional_input: result += argstr(name) if (len(intro.optional_output) > 0): result += '\nOther Parameters:\n' for name in intro.optional_output: result += argstr(name) result += '\nRaises:\n :class:`.Error`\n' Operation._docstring_cache[operation_name] = result return result<|docstring|>Make a google-style docstring. This is used to generate help() output.<|endoftext|>
424d5cdaf8ca8f20247891c4e6133b313797de4c3309a19bdb1b2d7ac1d6138d
@staticmethod def generate_sphinx(operation_name): 'Make a sphinx-style docstring.\n\n This is used to generate the off-line docs.\n\n ' intro = Introspect.get(operation_name) if ((intro.flags & _OPERATION_DEPRECATED) != 0): raise Error('No such operator.', 'operator "{0}" is deprecated'.format(operation_name)) if (intro.member_x is not None): result = '.. method:: ' else: result = '.. staticmethod:: ' args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] args += [(x + '=bool') for x in intro.optional_output] result += (((operation_name + '(') + ', '.join(args)) + ')\n\n') result += ((intro.description[0].upper() + intro.description[1:]) + '.\n\n') result += 'Example:\n' result += ((' ' + ', '.join(intro.required_output)) + ' = ') if (intro.member_x is not None): result += (((intro.member_x + '.') + operation_name) + '(') else: result += (('pyvips.Image.' + operation_name) + '(') args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] result += ', '.join(args) result += ')\n\n' for name in (intro.method_args + intro.optional_input): details = intro.details[name] result += ':param {0} {1}: {2}\n'.format(GValue.gtype_to_python(details['type']), name, details['blurb']) for name in intro.optional_output: result += ':param bool {0}: enable output: {1}\n'.format(name, intro.details[name]['blurb']) output_types = [GValue.gtype_to_python(intro.details[name]['type']) for name in intro.required_output] if (len(output_types) == 1): output_type = output_types[0] else: output_type = (('list[' + ', '.join(output_types)) + ']') if (len(intro.optional_output) > 0): output_types += ['Dict[str, mixed]'] output_type += ((' or list[' + ', '.join(output_types)) + ']') result += ((':rtype: ' + output_type) + '\n') result += ':raises Error:\n' return result
Make a sphinx-style docstring. This is used to generate the off-line docs.
pyvips/voperation.py
generate_sphinx
timgates42/pyvips
142
python
@staticmethod def generate_sphinx(operation_name): 'Make a sphinx-style docstring.\n\n This is used to generate the off-line docs.\n\n ' intro = Introspect.get(operation_name) if ((intro.flags & _OPERATION_DEPRECATED) != 0): raise Error('No such operator.', 'operator "{0}" is deprecated'.format(operation_name)) if (intro.member_x is not None): result = '.. method:: ' else: result = '.. staticmethod:: ' args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] args += [(x + '=bool') for x in intro.optional_output] result += (((operation_name + '(') + ', '.join(args)) + ')\n\n') result += ((intro.description[0].upper() + intro.description[1:]) + '.\n\n') result += 'Example:\n' result += ((' ' + ', '.join(intro.required_output)) + ' = ') if (intro.member_x is not None): result += (((intro.member_x + '.') + operation_name) + '(') else: result += (('pyvips.Image.' + operation_name) + '(') args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] result += ', '.join(args) result += ')\n\n' for name in (intro.method_args + intro.optional_input): details = intro.details[name] result += ':param {0} {1}: {2}\n'.format(GValue.gtype_to_python(details['type']), name, details['blurb']) for name in intro.optional_output: result += ':param bool {0}: enable output: {1}\n'.format(name, intro.details[name]['blurb']) output_types = [GValue.gtype_to_python(intro.details[name]['type']) for name in intro.required_output] if (len(output_types) == 1): output_type = output_types[0] else: output_type = (('list[' + ', '.join(output_types)) + ']') if (len(intro.optional_output) > 0): output_types += ['Dict[str, mixed]'] output_type += ((' or list[' + ', '.join(output_types)) + ']') result += ((':rtype: ' + output_type) + '\n') result += ':raises Error:\n' return result
@staticmethod def generate_sphinx(operation_name): 'Make a sphinx-style docstring.\n\n This is used to generate the off-line docs.\n\n ' intro = Introspect.get(operation_name) if ((intro.flags & _OPERATION_DEPRECATED) != 0): raise Error('No such operator.', 'operator "{0}" is deprecated'.format(operation_name)) if (intro.member_x is not None): result = '.. method:: ' else: result = '.. staticmethod:: ' args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] args += [(x + '=bool') for x in intro.optional_output] result += (((operation_name + '(') + ', '.join(args)) + ')\n\n') result += ((intro.description[0].upper() + intro.description[1:]) + '.\n\n') result += 'Example:\n' result += ((' ' + ', '.join(intro.required_output)) + ' = ') if (intro.member_x is not None): result += (((intro.member_x + '.') + operation_name) + '(') else: result += (('pyvips.Image.' + operation_name) + '(') args = [] args += intro.method_args args += [((x + '=') + GValue.gtype_to_python(intro.details[x]['type'])) for x in intro.optional_input] result += ', '.join(args) result += ')\n\n' for name in (intro.method_args + intro.optional_input): details = intro.details[name] result += ':param {0} {1}: {2}\n'.format(GValue.gtype_to_python(details['type']), name, details['blurb']) for name in intro.optional_output: result += ':param bool {0}: enable output: {1}\n'.format(name, intro.details[name]['blurb']) output_types = [GValue.gtype_to_python(intro.details[name]['type']) for name in intro.required_output] if (len(output_types) == 1): output_type = output_types[0] else: output_type = (('list[' + ', '.join(output_types)) + ']') if (len(intro.optional_output) > 0): output_types += ['Dict[str, mixed]'] output_type += ((' or list[' + ', '.join(output_types)) + ']') result += ((':rtype: ' + output_type) + '\n') result += ':raises Error:\n' return result<|docstring|>Make a sphinx-style docstring. This is used to generate the off-line docs.<|endoftext|>
a656d41c32d48677bc0f6130ef11330724d8260c87ca9549f303c6365f22f664
@staticmethod def generate_sphinx_all(): 'Generate sphinx documentation.\n\n This generates a .rst file for all auto-generated image methods. Use it\n to regenerate the docs with something like::\n\n $ python -c "import pyvips; pyvips.Operation.generate_sphinx_all()" > x\n\n And copy-paste the file contents into doc/vimage.rst in the appropriate\n place.\n\n ' alias = ['crop'] alias_gtypes = {} for name in alias: gtype = pyvips.type_find('VipsOperation', name) alias_gtypes[gtype] = name all_names = [] def add_name(gtype, a, b): if (gtype in alias_gtypes): name = alias_gtypes[gtype] else: name = nickname_find(gtype) try: Operation.generate_sphinx(name) all_names.append(name) except Error: pass type_map(gtype, add_name) return ffi.NULL type_map(type_from_name('VipsOperation'), add_name) all_names.sort() exclude = ['scale', 'ifthenelse', 'bandjoin', 'bandrank'] all_names = [x for x in all_names if (x not in exclude)] print('.. class:: pyvips.Image\n') print(' .. rubric:: Methods\n') print(' .. autosummary::') print(' :nosignatures:\n') for name in all_names: print(' ~{0}'.format(name)) print() print() for name in all_names: docstr = Operation.generate_sphinx(name) docstr = docstr.replace('\n', '\n ') print((' ' + docstr))
Generate sphinx documentation. This generates a .rst file for all auto-generated image methods. Use it to regenerate the docs with something like:: $ python -c "import pyvips; pyvips.Operation.generate_sphinx_all()" > x And copy-paste the file contents into doc/vimage.rst in the appropriate place.
pyvips/voperation.py
generate_sphinx_all
timgates42/pyvips
142
python
@staticmethod def generate_sphinx_all(): 'Generate sphinx documentation.\n\n This generates a .rst file for all auto-generated image methods. Use it\n to regenerate the docs with something like::\n\n $ python -c "import pyvips; pyvips.Operation.generate_sphinx_all()" > x\n\n And copy-paste the file contents into doc/vimage.rst in the appropriate\n place.\n\n ' alias = ['crop'] alias_gtypes = {} for name in alias: gtype = pyvips.type_find('VipsOperation', name) alias_gtypes[gtype] = name all_names = [] def add_name(gtype, a, b): if (gtype in alias_gtypes): name = alias_gtypes[gtype] else: name = nickname_find(gtype) try: Operation.generate_sphinx(name) all_names.append(name) except Error: pass type_map(gtype, add_name) return ffi.NULL type_map(type_from_name('VipsOperation'), add_name) all_names.sort() exclude = ['scale', 'ifthenelse', 'bandjoin', 'bandrank'] all_names = [x for x in all_names if (x not in exclude)] print('.. class:: pyvips.Image\n') print(' .. rubric:: Methods\n') print(' .. autosummary::') print(' :nosignatures:\n') for name in all_names: print(' ~{0}'.format(name)) print() print() for name in all_names: docstr = Operation.generate_sphinx(name) docstr = docstr.replace('\n', '\n ') print((' ' + docstr))
@staticmethod def generate_sphinx_all(): 'Generate sphinx documentation.\n\n This generates a .rst file for all auto-generated image methods. Use it\n to regenerate the docs with something like::\n\n $ python -c "import pyvips; pyvips.Operation.generate_sphinx_all()" > x\n\n And copy-paste the file contents into doc/vimage.rst in the appropriate\n place.\n\n ' alias = ['crop'] alias_gtypes = {} for name in alias: gtype = pyvips.type_find('VipsOperation', name) alias_gtypes[gtype] = name all_names = [] def add_name(gtype, a, b): if (gtype in alias_gtypes): name = alias_gtypes[gtype] else: name = nickname_find(gtype) try: Operation.generate_sphinx(name) all_names.append(name) except Error: pass type_map(gtype, add_name) return ffi.NULL type_map(type_from_name('VipsOperation'), add_name) all_names.sort() exclude = ['scale', 'ifthenelse', 'bandjoin', 'bandrank'] all_names = [x for x in all_names if (x not in exclude)] print('.. class:: pyvips.Image\n') print(' .. rubric:: Methods\n') print(' .. autosummary::') print(' :nosignatures:\n') for name in all_names: print(' ~{0}'.format(name)) print() print() for name in all_names: docstr = Operation.generate_sphinx(name) docstr = docstr.replace('\n', '\n ') print((' ' + docstr))<|docstring|>Generate sphinx documentation. This generates a .rst file for all auto-generated image methods. Use it to regenerate the docs with something like:: $ python -c "import pyvips; pyvips.Operation.generate_sphinx_all()" > x And copy-paste the file contents into doc/vimage.rst in the appropriate place.<|endoftext|>
0abf8978274c0159cf207a87dc60978a7e0f2694733a2011c22f1885b946eba5
def edit(self, name): '\n if name is in database edit else add\n\n :param name:\n :return:\n ' db = self.get_database() with db.session_ctx(): dbuser = db.get_user(name) if dbuser: self._edit_user(db, dbuser) else: self.user = name self._add_item(db) return self.user
if name is in database edit else add :param name: :return:
pychron/entry/entry_views/user_entry.py
edit
ASUPychron/pychron
31
python
def edit(self, name): '\n if name is in database edit else add\n\n :param name:\n :return:\n ' db = self.get_database() with db.session_ctx(): dbuser = db.get_user(name) if dbuser: self._edit_user(db, dbuser) else: self.user = name self._add_item(db) return self.user
def edit(self, name): '\n if name is in database edit else add\n\n :param name:\n :return:\n ' db = self.get_database() with db.session_ctx(): dbuser = db.get_user(name) if dbuser: self._edit_user(db, dbuser) else: self.user = name self._add_item(db) return self.user<|docstring|>if name is in database edit else add :param name: :return:<|endoftext|>
ca0b58312a33ade488bb71650476fb394d77e7ace0501850b06c37de722398d6
def test_user_can_register(self): 'Test new user can be added to the system.' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='testpassword', first_name='test', last_name='username')), content_type='application/json') self.assertEqual(response.status_code, 201) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('registered', response_msg['Message'])
Test new user can be added to the system.
tests/v2/test_user.py
test_user_can_register
ThaDeveloper/weConnect
1
python
def test_user_can_register(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='testpassword', first_name='test', last_name='username')), content_type='application/json') self.assertEqual(response.status_code, 201) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('registered', response_msg['Message'])
def test_user_can_register(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='testpassword', first_name='test', last_name='username')), content_type='application/json') self.assertEqual(response.status_code, 201) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('registered', response_msg['Message'])<|docstring|>Test new user can be added to the system.<|endoftext|>
dedcc7727d24f669f283f8a190713c815b694f377c714ab50138baa3a5e9b268
def test_blank_username(self): 'Tests error raised with username missing.' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='', password='testpass', first_name='blank', last_name='username')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
Tests error raised with username missing.
tests/v2/test_user.py
test_blank_username
ThaDeveloper/weConnect
1
python
def test_blank_username(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='', password='testpass', first_name='blank', last_name='username')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
def test_blank_username(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='', password='testpass', first_name='blank', last_name='username')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])<|docstring|>Tests error raised with username missing.<|endoftext|>
5631c81e36fee5344abdb789dc40689ae3a11748d4cd26d453f6d28d591207cb
def test_username_has_space(self): 'Tests error raised when username contains spaces.' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='first last', password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('spaces', response_msg['Message'])
Tests error raised when username contains spaces.
tests/v2/test_user.py
test_username_has_space
ThaDeveloper/weConnect
1
python
def test_username_has_space(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='first last', password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('spaces', response_msg['Message'])
def test_username_has_space(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='first last', password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('spaces', response_msg['Message'])<|docstring|>Tests error raised when username contains spaces.<|endoftext|>
3de5fa3097468c7f7b96733ce2350c87e8b33e1ffbb316d0337418d574514233
def test_username_has_enough_characters(self): 'Tests error raised when username has less then 3 characters.' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='am', password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('characters', response_msg['Message'])
Tests error raised when username has less then 3 characters.
tests/v2/test_user.py
test_username_has_enough_characters
ThaDeveloper/weConnect
1
python
def test_username_has_enough_characters(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='am', password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('characters', response_msg['Message'])
def test_username_has_enough_characters(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='am', password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('characters', response_msg['Message'])<|docstring|>Tests error raised when username has less then 3 characters.<|endoftext|>
d88b822d6a367e887c3c9c0d2ad10e23ae7fb447b736fbf08dc2793be5378280
def test_missing_password(self): 'Tests error raised when password is missing.' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
Tests error raised when password is missing.
tests/v2/test_user.py
test_missing_password
ThaDeveloper/weConnect
1
python
def test_missing_password(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
def test_missing_password(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])<|docstring|>Tests error raised when password is missing.<|endoftext|>
2a0126cb12e6628fcd6ae8d9873be0e36cb532dfe0f18f5c71bce93a58e1a9c2
def test_missing_first_or_last_name(self): 'Tests error raised when first name or last name is missing.' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='testpassword', first_name='', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
Tests error raised when first name or last name is missing.
tests/v2/test_user.py
test_missing_first_or_last_name
ThaDeveloper/weConnect
1
python
def test_missing_first_or_last_name(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='testpassword', first_name='', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
def test_missing_first_or_last_name(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testusername', password='testpassword', first_name='', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])<|docstring|>Tests error raised when first name or last name is missing.<|endoftext|>
24add8ea6c54dd08e9257f3289a7bb79c0fbe4ff7a0c180b6420530dca81af6d
def test_username_isstring(self): 'Tests error raised when wrong username format is provided.' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username=1234, password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Wrong', response_msg['Message'])
Tests error raised when wrong username format is provided.
tests/v2/test_user.py
test_username_isstring
ThaDeveloper/weConnect
1
python
def test_username_isstring(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username=1234, password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Wrong', response_msg['Message'])
def test_username_isstring(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username=1234, password='testpass', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Wrong', response_msg['Message'])<|docstring|>Tests error raised when wrong username format is provided.<|endoftext|>
9a4f3c3e165f77b23f9ca8ab865c79f1f20b4e6f78bf72fd8be5857e4ef69a65
def test_duplicate_users(self): '\n Tests for duplicate usernames\n ' response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testuser', password='password', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('already exists', response_msg['Message'])
Tests for duplicate usernames
tests/v2/test_user.py
test_duplicate_users
ThaDeveloper/weConnect
1
python
def test_duplicate_users(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testuser', password='password', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('already exists', response_msg['Message'])
def test_duplicate_users(self): response = self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testuser', password='password', first_name='first', last_name='last')), content_type='application/json') self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('already exists', response_msg['Message'])<|docstring|>Tests for duplicate usernames<|endoftext|>
8f7d956f6b24a02be3b63397e9605786cdd0ec9f81b27a9272670fd6090ff317
def test_valid_login_generates_token(self): 'Tests token is generated on successful login.' response = self.app.post('/api/v2/auth/login', data=json.dumps(self.user), content_type='application/json') self.assertEqual(response.status_code, 200) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('token', response_msg)
Tests token is generated on successful login.
tests/v2/test_user.py
test_valid_login_generates_token
ThaDeveloper/weConnect
1
python
def test_valid_login_generates_token(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(self.user), content_type='application/json') self.assertEqual(response.status_code, 200) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('token', response_msg)
def test_valid_login_generates_token(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(self.user), content_type='application/json') self.assertEqual(response.status_code, 200) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('token', response_msg)<|docstring|>Tests token is generated on successful login.<|endoftext|>
20bc72a11b999a831d2e8bda3e09d688e881a504abf26b7592fa5c20767c4a37
def test_missing_credentials(self): 'Tests error raised for missing auth details.' response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='', password='')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
Tests error raised for missing auth details.
tests/v2/test_user.py
test_missing_credentials
ThaDeveloper/weConnect
1
python
def test_missing_credentials(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='', password='')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])
def test_missing_credentials(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='', password='')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('required', response_msg['Message'])<|docstring|>Tests error raised for missing auth details.<|endoftext|>
15171ceeb1592d4aea4203dd284c2d857d21b9ca7f77c1d2956deb1019c33796
def test_invalid_username_login(self): 'Tests unauthorized error raised with invalid username.' response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='invalid', password='testpass')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('not found', response_msg['Message'])
Tests unauthorized error raised with invalid username.
tests/v2/test_user.py
test_invalid_username_login
ThaDeveloper/weConnect
1
python
def test_invalid_username_login(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='invalid', password='testpass')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('not found', response_msg['Message'])
def test_invalid_username_login(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='invalid', password='testpass')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('not found', response_msg['Message'])<|docstring|>Tests unauthorized error raised with invalid username.<|endoftext|>
e6e89b79dca68c88d74c0eb72b4214bb34f311f7ce42939b2d12f2020cb24e0d
def test_invalid_password_login(self): 'Tests unauthorized error raised with invalid password.' response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='testuser', password='invalid')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('invalid', response_msg['Message'])
Tests unauthorized error raised with invalid password.
tests/v2/test_user.py
test_invalid_password_login
ThaDeveloper/weConnect
1
python
def test_invalid_password_login(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='testuser', password='invalid')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('invalid', response_msg['Message'])
def test_invalid_password_login(self): response = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='testuser', password='invalid')), content_type='application/json') self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('invalid', response_msg['Message'])<|docstring|>Tests unauthorized error raised with invalid password.<|endoftext|>
7e5677c34f1ea235117b954fa1f1070e582d69eb30ca0938ee9a7d9960530be6
def test_logout(self): 'Test logout success' response = self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 200) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('out', response_msg['Message'])
Test logout success
tests/v2/test_user.py
test_logout
ThaDeveloper/weConnect
1
python
def test_logout(self): response = self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 200) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('out', response_msg['Message'])
def test_logout(self): response = self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 200) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('out', response_msg['Message'])<|docstring|>Test logout success<|endoftext|>
7f59f1871c9f1f813ae2a8375a295f4182b85fd8e54b70b84e8e6d2ba203c13b
def test_double_logout(self): 'Test Re-logout' self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) response = self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Already', response_msg['Message'])
Test Re-logout
tests/v2/test_user.py
test_double_logout
ThaDeveloper/weConnect
1
python
def test_double_logout(self): self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) response = self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Already', response_msg['Message'])
def test_double_logout(self): self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) response = self.app.delete('/api/v2/auth/logout', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 400) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Already', response_msg['Message'])<|docstring|>Test Re-logout<|endoftext|>
6b7117c50ff326c37edd90a39e103769d89cbf4a16faf6a5ceb1555269aaa7f9
def test_reset_password(self): 'Register a user' self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testreset', password='pass', first_name='first', last_name='last')), headers={'content-type': 'application/json'}) self.login = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='testreset', password='pass')), content_type='application/json') self.data = json.loads(self.login.get_data(as_text=True)) self.token = self.data['token'] response = self.app.put('/api/v2/auth/reset-password', data=json.dumps(dict(username='testreset', password='new_pass')), content_type='application/json', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 202) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('updated', response_msg['Message'])
Register a user
tests/v2/test_user.py
test_reset_password
ThaDeveloper/weConnect
1
python
def test_reset_password(self): self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testreset', password='pass', first_name='first', last_name='last')), headers={'content-type': 'application/json'}) self.login = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='testreset', password='pass')), content_type='application/json') self.data = json.loads(self.login.get_data(as_text=True)) self.token = self.data['token'] response = self.app.put('/api/v2/auth/reset-password', data=json.dumps(dict(username='testreset', password='new_pass')), content_type='application/json', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 202) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('updated', response_msg['Message'])
def test_reset_password(self): self.app.post('/api/v2/auth/register', data=json.dumps(dict(username='testreset', password='pass', first_name='first', last_name='last')), headers={'content-type': 'application/json'}) self.login = self.app.post('/api/v2/auth/login', data=json.dumps(dict(username='testreset', password='pass')), content_type='application/json') self.data = json.loads(self.login.get_data(as_text=True)) self.token = self.data['token'] response = self.app.put('/api/v2/auth/reset-password', data=json.dumps(dict(username='testreset', password='new_pass')), content_type='application/json', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 202) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('updated', response_msg['Message'])<|docstring|>Register a user<|endoftext|>
e3579f083468afdf687ecef8376559562dcb6985b2860c244596bccfcc90041f
def test_get_one_user(self): 'Tests authorized user can get a specific user profile' response = self.app.get('/api/v2/auth/users/{}'.format(User.query.order_by(User.created_at).first().id), content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 200)
Tests authorized user can get a specific user profile
tests/v2/test_user.py
test_get_one_user
ThaDeveloper/weConnect
1
python
def test_get_one_user(self): response = self.app.get('/api/v2/auth/users/{}'.format(User.query.order_by(User.created_at).first().id), content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 200)
def test_get_one_user(self): response = self.app.get('/api/v2/auth/users/{}'.format(User.query.order_by(User.created_at).first().id), content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 200)<|docstring|>Tests authorized user can get a specific user profile<|endoftext|>
c10e3846692f2e605e457bfbee07e290949027f0baee159be034326caf07bfc8
def test_get_all_users(self): 'Test if get method gets all registered users' response = self.app.get('/api/v2/auth/users', content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 200)
Test if get method gets all registered users
tests/v2/test_user.py
test_get_all_users
ThaDeveloper/weConnect
1
python
def test_get_all_users(self): response = self.app.get('/api/v2/auth/users', content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 200)
def test_get_all_users(self): response = self.app.get('/api/v2/auth/users', content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 200)<|docstring|>Test if get method gets all registered users<|endoftext|>
e16b9697af5b9b6180558d0e3b2b4ec037cd148c39b841761406906f7dcf9ad9
def test_get_all_users_unauthorized(self): 'Tests error raised for accessing user list for non-admins' response = self.app.get('/api/v2/auth/users', content_type='application/json', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Cannot perform', response_msg['Message'])
Tests error raised for accessing user list for non-admins
tests/v2/test_user.py
test_get_all_users_unauthorized
ThaDeveloper/weConnect
1
python
def test_get_all_users_unauthorized(self): response = self.app.get('/api/v2/auth/users', content_type='application/json', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Cannot perform', response_msg['Message'])
def test_get_all_users_unauthorized(self): response = self.app.get('/api/v2/auth/users', content_type='application/json', headers={'x-access-token': self.token}) self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Cannot perform', response_msg['Message'])<|docstring|>Tests error raised for accessing user list for non-admins<|endoftext|>
7ca20055c1cb9b3edf43f446600e92ef8118e5ce0c7a840d1adc375ba64fb361
def test_get_one_user_unauthorized(self): 'Tests error raised for accessing a user for non-admin' response = self.app.get('/api/v2/auth/users/{}'.format(User.query.order_by(User.created_at).first().id), content_type='application/json', headers={'x-access-token': self.unkowntoken}) self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Cannot perform', response_msg['Message'])
Tests error raised for accessing a user for non-admin
tests/v2/test_user.py
test_get_one_user_unauthorized
ThaDeveloper/weConnect
1
python
def test_get_one_user_unauthorized(self): response = self.app.get('/api/v2/auth/users/{}'.format(User.query.order_by(User.created_at).first().id), content_type='application/json', headers={'x-access-token': self.unkowntoken}) self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Cannot perform', response_msg['Message'])
def test_get_one_user_unauthorized(self): response = self.app.get('/api/v2/auth/users/{}'.format(User.query.order_by(User.created_at).first().id), content_type='application/json', headers={'x-access-token': self.unkowntoken}) self.assertEqual(response.status_code, 401) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('Cannot perform', response_msg['Message'])<|docstring|>Tests error raised for accessing a user for non-admin<|endoftext|>
50b685b89027e8c71f5fc223d6d7c6bd46ff7b40e112275cedaa6fcfae476554
def test_get_user_404(self): 'Test error raised when accessing nonexisting user' response = self.app.get('/api/v2/auth/users/00', content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 404) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('not found', response_msg['Message'])
Test error raised when accessing nonexisting user
tests/v2/test_user.py
test_get_user_404
ThaDeveloper/weConnect
1
python
def test_get_user_404(self): response = self.app.get('/api/v2/auth/users/00', content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 404) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('not found', response_msg['Message'])
def test_get_user_404(self): response = self.app.get('/api/v2/auth/users/00', content_type='application/json', headers={'x-access-token': self.admintoken}) self.assertEqual(response.status_code, 404) response_msg = json.loads(response.data.decode('UTF-8')) self.assertIn('not found', response_msg['Message'])<|docstring|>Test error raised when accessing nonexisting user<|endoftext|>