Dataset columns:

column                   type           min      max
body_hash                stringlengths  64       64
body                     stringlengths  23       109k
docstring                stringlengths  1        57k
path                     stringlengths  4        198
name                     stringlengths  1        115
repository_name          stringlengths  7        111
repository_stars         float64        0        191k
lang                     stringclasses  1 value (python)
body_without_docstring   stringlengths  14       108k
unified                  stringlengths  45       133k
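Judging from the records below, the `unified` column appears to be the docstring-stripped body and the docstring joined by the `<|docstring|>` and `<|endoftext|>` marker tokens. A minimal sketch of that assumed construction (the helper name `build_unified` is hypothetical, not part of the dataset):

def build_unified(body_without_docstring: str, docstring: str) -> str:
    # Join the stripped function body and its docstring with the separator
    # tokens seen in the `unified` column of the records below.
    return f"{body_without_docstring}<|docstring|>{docstring}<|endoftext|>"

# Usage with values from the first record:
snippet = '@register_bprop(primops.J) def bprop_J(x, dz): return (Jinv(dz),)'
doc = 'Backpropagator for primitive `J`.'
assert build_unified(snippet, doc).endswith('<|endoftext|>')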
body_hash: 4b304a7d5ca49dbfcdbe22c4113743bee1d652ae9d560bca1ac74abfa83be3c4
path: myia/prim/grad_implementations.py
name: bprop_J
repository_name: bartvm/myia
repository_stars: 0
lang: python
body:
@register_bprop(primops.J)
def bprop_J(x, dz):
    'Backpropagator for primitive `J`.'
    return (Jinv(dz),)

body_hash: 021e4f4f1e4217ffade258e9293758df4183e47536a1b882ec6ec9f6c6aafa66
path: myia/prim/grad_implementations.py
name: bprop_Jinv
repository_name: bartvm/myia
repository_stars: 0
lang: python
body:
@register_bprop(primops.Jinv)
def bprop_Jinv(x, dz):
    'Backpropagator for primitive `Jinv`.'
    return (J(dz),)

body_hash: ba20a57e97ba0bfe5f516cfb4d5b2ee2cae5e37ca9eded06dc8f03bd3ebfb4d9
path: myia/prim/grad_implementations.py
name: bprop_zeros_like
repository_name: bartvm/myia
repository_stars: 0
lang: python
body:
@register_bprop(primops.zeros_like)
def bprop_zeros_like(x, dz):
    'Backpropagator for primitive `zeros_like`.'
    return (zeros_like(x),)

body_hash: 8acc855af5308533397f3072e77f116a2c35db385208403c1a1f5ebc587c49a9
path: myia/prim/grad_implementations.py
name: __fprop__if_
repository_name: bartvm/myia
repository_stars: 0
lang: python
body:
@register_augm(primops.if_)
def __fprop__if_(c, tb, fb):
    'Backpropagator for primitive `if`.'
    if Jinv(c):
        res = tb()
    else:
        res = fb()
    (rval, branch_bprop) = res

    def __bprop__if_(dout):
        zc = zeros_like(c)
        value = branch_bprop(dout)[0]
        if Jinv(c):
            return ((), zc, value, zeros_like(Jinv(fb)))
        else:
            return ((), zc, zeros_like(Jinv(tb)), value)
    return (rval, __bprop__if_)

body_hash: a44bebef150e1c2f0cf7dac653ea9147634515ed7498d880c9864211b7c8e75c
path: Data.py
name: _get_val_list
repository_name: FunctionX/validator_queries
repository_stars: 0
lang: python
body:
def _get_val_list() -> list:
    '''
    create a list of validator addresses with prefix fxvaloper
    '''
    cmd = Cmd._filter_cmd('validator_info', 'cmd_list.json')
    data = Cmd._get_raw_data(cmd)
    val_add_list = []
    for d in data['validators']:
        val_add_list.append(d['operator_address'])
    return val_add_list

body_hash: 834dcdfd8ffb5c0717add245c5d010234ff0ee0f2f27f813373fba4453cb1f2e
path: Data.py
name: _get_create_val_event
repository_name: FunctionX/validator_queries
repository_stars: 0
lang: python
body:
def _get_create_val_event():
    '''
    get all validator birthdate and corresponding wallet address
    '''
    validators = []
    cmd = Cmd._filter_cmd('create_val', 'cmd_list.json')
    data = Cmd._get_raw_data(cmd)
    for create_val in data['txs']:
        address = create_val['logs'][0]['events'][0]['attributes'][0]['value']
        wallet_address = create_val['logs'][0]['events'][1]['attributes'][2]['value']
        moniker = create_val['tx']['body']['messages'][0]['description']['moniker']
        height = create_val['height']
        timestamp = create_val['timestamp']
        validators.append((address, wallet_address, moniker, height, timestamp))
    f = open('genesis.json')
    data = json.load(f)
    for i in data['data']:
        address = i[2]
        wallet_address = i[1]
        moniker = i[0]
        height = 0
        timestamp = '2021-07-05T04:00:00Z'
        validators.append((address, wallet_address, moniker, height, timestamp))
    return validators

body_hash: 70ba9d5fd4c87cac94036523dc096c8840a2656b274d55d3abde6a9418c7656d
path: Data.py
name: _get_val_outstanding_comms
repository_name: FunctionX/validator_queries
repository_stars: 0
lang: python
body:
def _get_val_outstanding_comms():
    '''
    get outstanding comms for validator
    '''
    values = []
    val_list = _get_val_list()
    for val in val_list:
        cmd = Cmd._filter_cmd('val_outstanding_comms', 'cmd_list.json')
        cmd[4] = val
        commission_data = Cmd._get_raw_data(cmd)
        if (len(commission_data['commission']) > 0):
            commission = (float(commission_data['commission'][0]['amount']) / (10 ** 18))
        else:
            commission = 0
        values.append((val, commission))
    return values

body_hash: af7d0e1c9537b2d07e2e938de493b6d0318dcb7d98c347706492b6dd5737adbe
path: Data.py
name: _get_val_outstanding_delegated_rewards
repository_name: FunctionX/validator_queries
repository_stars: 0
lang: python
body:
def _get_val_outstanding_delegated_rewards():
    '''
    get delegated rewards
    '''
    values = []
    val_info = _get_create_val_event()
    for v in val_info:
        wallet_address = v[1]
        cmd = Cmd._filter_cmd('delegator_rewards', 'cmd_list.json')
        cmd[4] = wallet_address
        rewards_data = Cmd._get_raw_data(cmd)
        if (len(rewards_data['rewards']) > 0):
            rewards = (float(rewards_data['total'][0]['amount']) / (10 ** 18))
        else:
            rewards = 0  # default to 0 when no rewards are returned
        values.append((wallet_address, rewards))
    return values

body_hash: c83e15daf8e4f179e7bd61936aaff08fdfd92f73654f195a62e559eeadeeb23e
path: Data.py
name: _get_all_val_withdrawals
repository_name: FunctionX/validator_queries
repository_stars: 0
lang: python
body:
def _get_all_val_withdrawals():
    '''
    filters out all validator withdrawals "withdraw_rewards" & "withdraw_commission" and returns it in a dictionary with the following format:
    [
        {
            "EXAMPLE_KEY": {
                "3159434": {
                    "withdraw_rewards": "11552361789042999846400FX",
                    "withdraw_commission": "23699689167352852164225FX"
                }
            }
        },
        {
            "EXAMPLE_KEY": {
                "2681576": {
                    "withdraw_rewards": "350840103035052000FX",
                    "withdraw_commission": "1403062229124345928FX"
                },
                "2897873": {
                    "withdraw_rewards": "5199425185789476500FX",
                    "withdraw_commission": "32675461989113084503FX"
                },
                "2948881": {
                    "withdraw_rewards": "9656169816245352000FX",
                    "withdraw_commission": "38899317032588483569FX"
                },
                "3120109": {
                    "withdraw_rewards": "32454562953435984000FX",
                    "withdraw_commission": "116041948186529137740FX"
                },
                "3328120": {
                    "withdraw_rewards": "1853210859617480FX",
                    "withdraw_commission": "198871256725641601371FX"
                }
            }
        }
    ]
    '''
    val_list = _get_val_list()
    msg_action = 'withdraw_validator_commission'
    all_val_withdrawals = []
    for val in val_list:
        val_withdrawals = {}
        msg = Cmd._create_msg_string(val, msg_action)
        cmd = Cmd._filter_cmd('val_withdrawals', 'cmd_list.json')
        cmd[4] = msg
        withdraw_events = Cmd._get_raw_data(cmd)
        withdrawals = {}
        for tx in withdraw_events['txs']:
            withdrawals[tx['height']] = {}
            for log in tx['logs']:
                for event in log['events']:
                    if (event['type'] == 'withdraw_rewards'):
                        for attribute in event['attributes']:
                            if (attribute['key'] == 'amount'):
                                withdrawals[tx['height']]['withdraw_rewards'] = attribute['value']
                    if (event['type'] == 'withdraw_commission'):
                        if (len(event['attributes'][0]) < 2):
                            withdrawals[tx['height']]['withdraw_commission'] = '0FX'
                        else:
                            withdrawals[tx['height']]['withdraw_commission'] = event['attributes'][0]['value']
                    else:
                        pass
        val_withdrawals[val] = withdrawals
        all_val_withdrawals.append(val_withdrawals)
    return all_val_withdrawals

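The withdrawal amounts above are strings such as '11552361789042999846400FX'. Elsewhere in this file (`_get_val_outstanding_comms`) raw amounts are divided by 10 ** 18, so a small helper along those lines might convert them; the 18-decimal assumption is carried over from that function and is not stated for these strings, and the helper name is hypothetical:

def fx_amount_to_float(amount: str) -> float:
    # Strip the 'FX' suffix and scale by the 18-decimal base unit
    # assumed from _get_val_outstanding_comms above.
    return float(amount.removesuffix('FX')) / (10 ** 18)

print(fx_amount_to_float('11552361789042999846400FX'))  # ~11552.36 FX
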
body_hash: b716089f960c1394ba27998a10e5db86e580eb84e906d92b5039ce3dcd2b195c
path: Data.py
name: _get_val_fxcored_status
repository_name: FunctionX/validator_queries
repository_stars: 0
lang: python
body:
def _get_val_fxcored_status() -> dict:
    '''
    query all status for validators
    '''
    cmd = Cmd._filter_cmd('validator_info', 'cmd_list.json')
    data = Cmd._get_raw_data(cmd)
    return data

body_hash: 02da70c53b3a9e60405faffead547736a6185b269c77d77bac1b58103e1f4c64
path: chives/wallet/wallet_node.py
name: trusted_sync
repository_name: HiveProject2021/chives-light-wallet
repository_stars: 7
lang: python
body:
async def trusted_sync(self, full_node: WSChivesConnection):
    '''
    Performs a one-time sync with each trusted peer, subscribing to interested puzzle hashes and coin ids.
    '''
    self.log.info('Starting trusted sync')
    assert (self.wallet_state_manager is not None)
    self.wallet_state_manager.set_sync_mode(True)
    start_time = time.time()
    current_height: uint32 = self.wallet_state_manager.blockchain.get_peak_height()
    request_height: uint32 = uint32(max(0, (current_height - 1000)))
    already_checked: Set[bytes32] = set()
    continue_while: bool = True
    while continue_while:
        all_puzzle_hashes: List[bytes32] = list((await self.wallet_state_manager.puzzle_store.get_all_puzzle_hashes()))
        interested_puzzle_hashes = [t[0] for t in (await self.wallet_state_manager.interested_store.get_interested_puzzle_hashes())]
        all_puzzle_hashes.extend(interested_puzzle_hashes)
        to_check: List[bytes32] = []
        for ph in all_puzzle_hashes:
            if (ph in already_checked):
                continue
            else:
                to_check.append(ph)
                already_checked.add(ph)
                if (len(to_check) == 1000):
                    break
        (await self.subscribe_to_phs(to_check, full_node, request_height))
        check_again = list((await self.wallet_state_manager.puzzle_store.get_all_puzzle_hashes()))
        self.log.debug(f'already_checked {len(already_checked)}')
        self.log.debug(f'check_again {len(check_again)}')
        self.log.debug(f'all_puzzle_hashes {len(all_puzzle_hashes)}')
        (await self.wallet_state_manager.create_more_puzzle_hashes())
        continue_while = False
        for ph in check_again:
            if (ph not in already_checked):
                continue_while = True
                break
    all_coins: Set[WalletCoinRecord] = (await self.wallet_state_manager.coin_store.get_coins_to_check(request_height))
    all_coin_names: List[bytes32] = [coin_record.name() for coin_record in all_coins]
    (removed_dict, added_dict) = (await self.wallet_state_manager.trade_manager.get_coins_of_interest())
    all_coin_names.extend(removed_dict.keys())
    all_coin_names.extend(added_dict.keys())
    all_coin_names.extend((await self.wallet_state_manager.interested_store.get_interested_coin_ids()))
    one_k_chunks = chunks(all_coin_names, 1000)
    for chunk in one_k_chunks:
        (await self.subscribe_to_coin_updates(chunk, full_node, request_height))
    self.wallet_state_manager.set_sync_mode(False)
    end_time = time.time()
    duration = (end_time - start_time)
    self.log.info(f'Trusted sync duration was: {duration}')
    for (wallet_id, wallet) in self.wallet_state_manager.wallets.items():
        self.wallet_state_manager.state_changed('coin_removed', wallet_id)
        self.wallet_state_manager.state_changed('coin_added', wallet_id)
    self.synced_peers.add(full_node.peer_node_id)

body_hash: ed625342e106ac45c0b6209c9096f6a3e7cb09d8c4d8de85e8b7019ceda7a240
path: chives/wallet/wallet_node.py
name: subscribe_to_phs
repository_name: HiveProject2021/chives-light-wallet
repository_stars: 7
lang: python
body:
async def subscribe_to_phs(self, puzzle_hashes: List[bytes32], peer: WSChivesConnection, height=uint32(0)):
    '''
    Tell full nodes that we are interested in puzzle hashes, and for trusted connections, add the new coin state
    for the puzzle hashes.
    '''
    msg = wallet_protocol.RegisterForPhUpdates(puzzle_hashes, height)
    all_state: Optional[RespondToPhUpdates] = (await peer.register_interest_in_puzzle_hash(msg))
    if ((all_state is not None) and self.is_trusted(peer)):
        assert (self.wallet_state_manager is not None)
        (await self.wallet_state_manager.new_coin_state(all_state.coin_states, peer))

body_hash: a7008a8754ab3fcf8a12af49560a029c63a4baa869c48eb0aad7f1e7bd749a8b
path: chives/wallet/wallet_node.py
name: subscribe_to_coin_updates
repository_name: HiveProject2021/chives-light-wallet
repository_stars: 7
lang: python
body:
async def subscribe_to_coin_updates(self, coin_names, peer, height=uint32(0)):
    '''
    Tell full nodes that we are interested in coin ids, and for trusted connections, add the new coin state
    for the coin changes.
    '''
    msg = wallet_protocol.RegisterForCoinUpdates(coin_names, height)
    all_coins_state: Optional[RespondToCoinUpdates] = (await peer.register_interest_in_coin(msg))
    if ((all_coins_state is not None) and self.is_trusted(peer)):
        (await self.wallet_state_manager.new_coin_state(all_coins_state.coin_states, peer))

body_hash: 64b61688e42e046e689b8f1e6d707713b1b620ca86f3eaa30830db04ee5c7fa1
path: chives/wallet/wallet_node.py
name: get_timestamp_for_height
repository_name: HiveProject2021/chives-light-wallet
repository_stars: 7
lang: python
body:
async def get_timestamp_for_height(self, height: uint32) -> uint64:
    '''
    Returns the timestamp for transaction block at h=height, if not transaction block, backtracks until it finds
    a transaction block
    '''
    if (height in self.height_to_time):
        return self.height_to_time[height]
    peer = self.get_full_node_peer()
    assert (peer is not None)
    curr_height: uint32 = height
    while True:
        request = wallet_protocol.RequestBlockHeader(curr_height)
        response: Optional[RespondBlockHeader] = (await peer.request_block_header(request))
        if ((response is None) or (not isinstance(response, RespondBlockHeader))):
            raise ValueError(f'Invalid response from {peer}, {response}')
        if (response.header_block.foliage_transaction_block is not None):
            self.height_to_time[height] = response.header_block.foliage_transaction_block.timestamp
            return response.header_block.foliage_transaction_block.timestamp
        curr_height = uint32((curr_height - 1))

body_hash: 11033983d65736d6619694035142f78084fb38b23623b0907113f31f35c74c93
path: chives/wallet/wallet_node.py
name: validate_received_state_from_peer
repository_name: HiveProject2021/chives-light-wallet
repository_stars: 7
lang: python
body:
async def validate_received_state_from_peer(self, coin_states: List[CoinState], peer, weight_proof: WeightProof, peer_request_cache: PeerRequestCache, return_old_state: bool) -> List[CoinState]:
    '''
    Returns all state that is valid and included in the blockchain proved by the weight proof. If return_old_states
    is False, only new states that are not in the coin_store are returned.
    '''
    assert (self.wallet_state_manager is not None)
    all_validated_states = []
    total = len(coin_states)
    for (coin_idx, coin_state) in enumerate(coin_states):
        looked_up_coin: Optional[WalletCoinRecord] = (await self.wallet_state_manager.coin_store.get_coin_record(coin_state.coin.name()))
        if ((looked_up_coin is not None) and (coin_state.created_height is not None) and (looked_up_coin.confirmed_block_height == coin_state.created_height)):
            if looked_up_coin.spent:
                if (looked_up_coin.spent_block_height == coin_state.spent_height):
                    if return_old_state:
                        all_validated_states.append(coin_state)
                    continue
            elif (coin_state.spent_height is None):
                if return_old_state:
                    all_validated_states.append(coin_state)
                continue
        if (coin_state.get_hash() in peer_request_cache.states_validated):
            all_validated_states.append(coin_state)
            continue
        self.log.info(f'Validating {(coin_idx + 1)} of {total}')
        spent_height = coin_state.spent_height
        confirmed_height = coin_state.created_height
        current = (await self.wallet_state_manager.coin_store.get_coin_record(coin_state.coin.name()))
        current_spent_height = None
        if ((current is not None) and (current.spent_block_height != 0)):
            current_spent_height = current.spent_block_height
        wp_tip_height = weight_proof.recent_chain_data[(- 1)].height
        if (((confirmed_height is not None) and (confirmed_height > wp_tip_height)) or ((spent_height is not None) and (spent_height > wp_tip_height))):
            continue
        elif ((current is not None) and (current_spent_height == spent_height) and (current.confirmed_block_height == confirmed_height)):
            all_validated_states.append(coin_state)
            continue
        else:
            if (confirmed_height is None):
                peer.close(9999)
                raise ValueError('Should not receive state for non-existing coin')
            self.log.debug(f'Validating state: {coin_state}')
            if (confirmed_height in peer_request_cache.blocks):
                state_block: HeaderBlock = peer_request_cache.blocks[confirmed_height]
            else:
                request = RequestHeaderBlocks(confirmed_height, confirmed_height)
                res = (await peer.request_header_blocks(request))
                state_block = res.header_blocks[0]
                peer_request_cache.blocks[confirmed_height] = state_block
            assert (state_block.foliage_transaction_block is not None)
            validate_additions_result = (await request_and_validate_additions(peer, state_block.height, state_block.header_hash, coin_state.coin.puzzle_hash, state_block.foliage_transaction_block.additions_root))
            if (validate_additions_result is False):
                peer.close(9999)
                raise ValueError(f'Addition did not validate: {state_block}, {coin_state}')
            validated = (await self.validate_state(weight_proof, state_block, peer, peer_request_cache))
            if (not validated):
                raise ValueError('Validation failed')
            if ((spent_height is None) and (current is not None) and (current.spent_block_height != 0)):
                if (spent_height in peer_request_cache.blocks):
                    spent_state_block: HeaderBlock = peer_request_cache.blocks[current.spent_block_height]
                else:
                    request = RequestHeaderBlocks(current.spent_block_height, current.spent_block_height)
                    res = (await peer.request_header_blocks(request))
                    spent_state_block = res.header_blocks[0]
                    assert (spent_state_block.height == current.spent_block_height)
                    peer_request_cache.blocks[current.spent_block_height] = spent_state_block
                assert (spent_state_block.foliage_transaction_block is not None)
                validate_removals_result: bool = (await request_and_validate_removals(peer, current.spent_block_height, spent_state_block.header_hash, coin_state.coin.name(), spent_state_block.foliage_transaction_block.removals_root))
                if (validate_removals_result is False):
                    peer.close(9999)
                    raise ValueError('Validation failed')
                validated = (await self.validate_state(weight_proof, spent_state_block, peer, peer_request_cache))
                if (not validated):
                    raise ValueError('Validation failed')
            if (spent_height is not None):
                if (spent_height in peer_request_cache.blocks):
                    spent_state_block = peer_request_cache.blocks[spent_height]
                else:
                    request = RequestHeaderBlocks(spent_height, spent_height)
                    res = (await peer.request_header_blocks(request))
                    spent_state_block = res.header_blocks[0]
                    assert (spent_state_block.height == spent_height)
                    peer_request_cache.blocks[spent_height] = spent_state_block
                assert (spent_state_block.foliage_transaction_block is not None)
                validate_removals_result = (await request_and_validate_removals(peer, spent_state_block.height, spent_state_block.header_hash, coin_state.coin.name(), spent_state_block.foliage_transaction_block.removals_root))
                if (validate_removals_result is False):
                    peer.close(9999)
                    raise ValueError(f'Removals did not validate {spent_state_block}, {coin_state}')
                validated = (await self.validate_state(weight_proof, spent_state_block, peer, peer_request_cache))
                if (not validated):
                    raise ValueError('Validation failed')
            all_validated_states.append(coin_state)
        peer_request_cache.states_validated[coin_state.get_hash()] = coin_state
    return all_validated_states

body_hash: 596ece9a42b5dee5e38a79b51978a5b6d101260746641e7908cfac073cf257c1
path: pypureclient/flasharray/FA_2_11/api/snmp_managers_api.py
name: api211_snmp_managers_delete_with_http_info
repository_name: genegr/py-pure-client
repository_stars: 14
lang: python
body:
def api211_snmp_managers_delete_with_http_info(self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None):
    '''Delete SNMP manager

    Deletes the SNMP manager object and stops communication with specified managers.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api211_snmp_managers_delete_with_http_info(async_req=True)
    >>> result = thread.get()

    :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
    :param str x_request_id: Supplied by client during request or generated by server.
    :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
    :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
    :param bool _return_http_data_only: Returns only data field.
    :param bool _preload_content: Response is converted into objects.
    :param int _request_timeout: Total request timeout in seconds.
        It can also be a tuple of (connection time, read time) timeouts.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    '''
    if (names is not None):
        if (not isinstance(names, list)):
            names = [names]
    params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)}
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    collection_formats = {}
    path_params = {}
    query_params = []
    if ('names' in params):
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    header_params = {}
    if ('authorization' in params):
        header_params['Authorization'] = params['authorization']
    if ('x_request_id' in params):
        header_params['X-Request-ID'] = params['x_request_id']
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
    auth_settings = []
    return self.api_client.call_api('/api/2.11/snmp-managers', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)

def api211_snmp_managers_delete_with_http_info(self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Delete SNMP manager\n\n Deletes the SNMP manager object and stops communication with specified managers.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_delete_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def api211_snmp_managers_delete_with_http_info(self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Delete SNMP manager\n\n Deletes the SNMP manager object and stops communication with specified managers.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_delete_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)<|docstring|>Delete SNMP manager Deletes the SNMP manager object and stops communication with specified managers. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_delete_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: None If the method is called asynchronously, returns the request thread.<|endoftext|>
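A minimal usage sketch for the delete record above. Everything here is an assumption for illustration: `api` stands for an already-constructed instance of the generated SNMP managers API class, `token` for a valid access token, and the manager names are placeholders, none of which appear in the record itself.

# Hypothetical usage of api211_snmp_managers_delete_with_http_info.
# `api` and `token` must be supplied by the caller; they are not defined
# in the record above.
def remove_snmp_managers(api, token):
    # `names` may be a single string or a list; the method normalizes it.
    api.api211_snmp_managers_delete_with_http_info(
        authorization=token,
        names=['manager01', 'manager02'],  # placeholder manager names
    )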
5b4ad6991fc5d05077f03100263d8a13e1ce5ff6b68e6ce624ceb9418b2e5e83
def api211_snmp_managers_get_with_http_info(self, authorization=None, x_request_id=None, continuation_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'List SNMP managers\n\n Displays designated SNMP managers and their communication and security attributes.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.\n :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.\n :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.\n :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.\n :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerGetResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api211_snmp_managers_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api211_snmp_managers_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('continuation_token' in params): query_params.append(('continuation_token', params['continuation_token'])) if ('filter' in params): query_params.append(('filter', params['filter'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if ('total_item_count' in params): query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
List SNMP managers Displays designated SNMP managers and their communication and security attributes. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_get_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters. :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria. :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned. :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values. :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: SnmpManagerGetResponse If the method is called asynchronously, returns the request thread.
pypureclient/flasharray/FA_2_11/api/snmp_managers_api.py
api211_snmp_managers_get_with_http_info
genegr/py-pure-client
14
python
def api211_snmp_managers_get_with_http_info(self, authorization=None, x_request_id=None, continuation_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'List SNMP managers\n\n Displays designated SNMP managers and their communication and security attributes.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.\n :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.\n :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.\n :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.\n :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerGetResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api211_snmp_managers_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api211_snmp_managers_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('continuation_token' in params): query_params.append(('continuation_token', params['continuation_token'])) if ('filter' in params): query_params.append(('filter', params['filter'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if ('total_item_count' in params): query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def api211_snmp_managers_get_with_http_info(self, authorization=None, x_request_id=None, continuation_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'List SNMP managers\n\n Displays designated SNMP managers and their communication and security attributes.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.\n :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.\n :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.\n :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.\n :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerGetResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api211_snmp_managers_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api211_snmp_managers_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('continuation_token' in params): query_params.append(('continuation_token', params['continuation_token'])) if ('filter' in params): query_params.append(('filter', params['filter'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if ('total_item_count' in params): query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)<|docstring|>List SNMP managers Displays designated SNMP managers and their communication and security attributes. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_get_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters. :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria. :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned. :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values. :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: SnmpManagerGetResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
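A hedged pagination sketch based only on the docstring above: pagination is said to require `limit` plus a `continuation_token` taken from the `x-next-token` response header. How that header is surfaced by the client is not shown in this record, so the sketch threads an opaque token through repeated calls via a caller-supplied helper.

# Hypothetical pagination loop over api211_snmp_managers_get_with_http_info.
# `api`, `token`, and `extract_next_token` are assumptions, not part of the
# record; extract_next_token would read the `x-next-token` response header.
def list_all_snmp_managers(api, token, extract_next_token):
    next_token = None
    while True:
        resp = api.api211_snmp_managers_get_with_http_info(
            authorization=token,
            limit=10,                       # page size
            continuation_token=next_token,  # None is filtered out on the first call
            sort=['name'],                  # ascending by name
        )
        yield resp
        next_token = extract_next_token(resp)
        if next_token is None:              # last page reached
            break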
ac5040e568f45bea79c37a3ee383c4dcc5a5e9aeb0f46738506e3981a19122b5
def api211_snmp_managers_patch_with_http_info(self, snmp_manager=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Modify SNMP manager\n\n Modifies the name or the protocol attributes of the specified SNMP manager.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_patch_with_http_info(snmp_manager, async_req=True)\n >>> result = thread.get()\n\n :param SnmpManagerPatch snmp_manager: (required)\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (snmp_manager is None): raise TypeError('Missing the required parameter `snmp_manager` when calling `api211_snmp_managers_patch`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if ('snmp_manager' in params): body_params = params['snmp_manager'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
Modify SNMP manager Modifies the name or the protocol attributes of the specified SNMP manager. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_patch_with_http_info(snmp_manager, async_req=True) >>> result = thread.get() :param SnmpManagerPatch snmp_manager: (required) :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: SnmpManagerResponse If the method is called asynchronously, returns the request thread.
pypureclient/flasharray/FA_2_11/api/snmp_managers_api.py
api211_snmp_managers_patch_with_http_info
genegr/py-pure-client
14
python
def api211_snmp_managers_patch_with_http_info(self, snmp_manager=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Modify SNMP manager\n\n Modifies the name or the protocol attributes of the specified SNMP manager.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_patch_with_http_info(snmp_manager, async_req=True)\n >>> result = thread.get()\n\n :param SnmpManagerPatch snmp_manager: (required)\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (snmp_manager is None): raise TypeError('Missing the required parameter `snmp_manager` when calling `api211_snmp_managers_patch`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if ('snmp_manager' in params): body_params = params['snmp_manager'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def api211_snmp_managers_patch_with_http_info(self, snmp_manager=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Modify SNMP manager\n\n Modifies the name or the protocol attributes of the specified SNMP manager.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_patch_with_http_info(snmp_manager, async_req=True)\n >>> result = thread.get()\n\n :param SnmpManagerPatch snmp_manager: (required)\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (snmp_manager is None): raise TypeError('Missing the required parameter `snmp_manager` when calling `api211_snmp_managers_patch`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if ('snmp_manager' in params): body_params = params['snmp_manager'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)<|docstring|>Modify SNMP manager Modifies the name or the protocol attributes of the specified SNMP manager. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_patch_with_http_info(snmp_manager, async_req=True) >>> result = thread.get() :param SnmpManagerPatch snmp_manager: (required) :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: SnmpManagerResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
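A usage sketch for the patch record. The `SnmpManagerPatch` model is named in the docstring but its fields are not shown here, so the body object is left to the caller; `api`, `token`, and the manager name are placeholders.

# Hypothetical usage of api211_snmp_managers_patch_with_http_info.
# `patch_body` must be an SnmpManagerPatch instance (required parameter);
# its construction is not shown in the record and is assumed.
def update_snmp_manager(api, token, patch_body):
    api.api211_snmp_managers_patch_with_http_info(
        snmp_manager=patch_body,
        authorization=token,
        names=['manager01'],  # placeholder name of the manager to modify
    )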
6332b0aa0be61964ce1e7d4bc34dc3a72a30b861b5f09923f437b197c2ecbbc0
def api211_snmp_managers_post_with_http_info(self, snmp_manager=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Create SNMP manager\n\n Creates a Purity SNMP manager object that identifies a host (SNMP manager) and specifies the protocol attributes for communicating with it. Once a manager object is created, the transmission of SNMP traps is immediately enabled.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_post_with_http_info(snmp_manager, async_req=True)\n >>> result = thread.get()\n\n :param SnmpManagerPost snmp_manager: (required)\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (snmp_manager is None): raise TypeError('Missing the required parameter `snmp_manager` when calling `api211_snmp_managers_post`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if ('snmp_manager' in params): body_params = params['snmp_manager'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
Create SNMP manager Creates a Purity SNMP manager object that identifies a host (SNMP manager) and specifies the protocol attributes for communicating with it. Once a manager object is created, the transmission of SNMP traps is immediately enabled. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_post_with_http_info(snmp_manager, async_req=True) >>> result = thread.get() :param SnmpManagerPost snmp_manager: (required) :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: SnmpManagerResponse If the method is called asynchronously, returns the request thread.
pypureclient/flasharray/FA_2_11/api/snmp_managers_api.py
api211_snmp_managers_post_with_http_info
genegr/py-pure-client
14
python
def api211_snmp_managers_post_with_http_info(self, snmp_manager=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Create SNMP manager\n\n Creates a Purity SNMP manager object that identifies a host (SNMP manager) and specifies the protocol attributes for communicating with it. Once a manager object is created, the transmission of SNMP traps is immediately enabled.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_post_with_http_info(snmp_manager, async_req=True)\n >>> result = thread.get()\n\n :param SnmpManagerPost snmp_manager: (required)\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (snmp_manager is None): raise TypeError('Missing the required parameter `snmp_manager` when calling `api211_snmp_managers_post`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if ('snmp_manager' in params): body_params = params['snmp_manager'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def api211_snmp_managers_post_with_http_info(self, snmp_manager=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'Create SNMP manager\n\n Creates a Purity SNMP manager object that identifies a host (SNMP manager) and specifies the protocol attributes for communicating with it. Once a manager object is created, the transmission of SNMP traps is immediately enabled.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_post_with_http_info(snmp_manager, async_req=True)\n >>> result = thread.get()\n\n :param SnmpManagerPost snmp_manager: (required)\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: SnmpManagerResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (snmp_manager is None): raise TypeError('Missing the required parameter `snmp_manager` when calling `api211_snmp_managers_post`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if ('snmp_manager' in params): body_params = params['snmp_manager'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)<|docstring|>Create SNMP manager Creates a Purity SNMP manager object that identifies a host (SNMP manager) and specifies the protocol attributes for communicating with it. Once a manager object is created, the transmission of SNMP traps is immediately enabled. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_post_with_http_info(snmp_manager, async_req=True) >>> result = thread.get() :param SnmpManagerPost snmp_manager: (required) :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: SnmpManagerResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
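A usage sketch for the post record, mirroring the patch sketch above. Per the docstring, trap transmission is enabled as soon as the manager object is created; `SnmpManagerPost` construction is assumed, as are `api` and `token`.

# Hypothetical usage of api211_snmp_managers_post_with_http_info.
# `post_body` must be an SnmpManagerPost instance (required parameter);
# its construction is not shown in the record and is assumed.
def create_snmp_manager(api, token, post_body):
    api.api211_snmp_managers_post_with_http_info(
        snmp_manager=post_body,
        authorization=token,
        names=['manager01'],  # placeholder name for the new manager
    )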
711ea9e2feb2cf1a890246a7c3156d12b32b24449d6eebdd4dbd77f224638845
def api211_snmp_managers_test_get_with_http_info(self, authorization=None, x_request_id=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'List SNMP manager test results\n\n Displays SNMP manager test results (traps or informs).\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_test_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.\n :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.\n :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.\n :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: TestResultWithResourceGetResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api211_snmp_managers_test_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api211_snmp_managers_test_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('filter' in params): query_params.append(('filter', params['filter'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if ('total_item_count' in params): query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers/test', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TestResultWithResourceGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
List SNMP manager test results Displays SNMP manager test results (traps or informs). This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_test_get_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria. :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned. :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values. :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: TestResultWithResourceGetResponse If the method is called asynchronously, returns the request thread.
pypureclient/flasharray/FA_2_11/api/snmp_managers_api.py
api211_snmp_managers_test_get_with_http_info
genegr/py-pure-client
14
python
def api211_snmp_managers_test_get_with_http_info(self, authorization=None, x_request_id=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'List SNMP manager test results\n\n Displays SNMP manager test results (traps or informs).\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_test_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.\n :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.\n :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.\n :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: TestResultWithResourceGetResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api211_snmp_managers_test_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api211_snmp_managers_test_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('filter' in params): query_params.append(('filter', params['filter'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if ('total_item_count' in params): query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers/test', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TestResultWithResourceGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def api211_snmp_managers_test_get_with_http_info(self, authorization=None, x_request_id=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'List SNMP manager test results\n\n Displays SNMP manager test results (traps or informs).\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api211_snmp_managers_test_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)\n :param str x_request_id: Supplied by client during request or generated by server.\n :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.\n :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.\n :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.\n :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.\n :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.\n :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: TestResultWithResourceGetResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api211_snmp_managers_test_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api211_snmp_managers_test_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('filter' in params): query_params.append(('filter', params['filter'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if ('total_item_count' in params): query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if ('authorization' in params): header_params['Authorization'] = params['authorization'] if ('x_request_id' in params): header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/2.11/snmp-managers/test', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TestResultWithResourceGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)<|docstring|>List SNMP manager test results Displays SNMP manager test results (traps or informs). This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api211_snmp_managers_test_get_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria. :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size. :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`. :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned. :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values. :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: TestResultWithResourceGetResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
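A minimal usage sketch for the endpoint method above. The `api` instance and how it is authenticated are assumptions (any configured instance of the snmp_managers_api class from this record would do); the parameters and the ApplyResult/.get() behavior come from the record's own signature and docstring.
# Hypothetical: `api` is an already-authenticated SNMPManagersApi instance.
thread = api.api211_snmp_managers_test_get_with_http_info(
    limit=10,               # at most 10 results per page
    sort=['name'],          # ascending by name; use '-name' for descending
    total_item_count=True,  # also compute the total match count
    async_req=True)         # run the HTTP call in a worker thread
result = thread.get()       # blocks until the request completes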
f50404ed1c9db82cedef731663bfcb2b2408786791ca744108ccf06e3620e05e
def create_lockfile_name(): 'Generate a unique lock filename using UUID' lock_suffix = str(uuid.uuid4())[:7] return f'smartsim-{lock_suffix}.lock'
Generate a unique lock filename using UUID
smartsim/_core/utils/helpers.py
create_lockfile_name
MattToast/SmartSim
0
python
def create_lockfile_name(): lock_suffix = str(uuid.uuid4())[:7] return f'smartsim-{lock_suffix}.lock'
def create_lockfile_name(): lock_suffix = str(uuid.uuid4())[:7] return f'smartsim-{lock_suffix}.lock'<|docstring|>Generate a unique lock filename using UUID<|endoftext|>
2e85a5f4c706ca0afc3d694b6ba1d96cb450cd09b74026d8b55201045efdca8e
def get_base_36_repr(positive_int): 'Converts a positive integer to its base 36 representation\n :param positive_int: the positive integer to convert\n :type positive_int: int\n :return: base 36 representation of the given positive int\n :rtype: str\n ' digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' result = [] while positive_int: next_digit = digits[(positive_int % 36)] result.append(next_digit) positive_int //= 36 return ''.join(reversed(result))
Converts a positive integer to its base 36 representation :param positive_int: the positive integer to convert :type positive_int: int :return: base 36 representation of the given positive int :rtype: str
smartsim/_core/utils/helpers.py
get_base_36_repr
MattToast/SmartSim
0
python
def get_base_36_repr(positive_int): 'Converts a positive integer to its base 36 representation\n :param positive_int: the positive integer to convert\n :type positive_int: int\n :return: base 36 representation of the given positive int\n :rtype: str\n ' digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' result = [] while positive_int: next_digit = digits[(positive_int % 36)] result.append(next_digit) positive_int //= 36 return ''.join(reversed(result))
def get_base_36_repr(positive_int): 'Converts a positive integer to its base 36 representation\n :param positive_int: the positive integer to convert\n :type positive_int: int\n :return: base 36 representation of the given positive int\n :rtype: str\n ' digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' result = [] while positive_int: next_digit = digits[(positive_int % 36)] result.append(next_digit) positive_int //= 36 return ''.join(reversed(result))<|docstring|>Converts a positive integer to its base 36 representation :param positive_int: the positive integer to convert :type positive_int: int :return: base 36 representation of the given positive int :rtype: str<|endoftext|>
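A quick sanity check of the conversion above, relying only on the record's function and Python's built-in int(s, 36) as the inverse. Note the function returns '' for 0, since the loop body never runs.
# Round-trip check; get_base_36_repr is assumed in scope from the record above.
for n in (1, 35, 36, 1234567):
    s = get_base_36_repr(n)
    assert int(s, 36) == n, (n, s)
assert get_base_36_repr(36) == '10'  # 36 == 1*36 + 0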
24f2f857db55e04a0f317dfb036a8d2cebe6b94a1d51edeefc820e2de772b6a8
def expand_exe_path(exe): 'Takes an executable and returns the full path to that executable\n\n :param exe: executable or file\n :type exe: str\n :raises TypeError: if file is not an executable\n :raises FileNotFoundError: if executable cannot be found\n ' in_path = which(exe) if (not in_path): if (os.path.isfile(exe) and os.access(exe, os.X_OK)): return os.path.abspath(exe) if (os.path.isfile(exe) and (not os.access(exe, os.X_OK))): raise TypeError(f'File, {exe}, is not an executable') raise FileNotFoundError(f'Could not locate executable {exe}') return os.path.abspath(in_path)
Takes an executable and returns the full path to that executable :param exe: executable or file :type exe: str :raises TypeError: if file is not an executable :raises FileNotFoundError: if executable cannot be found
smartsim/_core/utils/helpers.py
expand_exe_path
MattToast/SmartSim
0
python
def expand_exe_path(exe): 'Takes an executable and returns the full path to that executable\n\n :param exe: executable or file\n :type exe: str\n :raises TypeError: if file is not an executable\n :raises FileNotFoundError: if executable cannot be found\n ' in_path = which(exe) if (not in_path): if (os.path.isfile(exe) and os.access(exe, os.X_OK)): return os.path.abspath(exe) if (os.path.isfile(exe) and (not os.access(exe, os.X_OK))): raise TypeError(f'File, {exe}, is not an executable') raise FileNotFoundError(f'Could not locate executable {exe}') return os.path.abspath(in_path)
def expand_exe_path(exe): 'Takes an executable and returns the full path to that executable\n\n :param exe: executable or file\n :type exe: str\n :raises TypeError: if file is not an executable\n :raises FileNotFoundError: if executable cannot be found\n ' in_path = which(exe) if (not in_path): if (os.path.isfile(exe) and os.access(exe, os.X_OK)): return os.path.abspath(exe) if (os.path.isfile(exe) and (not os.access(exe, os.X_OK))): raise TypeError(f'File, {exe}, is not an executable') raise FileNotFoundError(f'Could not locate executable {exe}') return os.path.abspath(in_path)<|docstring|>Takes an executable and returns the full path to that executable :param exe: executable or file :type exe: str :raises TypeError: if file is not an executable :raises FileNotFoundError: if executable cannot be found<|endoftext|>
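A simplified standard-library sketch of the same lookup: shutil.which plus an os.access executability check is what the record's `which` helper is assumed to resolve to (the TypeError branch for non-executable files is omitted here for brevity).
import os
import shutil

def resolve_exe(exe):
    # Prefer the PATH lookup, then fall back to a direct file check.
    found = shutil.which(exe)
    if found:
        return os.path.abspath(found)
    if os.path.isfile(exe) and os.access(exe, os.X_OK):
        return os.path.abspath(exe)
    raise FileNotFoundError(f'Could not locate executable {exe}')

print(resolve_exe('python'))  # e.g. /usr/bin/python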
183f56f44f56e9ce2508b8b187a88baae213809be7ebbfbf083f01a25d213345
def colorize(string, color, bold=False, highlight=False): '\n Colorize a string.\n This function was originally written by John Schulman.\n And then borrowed from spinningup\n https://github.com/openai/spinningup/blob/master/spinup/utils/logx.py\n ' attr = [] num = color2num[color] if highlight: num += 10 attr.append(str(num)) if bold: attr.append('1') return ('\x1b[%sm%s\x1b[0m' % (';'.join(attr), string))
Colorize a string. This function was originally written by John Schulman. And then borrowed from spinningup https://github.com/openai/spinningup/blob/master/spinup/utils/logx.py
smartsim/_core/utils/helpers.py
colorize
MattToast/SmartSim
0
python
def colorize(string, color, bold=False, highlight=False): '\n Colorize a string.\n This function was originally written by John Schulman.\n And then borrowed from spinningup\n https://github.com/openai/spinningup/blob/master/spinup/utils/logx.py\n ' attr = [] num = color2num[color] if highlight: num += 10 attr.append(str(num)) if bold: attr.append('1') return ('\x1b[%sm%s\x1b[0m' % (';'.join(attr), string))
def colorize(string, color, bold=False, highlight=False): '\n Colorize a string.\n This function was originally written by John Schulman.\n And then borrowed from spinningup\n https://github.com/openai/spinningup/blob/master/spinup/utils/logx.py\n ' attr = [] num = color2num[color] if highlight: num += 10 attr.append(str(num)) if bold: attr.append('1') return ('\x1b[%sm%s\x1b[0m' % (';'.join(attr), string))<|docstring|>Colorize a string. This function was originally written by John Schulman. And then borrowed from spinningup https://github.com/openai/spinningup/blob/master/spinup/utils/logx.py<|endoftext|>
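The escape-sequence layout used above, shown self-contained. The full color2num mapping is not part of this record, so the reduced table below uses the conventional ANSI foreground codes (30-37), with +10 giving the matching background code as in the record.
# Conventional ANSI foreground codes; highlight adds 10 to select background.
color2num = dict(gray=30, red=31, green=32, yellow=33,
                 blue=34, magenta=35, cyan=36, white=37)

def demo_colorize(string, color, bold=False, highlight=False):
    attr = [str(color2num[color] + (10 if highlight else 0))]
    if bold:
        attr.append('1')  # SGR code 1 = bold
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)

print(demo_colorize('ok', 'green', bold=True))           # bold green text
print(demo_colorize('warn', 'yellow', highlight=True))   # yellow background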
20691331a227c98794598bb17ca69fe81b0cda103ee9443a42f01680e6d79e6e
def delete_elements(dictionary, key_list): 'Delete elements from a dictionary.\n :param dictionary: the dictionary from which the elements must be deleted.\n :type dictionary: dict\n :param key_list: the list of keys to delete from the dictionary.\n :type key: any\n ' for key in key_list: if (key in dictionary): del dictionary[key]
Delete elements from a dictionary. :param dictionary: the dictionary from which the elements must be deleted. :type dictionary: dict :param key_list: the list of keys to delete from the dictionary. :type key_list: list
smartsim/_core/utils/helpers.py
delete_elements
MattToast/SmartSim
0
python
def delete_elements(dictionary, key_list): 'Delete elements from a dictionary.\n :param dictionary: the dictionary from which the elements must be deleted.\n :type dictionary: dict\n :param key_list: the list of keys to delete from the dictionary.\n :type key: any\n ' for key in key_list: if (key in dictionary): del dictionary[key]
def delete_elements(dictionary, key_list): 'Delete elements from a dictionary.\n :param dictionary: the dictionary from which the elements must be deleted.\n :type dictionary: dict\n :param key_list: the list of keys to delete from the dictionary.\n :type key: any\n ' for key in key_list: if (key in dictionary): del dictionary[key]<|docstring|>Delete elements from a dictionary. :param dictionary: the dictionary from which the elements must be deleted. :type dictionary: dict :param key_list: the list of keys to delete from the dictionary. :type key: any<|endoftext|>
40b5679cf0d72859c958eb986d262e020bd54c2a6e0b0a03a157e721e1c937e9
def cat_arg_and_value(arg_name, value): 'Concatenate a command line argument and its value\n\n This function returns ``arg_name`` and ``value\n concatenated in the best possible way for a command\n line execution, namely:\n - if arg_name starts with `--` (e.g. `--arg`):\n `arg_name=value` is returned (i.e. `--arg=val`)\n - if arg_name starts with `-` (e.g. `-a`):\n `arg_name value` is returned (i.e. `-a val`)\n - if arg_name does not start with `-` and it is a\n long option (e.g. `arg`):\n `--arg_name=value` (i.e., `--arg=val`)\n - if arg_name does not start with `-` and it is a\n short option (e.g. `a`):\n `-arg_name=value` (i.e., `-a val`)\n\n :param arg_name: the command line argument name\n :type arg_name: str\n :param value: the command line argument value\n :type value: str\n ' if arg_name.startswith('--'): return '='.join((arg_name, str(value))) elif arg_name.startswith('-'): return ' '.join((arg_name, str(value))) elif (len(arg_name) == 1): return ' '.join((('-' + arg_name), str(value))) else: return '='.join((('--' + arg_name), str(value)))
Concatenate a command line argument and its value This function returns ``arg_name`` and ``value concatenated in the best possible way for a command line execution, namely: - if arg_name starts with `--` (e.g. `--arg`): `arg_name=value` is returned (i.e. `--arg=val`) - if arg_name starts with `-` (e.g. `-a`): `arg_name value` is returned (i.e. `-a val`) - if arg_name does not start with `-` and it is a long option (e.g. `arg`): `--arg_name=value` (i.e., `--arg=val`) - if arg_name does not start with `-` and it is a short option (e.g. `a`): `-arg_name=value` (i.e., `-a val`) :param arg_name: the command line argument name :type arg_name: str :param value: the command line argument value :type value: str
smartsim/_core/utils/helpers.py
cat_arg_and_value
MattToast/SmartSim
0
python
def cat_arg_and_value(arg_name, value): 'Concatenate a command line argument and its value\n\n This function returns ``arg_name`` and ``value\n concatenated in the best possible way for a command\n line execution, namely:\n - if arg_name starts with `--` (e.g. `--arg`):\n `arg_name=value` is returned (i.e. `--arg=val`)\n - if arg_name starts with `-` (e.g. `-a`):\n `arg_name value` is returned (i.e. `-a val`)\n - if arg_name does not start with `-` and it is a\n long option (e.g. `arg`):\n `--arg_name=value` (i.e., `--arg=val`)\n - if arg_name does not start with `-` and it is a\n short option (e.g. `a`):\n `-arg_name=value` (i.e., `-a val`)\n\n :param arg_name: the command line argument name\n :type arg_name: str\n :param value: the command line argument value\n :type value: str\n ' if arg_name.startswith('--'): return '='.join((arg_name, str(value))) elif arg_name.startswith('-'): return ' '.join((arg_name, str(value))) elif (len(arg_name) == 1): return ' '.join((('-' + arg_name), str(value))) else: return '='.join((('--' + arg_name), str(value)))
def cat_arg_and_value(arg_name, value): 'Concatenate a command line argument and its value\n\n This function returns ``arg_name`` and ``value\n concatenated in the best possible way for a command\n line execution, namely:\n - if arg_name starts with `--` (e.g. `--arg`):\n `arg_name=value` is returned (i.e. `--arg=val`)\n - if arg_name starts with `-` (e.g. `-a`):\n `arg_name value` is returned (i.e. `-a val`)\n - if arg_name does not start with `-` and it is a\n long option (e.g. `arg`):\n `--arg_name=value` (i.e., `--arg=val`)\n - if arg_name does not start with `-` and it is a\n short option (e.g. `a`):\n `-arg_name=value` (i.e., `-a val`)\n\n :param arg_name: the command line argument name\n :type arg_name: str\n :param value: the command line argument value\n :type value: str\n ' if arg_name.startswith('--'): return '='.join((arg_name, str(value))) elif arg_name.startswith('-'): return ' '.join((arg_name, str(value))) elif (len(arg_name) == 1): return ' '.join((('-' + arg_name), str(value))) else: return '='.join((('--' + arg_name), str(value)))<|docstring|>Concatenate a command line argument and its value This function returns ``arg_name`` and ``value concatenated in the best possible way for a command line execution, namely: - if arg_name starts with `--` (e.g. `--arg`): `arg_name=value` is returned (i.e. `--arg=val`) - if arg_name starts with `-` (e.g. `-a`): `arg_name value` is returned (i.e. `-a val`) - if arg_name does not start with `-` and it is a long option (e.g. `arg`): `--arg_name=value` (i.e., `--arg=val`) - if arg_name does not start with `-` and it is a short option (e.g. `a`): `-arg_name=value` (i.e., `-a val`) :param arg_name: the command line argument name :type arg_name: str :param value: the command line argument value :type value: str<|endoftext|>
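The four branches above, exercised explicitly (assuming cat_arg_and_value from this record is in scope); the expected strings follow directly from the rules stated in the docstring.
assert cat_arg_and_value('--nodes', 4) == '--nodes=4'  # long flag given
assert cat_arg_and_value('-n', 4) == '-n 4'            # short flag given
assert cat_arg_and_value('nodes', 4) == '--nodes=4'    # bare long option
assert cat_arg_and_value('n', 4) == '-n 4'             # bare short option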
420de37507d2672f36f956d6a04a6788375c934d9edc99c05817ef55d008f35e
def installed_redisai_backends(backends_path=None): 'Check which ML backends are available for the RedisAI module.\n\n The optional argument ``backends_path`` is needed if the backends\n have not been built as part of the SmartSim building process (i.e.\n they have not been built by invoking `smart build`). In that case\n ``backends_path`` should point to the directory containing e.g.\n the backend directories (`redisai_tensorflow`, `redisai_torch`,\n `redisai_onnxruntime`, or `redisai_tflite`).\n\n :param backends_path: path containing backends, defaults to None\n :type backends_path: str, optional\n :return: list of installed RedisAI backends\n :rtype: list[str]\n ' from ..._core.config import CONFIG installed = [] if (not backends_path): backends_path = (CONFIG.lib_path / 'backends') for backend in ['tensorflow', 'torch', 'onnxruntime', 'tflite']: backend_path = ((backends_path / f'redisai_{backend}') / f'redisai_{backend}.so') backend_so = Path(os.environ.get('RAI_PATH', backend_path)).resolve() if backend_so.is_file(): installed.append(backend) return installed
Check which ML backends are available for the RedisAI module. The optional argument ``backends_path`` is needed if the backends have not been built as part of the SmartSim building process (i.e. they have not been built by invoking `smart build`). In that case ``backends_path`` should point to the directory containing e.g. the backend directories (`redisai_tensorflow`, `redisai_torch`, `redisai_onnxruntime`, or `redisai_tflite`). :param backends_path: path containing backends, defaults to None :type backends_path: str, optional :return: list of installed RedisAI backends :rtype: list[str]
smartsim/_core/utils/helpers.py
installed_redisai_backends
MattToast/SmartSim
0
python
def installed_redisai_backends(backends_path=None): 'Check which ML backends are available for the RedisAI module.\n\n The optional argument ``backends_path`` is needed if the backends\n have not been built as part of the SmartSim building process (i.e.\n they have not been built by invoking `smart build`). In that case\n ``backends_path`` should point to the directory containing e.g.\n the backend directories (`redisai_tensorflow`, `redisai_torch`,\n `redisai_onnxruntime`, or `redisai_tflite`).\n\n :param backends_path: path containing backends, defaults to None\n :type backends_path: str, optional\n :return: list of installed RedisAI backends\n :rtype: list[str]\n ' from ..._core.config import CONFIG installed = [] if (not backends_path): backends_path = (CONFIG.lib_path / 'backends') for backend in ['tensorflow', 'torch', 'onnxruntime', 'tflite']: backend_path = ((backends_path / f'redisai_{backend}') / f'redisai_{backend}.so') backend_so = Path(os.environ.get('RAI_PATH', backend_path)).resolve() if backend_so.is_file(): installed.append(backend) return installed
def installed_redisai_backends(backends_path=None): 'Check which ML backends are available for the RedisAI module.\n\n The optional argument ``backends_path`` is needed if the backends\n have not been built as part of the SmartSim building process (i.e.\n they have not been built by invoking `smart build`). In that case\n ``backends_path`` should point to the directory containing e.g.\n the backend directories (`redisai_tensorflow`, `redisai_torch`,\n `redisai_onnxruntime`, or `redisai_tflite`).\n\n :param backends_path: path containing backends, defaults to None\n :type backends_path: str, optional\n :return: list of installed RedisAI backends\n :rtype: list[str]\n ' from ..._core.config import CONFIG installed = [] if (not backends_path): backends_path = (CONFIG.lib_path / 'backends') for backend in ['tensorflow', 'torch', 'onnxruntime', 'tflite']: backend_path = ((backends_path / f'redisai_{backend}') / f'redisai_{backend}.so') backend_so = Path(os.environ.get('RAI_PATH', backend_path)).resolve() if backend_so.is_file(): installed.append(backend) return installed<|docstring|>Check which ML backends are available for the RedisAI module. The optional argument ``backends_path`` is needed if the backends have not been built as part of the SmartSim building process (i.e. they have not been built by invoking `smart build`). In that case ``backends_path`` should point to the directory containing e.g. the backend directories (`redisai_tensorflow`, `redisai_torch`, `redisai_onnxruntime`, or `redisai_tflite`). :param backends_path: path containing backends, defaults to None :type backends_path: str, optional :return: list of installed RedisAI backends :rtype: list[str]<|endoftext|>
e443939263030fcffa857f44bbbda7a1a3429ddf01bd75b22ec4e086a4fc64e9
def import_helper(mod_name): '\n Helper function used to temporarily override stdout before importing\n a module.\n ' try: sys.stdout = STDOUT_FAKE __import__(mod_name) finally: sys.stdout = STDOUT_BAK
Helper function used to temporarily override stdout before importing a module.
Tests/Tools/stdmodules.py
import_helper
kmad1729/ironpython3
1,872
python
def import_helper(mod_name): '\n Helper function used to temporarily override stdout before importing\n a module.\n ' try: sys.stdout = STDOUT_FAKE __import__(mod_name) finally: sys.stdout = STDOUT_BAK
def import_helper(mod_name): '\n Helper function used to temporarily override stdout before importing\n a module.\n ' try: sys.stdout = STDOUT_FAKE __import__(mod_name) finally: sys.stdout = STDOUT_BAK<|docstring|>Helper function used to temporarily override stdout before importing a module.<|endoftext|>
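The standard library offers the same stdout-swapping pattern as a context manager; a sketch of an equivalent using contextlib.redirect_stdout (an alternative formulation, not what the record's code uses).
import contextlib
import io

def quiet_import(mod_name):
    # Anything the module prints at import time lands in the buffer instead
    # of the real stdout; stdout is restored on exit even if import fails.
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        __import__(mod_name)
    return buf.getvalue()

# 'this' prints the Zen of Python, but only on its first import in a process.
captured = quiet_import('this')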
3e98a30466f1132c8743699fff1b4b776e716581cc80c4ee3fc8b11be84f3432
def is_package(dir_name): '\n Returns True if dir_name is actually a Python package in the current\n working directory.\n ' if ('.' in dir_name): return False try: if (not nt.stat(dir_name)): return False except: return False try: if ('__init__.py' not in nt.listdir(((nt.getcwd() + '\\') + dir_name))): return False except: return False return True
Returns True if dir_name is actually a Python package in the current working directory.
Tests/Tools/stdmodules.py
is_package
kmad1729/ironpython3
1,872
python
def is_package(dir_name): '\n Returns True if dir_name is actually a Python package in the current\n working directory.\n ' if ('.' in dir_name): return False try: if (not nt.stat(dir_name)): return False except: return False try: if ('__init__.py' not in nt.listdir(((nt.getcwd() + '\\') + dir_name))): return False except: return False return True
def is_package(dir_name): '\n Returns True if dir_name is actually a Python package in the current\n working directory.\n ' if ('.' in dir_name): return False try: if (not nt.stat(dir_name)): return False except: return False try: if ('__init__.py' not in nt.listdir(((nt.getcwd() + '\\') + dir_name))): return False except: return False return True<|docstring|>Returns True if dir_name is actually a Python package in the current working directory.<|endoftext|>
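A pathlib equivalent of the same test, mirroring the record's logic (directory exists and contains __init__.py) without the nt-specific path joining; note this detects only classic packages, not PEP 420 namespace packages.
from pathlib import Path

def is_package(dir_name):
    p = Path(dir_name)
    # A classic (non-namespace) package: a real directory with __init__.py.
    return '.' not in dir_name and p.is_dir() and (p / '__init__.py').is_file()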
2afe2e8758de7f4906be04f3396ec23e651d829ccef7d267e3c7fa68c3ee6836
def check_package(package_name): '\n Checks all subpackages and modules in the package_name package.\n ' cwd = nt.getcwd() if (cwd == CPY_LIB_DIR): root_name = package_name else: root_name = ((cwd.split((CPY_DIR + '\\Lib\\'))[1].replace('\\', '.') + '.') + package_name) try: import_helper(package_name) log_ok(root_name) except (Exception, SystemExit) as e: log_broken(root_name, e) return nt.chdir(((cwd + '\\') + package_name)) for x in nt.listdir('.'): if (x.endswith('.py') and (x not in ('__init__.py', '__main__.py'))): x = x.split('.py', 1)[0] mod_name = ((nt.getcwd().split((CPY_DIR + '\\Lib\\'))[1] + '\\') + x) mod_name = mod_name.replace('\\', '.') try: import_helper(mod_name) log_ok(mod_name) except (Exception, SystemExit) as e: log_broken(mod_name, e) elif (is_package(x) and (not x.startswith('test'))): check_package(x) nt.chdir(cwd)
Checks all subpackages and modules in the package_name package.
Tests/Tools/stdmodules.py
check_package
kmad1729/ironpython3
1,872
python
def check_package(package_name): '\n \n ' cwd = nt.getcwd() if (cwd == CPY_LIB_DIR): root_name = package_name else: root_name = ((cwd.split((CPY_DIR + '\\Lib\\'))[1].replace('\\', '.') + '.') + package_name) try: import_helper(package_name) log_ok(root_name) except (Exception, SystemExit) as e: log_broken(root_name, e) return nt.chdir(((cwd + '\\') + package_name)) for x in nt.listdir('.'): if (x.endswith('.py') and (x not in ('__init__.py', '__main__.py'))): x = x.split('.py', 1)[0] mod_name = ((nt.getcwd().split((CPY_DIR + '\\Lib\\'))[1] + '\\') + x) mod_name = mod_name.replace('\\', '.') try: import_helper(mod_name) log_ok(mod_name) except (Exception, SystemExit) as e: log_broken(mod_name, e) elif (is_package(x) and (not x.startswith('test'))): check_package(x) nt.chdir(cwd)
def check_package(package_name): '\n \n ' cwd = nt.getcwd() if (cwd == CPY_LIB_DIR): root_name = package_name else: root_name = ((cwd.split((CPY_DIR + '\\Lib\\'))[1].replace('\\', '.') + '.') + package_name) try: import_helper(package_name) log_ok(root_name) except (Exception, SystemExit) as e: log_broken(root_name, e) return nt.chdir(((cwd + '\\') + package_name)) for x in nt.listdir('.'): if (x.endswith('.py') and (x not in ('__init__.py', '__main__.py'))): x = x.split('.py', 1)[0] mod_name = ((nt.getcwd().split((CPY_DIR + '\\Lib\\'))[1] + '\\') + x) mod_name = mod_name.replace('\\', '.') try: import_helper(mod_name) log_ok(mod_name) except (Exception, SystemExit) as e: log_broken(mod_name, e) elif (is_package(x) and (not x.startswith('test'))): check_package(x) nt.chdir(cwd)<|docstring|>Checks all subpackages and modules in the package_name package.<|endoftext|>
58228279d53cf4115f00a088f114ec01bf4aacc98352c111a56b59d9b4d5d361
def add_lineselector(figure): 'Add a line selector for all axes of the given figure.\n Return the mpl connection id for disconnection later.\n ' lineselector = LineSelector(figure.get_axes()) def wrapper(event): lineselector.handler(event) return figure.canvas.mpl_connect('key_press_event', wrapper)
Add a line selector for all axes of the given figure. Return the mpl connection id for disconnection later.
frexp/plot/lineselector.py
add_lineselector
brandjon/frexp
0
python
def add_lineselector(figure): 'Add a line selector for all axes of the given figure.\n Return the mpl connection id for disconnection later.\n ' lineselector = LineSelector(figure.get_axes()) def wrapper(event): lineselector.handler(event) return figure.canvas.mpl_connect('key_press_event', wrapper)
def add_lineselector(figure): 'Add a line selector for all axes of the given figure.\n Return the mpl connection id for disconnection later.\n ' lineselector = LineSelector(figure.get_axes()) def wrapper(event): lineselector.handler(event) return figure.canvas.mpl_connect('key_press_event', wrapper)<|docstring|>Add a line selector for all axes of the given figure. Return the mpl connection id for disconnection later.<|endoftext|>
a149667aa1994659b0088b64545271c23f8c40d6ee0da0b35da4bf3b208de1f7
def goto(self, i): 'Go to the new index. No effect if cursor is currently i.' if (i == self.cursor): return if (self.cursor is not None): self.cb_deactivate(self.cursor, active=False) self.cursor = i if (self.cursor is not None): self.cb_activate(self.cursor, active=True)
Go to the new index. No effect if cursor is currently i.
frexp/plot/lineselector.py
goto
brandjon/frexp
0
python
def goto(self, i): if (i == self.cursor): return if (self.cursor is not None): self.cb_deactivate(self.cursor, active=False) self.cursor = i if (self.cursor is not None): self.cb_activate(self.cursor, active=True)
def goto(self, i): if (i == self.cursor): return if (self.cursor is not None): self.cb_deactivate(self.cursor, active=False) self.cursor = i if (self.cursor is not None): self.cb_activate(self.cursor, active=True)<|docstring|>Go to the new index. No effect if cursor is currently i.<|endoftext|>
e66bc336c9cd98a76dabdfbabf3884d04fe250de3fb7df057ac4bb7729ef77a4
def changeby(self, offset): 'Skip to an offset of the current position.' states = ([None] + list(range(0, self.num_elems))) i = states.index(self.cursor) i = ((i + offset) % len(states)) self.goto(states[i])
Skip to an offset of the current position.
frexp/plot/lineselector.py
changeby
brandjon/frexp
0
python
def changeby(self, offset): states = ([None] + list(range(0, self.num_elems))) i = states.index(self.cursor) i = ((i + offset) % len(states)) self.goto(states[i])
def changeby(self, offset): states = ([None] + list(range(0, self.num_elems))) i = states.index(self.cursor) i = ((i + offset) % len(states)) self.goto(states[i])<|docstring|>Skip to an offset of the current position.<|endoftext|>
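The wraparound arithmetic in changeby, demonstrated standalone: the state space is [None, 0, 1, ..., n-1] and an offset moves through it modulo its length, so stepping past either end wraps around through the "nothing selected" state.
def step(cursor, offset, num_elems=3):
    states = [None] + list(range(num_elems))  # [None, 0, 1, 2]
    return states[(states.index(cursor) + offset) % len(states)]

assert step(None, 1) == 0    # forward from "nothing selected"
assert step(2, 1) is None    # past the last line wraps to deselect
assert step(None, -1) == 2   # backward wraps to the last line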
1a91ad4bc2e18dd08348805086160d9c3a5b4f11281bf5a1b01ccce4628f55c4
def __init__(self, axes_list): 'Construct to select from among lines of the given axes.' self.lines = [] self.leglines = [] self.legtexts = [] for axes in axes_list: new_lines = [line for line in axes.get_lines() if (line.get_label() != '_nolegend_')] leg = axes.get_legend() if (leg is not None): new_leglines = leg.get_lines() new_legtexts = leg.get_texts() else: new_leglines = [] new_legtexts = [] assert (len(new_lines) == len(new_leglines) == len(new_legtexts)) self.lines.extend(new_lines) self.leglines.extend(new_leglines) self.legtexts.extend(new_legtexts) self.cursor = SelectorCursor(len(self.lines), self.markLine, self.unmarkLine)
Construct to select from among lines of the given axes.
frexp/plot/lineselector.py
__init__
brandjon/frexp
0
python
def __init__(self, axes_list): self.lines = [] self.leglines = [] self.legtexts = [] for axes in axes_list: new_lines = [line for line in axes.get_lines() if (line.get_label() != '_nolegend_')] leg = axes.get_legend() if (leg is not None): new_leglines = leg.get_lines() new_legtexts = leg.get_texts() else: new_leglines = [] new_legtexts = [] assert (len(new_lines) == len(new_leglines) == len(new_legtexts)) self.lines.extend(new_lines) self.leglines.extend(new_leglines) self.legtexts.extend(new_legtexts) self.cursor = SelectorCursor(len(self.lines), self.markLine, self.unmarkLine)
def __init__(self, axes_list): self.lines = [] self.leglines = [] self.legtexts = [] for axes in axes_list: new_lines = [line for line in axes.get_lines() if (line.get_label() != '_nolegend_')] leg = axes.get_legend() if (leg is not None): new_leglines = leg.get_lines() new_legtexts = leg.get_texts() else: new_leglines = [] new_legtexts = [] assert (len(new_lines) == len(new_leglines) == len(new_legtexts)) self.lines.extend(new_lines) self.leglines.extend(new_leglines) self.legtexts.extend(new_legtexts) self.cursor = SelectorCursor(len(self.lines), self.markLine, self.unmarkLine)<|docstring|>Construct to select from among lines of the given axes.<|endoftext|>
4f021fb3ca8b9f538e802a48d9a26a7164b658fb06e45e9c3798675e6d316193
def get_cur_file_path() -> Path: '\n Description: get path of current file\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).resolve()
Description: get path of current file :param NAME: TYPE, MEAN :return: TYPE, MEAN
Released2019June06/GeneralUtils.py
get_cur_file_path
minhncedutw/pointcloud-robot-grasp
3
python
def get_cur_file_path() -> Path: '\n Description: get path of current file\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).resolve()
def get_cur_file_path() -> Path: '\n Description: get path of current file\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).resolve()<|docstring|>Description: get path of current file :param NAME: TYPE, MEAN :return: TYPE, MEAN<|endoftext|>
601ffa6fef1f28cc4c43e6661bf26345bcd5d294a07742c5af752204c2a060c7
def get_cur_parent_dir() -> Path: '\n Description: get parent directory of current file\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).resolve().parent
Description: get parent directory of current file :param NAME: TYPE, MEAN :return: TYPE, MEAN
Released2019June06/GeneralUtils.py
get_cur_parent_dir
minhncedutw/pointcloud-robot-grasp
3
python
def get_cur_parent_dir() -> Path: '\n Description: get parent directory of current file\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).resolve().parent
def get_cur_parent_dir() -> Path: '\n Description: get parent directory of current file\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).resolve().parent<|docstring|>Description: get parent directory of current file :param NAME: TYPE, MEAN :return: TYPE, MEAN<|endoftext|>
64f220cc03b51cbf1a477c7ee43030bbf8538ed1e33832ddcec94d4628813d7f
def get_cur_exe_dir() -> Path: '\n Description: get current execution directory\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).cwd()
Description: get current execution directory :param NAME: TYPE, MEAN :return: TYPE, MEAN
Released2019June06/GeneralUtils.py
get_cur_exe_dir
minhncedutw/pointcloud-robot-grasp
3
python
def get_cur_exe_dir() -> Path: '\n Description: get current execution directory\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).cwd()
def get_cur_exe_dir() -> Path: '\n Description: get current execution directory\n :param NAME: TYPE, MEAN\n :return: TYPE, MEAN\n ' return Path(__file__).cwd()<|docstring|>Description: get current execution directory :param NAME: TYPE, MEAN :return: TYPE, MEAN<|endoftext|>
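How the three getters above differ, in one runnable snippet. One caveat worth noting: Path(__file__).cwd() is simply Path.cwd() (cwd is a classmethod, so the instance is irrelevant), meaning the "execution directory" is the process working directory, not the script's own directory.
from pathlib import Path

here = Path(__file__).resolve()            # absolute path of this file
parent = Path(__file__).resolve().parent   # directory containing this file
cwd = Path.cwd()                           # where the process was launched
print(here, parent, cwd, sep='\n')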
16a8decbca4e460ddfa100ff6344124ada735c94853550e67c05446775948b6a
def makedir(path: Union[(str, Path)], mode=511, parents: bool=True, exist_ok: bool=False, verbose=False): '\n Description:\n :param path: [Path, str], path\n :param mode: [0o777, 0o444, ...], chmod(refer: https://help.ubuntu.com/community/FilePermissions)\n :param parents: [Path, str], True: if path is relative & False: if path is absolute\n :param exist_ok: boolean, if path already exists, True: force overwrite & False: raise error\n :return: TYPE, MEAN\n ' if verbose: print('Create directory: \n\t{} \n\t{}'.format(Path(path).resolve(), Path(path).absolute())) Path(path).mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
Description: :param path: [Path, str], path :param mode: [0o777, 0o444, ...], chmod (refer: https://help.ubuntu.com/community/FilePermissions) :param parents: boolean, True: also create missing parent directories & False: raise error if a parent is missing :param exist_ok: boolean, if path already exists, True: force overwrite & False: raise error :return: TYPE, MEAN
Released2019June06/GeneralUtils.py
makedir
minhncedutw/pointcloud-robot-grasp
3
python
def makedir(path: Union[(str, Path)], mode=511, parents: bool=True, exist_ok: bool=False, verbose=False): '\n Description:\n :param path: [Path, str], path\n :param mode: [0o777, 0o444, ...], chmod(refer: https://help.ubuntu.com/community/FilePermissions)\n :param parents: [Path, str], True: if path is relative & False: if path is absolute\n :param exist_ok: boolean, if path already exists, True: force overwrite & False: raise error\n :return: TYPE, MEAN\n ' if verbose: print('Create directory: \n\t{} \n\t{}'.format(Path(path).resolve(), Path(path).absolute())) Path(path).mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
def makedir(path: Union[(str, Path)], mode=511, parents: bool=True, exist_ok: bool=False, verbose=False): '\n Description:\n :param path: [Path, str], path\n :param mode: [0o777, 0o444, ...], chmod(refer: https://help.ubuntu.com/community/FilePermissions)\n :param parents: [Path, str], True: if path is relative & False: if path is absolute\n :param exist_ok: boolean, if path already exists, True: force overwrite & False: raise error\n :return: TYPE, MEAN\n ' if verbose: print('Create directory: \n\t{} \n\t{}'.format(Path(path).resolve(), Path(path).absolute())) Path(path).mkdir(mode=mode, parents=parents, exist_ok=exist_ok)<|docstring|>Description: :param path: [Path, str], path :param mode: [0o777, 0o444, ...], chmod(refer: https://help.ubuntu.com/community/FilePermissions) :param parents: [Path, str], True: if path is relative & False: if path is absolute :param exist_ok: boolean, if path already exists, True: force overwrite & False: raise error :return: TYPE, MEAN<|endoftext|>
25abc935917b65ff1fba2acbd0f0f65ccb234885f8128050ffbb83e24b16f195
def onehot_encoding(labels, n_classes) -> np.ndarray: '\n Description: convert integer labels to one hot\n :param labels: an [int, ndarray], a label array of shape (d0, d1, d2, ...dn)\n :param n_classes: an int, number of classes\n :return: [ndarray], an one hot array of shape (d0, d1, ...dn, n_classes)\n ' onehot = np.identity(n_classes)[labels] return onehot
Description: convert integer labels to one-hot :param labels: an [int, ndarray], a label array of shape (d0, d1, d2, ...dn) :param n_classes: an int, number of classes :return: [ndarray], a one-hot array of shape (d0, d1, ...dn, n_classes)
Released2019June06/GeneralUtils.py
onehot_encoding
minhncedutw/pointcloud-robot-grasp
3
python
def onehot_encoding(labels, n_classes) -> np.ndarray: '\n Description: convert integer labels to one hot\n :param labels: an [int, ndarray], a label array of shape (d0, d1, d2, ...dn)\n :param n_classes: an int, number of classes\n :return: [ndarray], an one hot array of shape (d0, d1, ...dn, n_classes)\n ' onehot = np.identity(n_classes)[labels] return onehot
def onehot_encoding(labels, n_classes) -> np.ndarray: '\n Description: convert integer labels to one hot\n :param labels: an [int, ndarray], a label array of shape (d0, d1, d2, ...dn)\n :param n_classes: an int, number of classes\n :return: [ndarray], an one hot array of shape (d0, d1, ...dn, n_classes)\n ' onehot = np.identity(n_classes)[labels] return onehot<|docstring|>Description: convert integer labels to one hot :param labels: an [int, ndarray], a label array of shape (d0, d1, d2, ...dn) :param n_classes: an int, number of classes :return: [ndarray], an one hot array of shape (d0, d1, ...dn, n_classes)<|endoftext|>
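The identity-matrix indexing trick above, shown concretely: row i of np.identity(n_classes) is exactly the one-hot vector for class i, and fancy indexing broadcasts that lookup over any label shape.
import numpy as np

labels = np.array([[0, 2], [1, 1]])   # shape (2, 2)
onehot = np.identity(3)[labels]       # shape (2, 2, 3)
print(onehot[0, 1])                   # [0. 0. 1.]  -> class 2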
c013eb7ce765e9c1d4276fab0f0fe55c2959bd0845d2d10ff836951aa1e4455f
def onehot_decoding(probs, class_axis) -> np.ndarray: "\n Description: convert one-hot encoding to labels\n :param probs: [ndarray], an probability array, one-hot-encoding type of shape (d0, d1, ...dn)\n :param class_axis: int, axis of classes in 'probs' array(0 <= class_axis <= n)\n :return: [int, ndarray], an label array of shape (d0, d1, ...dn-1)\n " labels = np.argmax(np.asarray(probs), axis=class_axis) return labels
Description: convert one-hot encoding to labels :param probs: [ndarray], a probability array, one-hot-encoding type of shape (d0, d1, ...dn) :param class_axis: int, axis of classes in 'probs' array (0 <= class_axis <= n) :return: [int, ndarray], a label array of shape (d0, d1, ...dn-1)
Released2019June06/GeneralUtils.py
onehot_decoding
minhncedutw/pointcloud-robot-grasp
3
python
def onehot_decoding(probs, class_axis) -> np.ndarray: "\n Description: convert one-hot encoding to labels\n :param probs: [ndarray], an probability array, one-hot-encoding type of shape (d0, d1, ...dn)\n :param class_axis: int, axis of classes in 'probs' array(0 <= class_axis <= n)\n :return: [int, ndarray], an label array of shape (d0, d1, ...dn-1)\n " labels = np.argmax(np.asarray(probs), axis=class_axis) return labels
def onehot_decoding(probs, class_axis) -> np.ndarray: "\n Description: convert one-hot encoding to labels\n :param probs: [ndarray], an probability array, one-hot-encoding type of shape (d0, d1, ...dn)\n :param class_axis: int, axis of classes in 'probs' array(0 <= class_axis <= n)\n :return: [int, ndarray], an label array of shape (d0, d1, ...dn-1)\n " labels = np.argmax(np.asarray(probs), axis=class_axis) return labels<|docstring|>Description: convert one-hot encoding to labels :param probs: [ndarray], an probability array, one-hot-encoding type of shape (d0, d1, ...dn) :param class_axis: int, axis of classes in 'probs' array(0 <= class_axis <= n) :return: [int, ndarray], an label array of shape (d0, d1, ...dn-1)<|endoftext|>
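An encode/decode round trip combining the two record functions; the inline numpy expressions below are exactly what onehot_encoding and onehot_decoding compute, with class_axis as the last axis (where the encoder puts the classes).
import numpy as np

labels = np.array([2, 0, 1, 2])
probs = np.identity(3)[labels]         # == onehot_encoding(labels, 3)
recovered = np.argmax(probs, axis=-1)  # == onehot_decoding(probs, -1)
assert np.array_equal(recovered, labels)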
9c28a637bd36eb69a3aa50a8b77028e14242a1fe61d62a38b4cdf5c88a06290b
def sample_indices(n_samples: int, max_index: int, replace: bool=None) -> np.ndarray: '\n Get a list indice sample for an array\n :param n_samples: an integer, number of expected samples\n :param length: an integer, length of array\n :return: an array of numpy, is a list of indices\n ' if (replace is None): replace = (n_samples > max_index) new_indices = np.random.choice(a=max_index, size=n_samples, replace=replace) return new_indices
Get a list of sampled indices for an array :param n_samples: an integer, number of expected samples :param max_index: an integer, the exclusive upper bound of the sampled indices :return: a numpy array of sampled indices
Released2019June06/GeneralUtils.py
sample_indices
minhncedutw/pointcloud-robot-grasp
3
python
def sample_indices(n_samples: int, max_index: int, replace: bool=None) -> np.ndarray: '\n Get a list indice sample for an array\n :param n_samples: an integer, number of expected samples\n :param length: an integer, length of array\n :return: an array of numpy, is a list of indices\n ' if (replace is None): replace = (n_samples > max_index) new_indices = np.random.choice(a=max_index, size=n_samples, replace=replace) return new_indices
def sample_indices(n_samples: int, max_index: int, replace: bool=None) -> np.ndarray: '\n Get a list indice sample for an array\n :param n_samples: an integer, number of expected samples\n :param length: an integer, length of array\n :return: an array of numpy, is a list of indices\n ' if (replace is None): replace = (n_samples > max_index) new_indices = np.random.choice(a=max_index, size=n_samples, replace=replace) return new_indices<|docstring|>Get a list indice sample for an array :param n_samples: an integer, number of expected samples :param length: an integer, length of array :return: an array of numpy, is a list of indices<|endoftext|>
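Why `replace` defaults to (n_samples > max_index): numpy cannot draw more distinct samples than the population without replacement, so the record enables replacement exactly when oversampling. A short demonstration of both regimes:
import numpy as np

idx = np.random.choice(a=5, size=3, replace=False)  # 3 of 5, all distinct
assert len(set(idx)) == 3
idx = np.random.choice(a=5, size=8, replace=True)   # oversampling needs replace
assert len(idx) == 8 and idx.max() < 5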
64c01150140b664f2ae28f29f9c3bdde5d85888f05d7d563571de96505f518bf
def sample_arrays(arrs: Union[(np.ndarray, List[np.ndarray], Tuple[np.ndarray])], n_samples: int) -> List[np.ndarray]: '\n Sample a list of arrays\n :param arrs: List or Tuple of ndarray, the arrays that need to be sampled\n :param n_samples: an integer, number of expected samples\n :return: a list of numpy array, that are synchronically-sampled arrays\n ' if (isinstance(arrs, List) or isinstance(arrs, Tuple)): list_or_tuple = True else: list_or_tuple = False arrs = [arrs] lengths = [len(arr) for arr in arrs] assert (len(np.unique(lengths)) == 1), 'Input arrss must have SAME length!' new_indices = sample_indices(n_samples=n_samples, max_index=lengths[0]) results = [arr[new_indices] for arr in arrs] return (results if list_or_tuple else results[0])
Sample a list of arrays :param arrs: List or Tuple of ndarray, the arrays that need to be sampled :param n_samples: an integer, number of expected samples :return: a list of numpy arrays, all sampled with the same indices
Released2019June06/GeneralUtils.py
sample_arrays
minhncedutw/pointcloud-robot-grasp
3
python
def sample_arrays(arrs: Union[(np.ndarray, List[np.ndarray], Tuple[np.ndarray])], n_samples: int) -> List[np.ndarray]: '\n Sample a list of arrays\n :param arrs: List or Tuple of ndarray, the arrays that need to be sampled\n :param n_samples: an integer, number of expected samples\n :return: a list of numpy array, that are synchronically-sampled arrays\n ' if (isinstance(arrs, List) or isinstance(arrs, Tuple)): list_or_tuple = True else: list_or_tuple = False arrs = [arrs] lengths = [len(arr) for arr in arrs] assert (len(np.unique(lengths)) == 1), 'Input arrss must have SAME length!' new_indices = sample_indices(n_samples=n_samples, max_index=lengths[0]) results = [arr[new_indices] for arr in arrs] return (results if list_or_tuple else results[0])
def sample_arrays(arrs: Union[(np.ndarray, List[np.ndarray], Tuple[np.ndarray])], n_samples: int) -> List[np.ndarray]: '\n Sample a list of arrays\n :param arrs: List or Tuple of ndarray, the arrays that need to be sampled\n :param n_samples: an integer, number of expected samples\n :return: a list of numpy array, that are synchronically-sampled arrays\n ' if (isinstance(arrs, List) or isinstance(arrs, Tuple)): list_or_tuple = True else: list_or_tuple = False arrs = [arrs] lengths = [len(arr) for arr in arrs] assert (len(np.unique(lengths)) == 1), 'Input arrss must have SAME length!' new_indices = sample_indices(n_samples=n_samples, max_index=lengths[0]) results = [arr[new_indices] for arr in arrs] return (results if list_or_tuple else results[0])<|docstring|>Sample a list of arrays :param arrs: List or Tuple of ndarray, the arrays that need to be sampled :param n_samples: an integer, number of expected samples :return: a list of numpy array, that are synchronically-sampled arrays<|endoftext|>
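The point of drawing the indices once and applying them to every array is that corresponding rows stay paired after sampling; a compact sketch of the same idea:
import numpy as np

x = np.arange(10)   # features
y = x * 10          # labels aligned with x
idx = np.random.choice(len(x), size=4, replace=False)
xs, ys = x[idx], y[idx]
assert np.array_equal(ys, xs * 10)  # pairs survived the sampling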
56493331f48ae3e70ad0a7ef11ad57c9c306a07bdff61bb49410ad82151f8bf5
def load_pickle(name: str): '\n Description:\n :param name: str, file name without file extension\n :return: obj\n ' handle = open((name + '.pickle'), 'rb') obj = pickle.load(file=handle) handle.close() return obj
Description: :param name: str, file name without file extension :return: obj
Released2019June06/GeneralUtils.py
load_pickle
minhncedutw/pointcloud-robot-grasp
3
python
def load_pickle(name: str): '\n Description:\n :param name: str, file name without file extension\n :return: obj\n ' handle = open((name + '.pickle'), 'rb') obj = pickle.load(file=handle) handle.close() return obj
def load_pickle(name: str): '\n Description:\n :param name: str, file name without file extension\n :return: obj\n ' handle = open((name + '.pickle'), 'rb') obj = pickle.load(file=handle) handle.close() return obj<|docstring|>Description: :param name: str, file name without file extension :return: obj<|endoftext|>
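The same load, plus its matching save, written with context managers so the handle is closed even on errors; the '.pickle' suffix convention follows the record, and the '/tmp/demo' path is purely illustrative.
import pickle

def save_pickle(obj, name):
    with open(name + '.pickle', 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)

def load_pickle(name):
    with open(name + '.pickle', 'rb') as handle:
        return pickle.load(handle)

save_pickle({'a': 1}, '/tmp/demo')   # illustrative path
assert load_pickle('/tmp/demo') == {'a': 1}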
06cca1124f312730e228af61a844eb094615387fea483bafdb6d662df2e48977
def connect(config: dict) -> InfluxDBClient: "Connect to the InfluxDB with given config\n\n :param config: Dictionary (or object with dictionary interface) in format:\n\n {'host': 'localhost', 'port': 8086, 'timeout': 5, 'username': 'username', 'password': 'password',\n 'database': 'database'}\n\n or in format:\n\n {'INFLUXDB_HOST': 'localhost', 'INFLUXDB_PORT': 8086, 'INFLUXDB_TIMEOUT': 5, 'INFLUXDB_USERNAME': 'username',\n 'INFLUXDB_PASSWORD': 'password', 'INFLUXDB_DATABASE': 'database'}\n " host = (config['host'] if ('host' in config) else (config['INFLUXDB_HOST'] if ('INFLUXDB_HOST' in config) else 'localhost')) port = (int(config['port']) if ('port' in config) else (int(config['INFLUXDB_PORT']) if ('INFLUXDB_PORT' in config) else 8086)) timeout = (int(config['timeout']) if ('timeout' in config) else (int(config['INFLUXDB_TIMEOUT']) if ('INFLUXDB_TIMEOUT' in config) else 5)) username = (config['username'] if ('username' in config) else config['INFLUXDB_USERNAME']) password = (config['password'] if ('password' in config) else config['INFLUXDB_PASSWORD']) database = (config['database'] if ('database' in config) else config['INFLUXDB_DATABASE']) return InfluxDBClient(host=host, port=port, username=username, password=password, database=database, timeout=timeout)
Connect to the InfluxDB with given config :param config: Dictionary (or object with dictionary interface) in format: {'host': 'localhost', 'port': 8086, 'timeout': 5, 'username': 'username', 'password': 'password', 'database': 'database'} or in format: {'INFLUXDB_HOST': 'localhost', 'INFLUXDB_PORT': 8086, 'INFLUXDB_TIMEOUT': 5, 'INFLUXDB_USERNAME': 'username', 'INFLUXDB_PASSWORD': 'password', 'INFLUXDB_DATABASE': 'database'}
dbinflux/dbinflux.py
connect
andyceo/pylibs
1
python
def connect(config: dict) -> InfluxDBClient: "Connect to the InfluxDB with given config\n\n :param config: Dictionary (or object with dictionary interface) in format:\n\n {'host': 'localhost', 'port': 8086, 'timeout': 5, 'username': 'username', 'password': 'password',\n 'database': 'database'}\n\n or in format:\n\n {'INFLUXDB_HOST': 'localhost', 'INFLUXDB_PORT': 8086, 'INFLUXDB_TIMEOUT': 5, 'INFLUXDB_USERNAME': 'username',\n 'INFLUXDB_PASSWORD': 'password', 'INFLUXDB_DATABASE': 'database'}\n " host = (config['host'] if ('host' in config) else (config['INFLUXDB_HOST'] if ('INFLUXDB_HOST' in config) else 'localhost')) port = (int(config['port']) if ('port' in config) else (int(config['INFLUXDB_PORT']) if ('INFLUXDB_PORT' in config) else 8086)) timeout = (int(config['timeout']) if ('timeout' in config) else (int(config['INFLUXDB_TIMEOUT']) if ('INFLUXDB_TIMEOUT' in config) else 5)) username = (config['username'] if ('username' in config) else config['INFLUXDB_USERNAME']) password = (config['password'] if ('password' in config) else config['INFLUXDB_PASSWORD']) database = (config['database'] if ('database' in config) else config['INFLUXDB_DATABASE']) return InfluxDBClient(host=host, port=port, username=username, password=password, database=database, timeout=timeout)
def connect(config: dict) -> InfluxDBClient: "Connect to the InfluxDB with given config\n\n :param config: Dictionary (or object with dictionary interface) in format:\n\n {'host': 'localhost', 'port': 8086, 'timeout': 5, 'username': 'username', 'password': 'password',\n 'database': 'database'}\n\n or in format:\n\n {'INFLUXDB_HOST': 'localhost', 'INFLUXDB_PORT': 8086, 'INFLUXDB_TIMEOUT': 5, 'INFLUXDB_USERNAME': 'username',\n 'INFLUXDB_PASSWORD': 'password', 'INFLUXDB_DATABASE': 'database'}\n " host = (config['host'] if ('host' in config) else (config['INFLUXDB_HOST'] if ('INFLUXDB_HOST' in config) else 'localhost')) port = (int(config['port']) if ('port' in config) else (int(config['INFLUXDB_PORT']) if ('INFLUXDB_PORT' in config) else 8086)) timeout = (int(config['timeout']) if ('timeout' in config) else (int(config['INFLUXDB_TIMEOUT']) if ('INFLUXDB_TIMEOUT' in config) else 5)) username = (config['username'] if ('username' in config) else config['INFLUXDB_USERNAME']) password = (config['password'] if ('password' in config) else config['INFLUXDB_PASSWORD']) database = (config['database'] if ('database' in config) else config['INFLUXDB_DATABASE']) return InfluxDBClient(host=host, port=port, username=username, password=password, database=database, timeout=timeout)<|docstring|>Connect to the InfluxDB with given config :param config: Dictionary (or object with dictionary interface) in format: {'host': 'localhost', 'port': 8086, 'timeout': 5, 'username': 'username', 'password': 'password', 'database': 'database'} or in format: {'INFLUXDB_HOST': 'localhost', 'INFLUXDB_PORT': 8086, 'INFLUXDB_TIMEOUT': 5, 'INFLUXDB_USERNAME': 'username', 'INFLUXDB_PASSWORD': 'password', 'INFLUXDB_DATABASE': 'database'}<|endoftext|>
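Both accepted config shapes from the docstring above, side by side; connect() is assumed in scope, and the resulting client is the influxdb package's InfluxDBClient that the record constructs.
plain = {'host': 'localhost', 'port': 8086, 'timeout': 5,
         'username': 'user', 'password': 'secret', 'database': 'metrics'}
env_style = {'INFLUXDB_HOST': 'localhost', 'INFLUXDB_PORT': 8086,
             'INFLUXDB_TIMEOUT': 5, 'INFLUXDB_USERNAME': 'user',
             'INFLUXDB_PASSWORD': 'secret', 'INFLUXDB_DATABASE': 'metrics'}
client = connect(plain)      # both dicts yield an equivalent client
client = connect(env_style)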
2c23759c29a6947a3869a9f77b6fa8371d7433d9396fd5efbbacfb07e23c644c
def dump_measurement_csv(client, measurement, chunk_size=500, logger=None, show_cli_cmd=False): 'Dump given measurement to csv file' if (not logger): logging.basicConfig(level=logging.INFO) logger = logging.getLogger() query = 'SELECT * FROM {}'.format(measurement) if show_cli_cmd: logger.info("0. Stop inserting in measurement '%s'", measurement) logger.info('1. Execute following command in InfluxDB CLI to get same output faster:') logger.info(" influx -database '%s' -username '%s' -password '%s' -execute '%s LIMIT 2' -format csv > /tmp/%s.csv", client._database, client._username, client._password, query, measurement) logger.info('2. Execute 1 once again and check files hashes, to be sure no new data was saved during export') logger.info(" Also, you may want count points number with 'wc -l /tmp/%s.csv'", measurement) logger.info("3. Then transform csv file '%s.csv' -> '%s.txt' (line protocol file) with csv2lp() function", measurement, measurement) logger.info(' Also, do any data transformation you want, for example, type conversion, etc') logger.info('4. Drop measurement: DROP MEASUREMENT %s', measurement) logger.info('5. And last, import data back into InfluxDB:') logger.info(" influx -username '%s' -password '%s' -import -pps 10000 -path=%s.txt", client._username, client._password, measurement) logger.info('6. Check new measurement schema with:') logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW FIELD KEYS FROM %s'", client._database, client._username, client._password, measurement) logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW TAG KEYS FROM %s'", client._database, client._username, client._password, measurement) logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW TAG VALUES FROM %s WITH KEY IN (...)'", client._database, client._username, client._password, measurement) else: logger.info("Dumping measurement '%s' started...", measurement) logger.info("Start query '%s' with chunk size %d...", query, chunk_size) t0 = time.time() res = client.query(query, chunked=True, chunk_size=chunk_size) t1 = time.time() tdiff = (t1 - t0) logger.info('End query. Time: %ds (%.2fm)', tdiff, (tdiff / 60))
Dump given measurement to csv file
dbinflux/dbinflux.py
dump_measurement_csv
andyceo/pylibs
1
python
def dump_measurement_csv(client, measurement, chunk_size=500, logger=None, show_cli_cmd=False): if (not logger): logging.basicConfig(level=logging.INFO) logger = logging.getLogger() query = 'SELECT * FROM {}'.format(measurement) if show_cli_cmd: logger.info("0. Stop inserting in measurement '%s'", measurement) logger.info('1. Execute following command in InfluxDB CLI to get same output faster:') logger.info(" influx -database '%s' -username '%s' -password '%s' -execute '%s LIMIT 2' -format csv > /tmp/%s.csv", client._database, client._username, client._password, query, measurement) logger.info('2. Execute 1 once again and check files hashes, to be sure no new data was saved during export') logger.info(" Also, you may want count points number with 'wc -l /tmp/%s.csv'", measurement) logger.info("3. Then transform csv file '%s.csv' -> '%s.txt' (line protocol file) with csv2lp() function", measurement, measurement) logger.info(' Also, do any data transformation you want, for example, type conversion, etc') logger.info('4. Drop measurement: DROP MEASUREMENT %s', measurement) logger.info('5. And last, import data back into InfluxDB:') logger.info(" influx -username '%s' -password '%s' -import -pps 10000 -path=%s.txt", client._username, client._password, measurement) logger.info('6. Check new measurement schema with:') logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW FIELD KEYS FROM %s'", client._database, client._username, client._password, measurement) logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW TAG KEYS FROM %s'", client._database, client._username, client._password, measurement) logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW TAG VALUES FROM %s WITH KEY IN (...)'", client._database, client._username, client._password, measurement) else: logger.info("Dumping measurement '%s' started...", measurement) logger.info("Start query '%s' with chunk size %d...", query, chunk_size) t0 = time.time() res = client.query(query, chunked=True, chunk_size=chunk_size) t1 = time.time() tdiff = (t1 - t0) logger.info('End query. Time: %ds (%.2fm)', tdiff, (tdiff / 60))
def dump_measurement_csv(client, measurement, chunk_size=500, logger=None, show_cli_cmd=False): if (not logger): logging.basicConfig(level=logging.INFO) logger = logging.getLogger() query = 'SELECT * FROM {}'.format(measurement) if show_cli_cmd: logger.info("0. Stop inserting in measurement '%s'", measurement) logger.info('1. Execute following command in InfluxDB CLI to get same output faster:') logger.info(" influx -database '%s' -username '%s' -password '%s' -execute '%s LIMIT 2' -format csv > /tmp/%s.csv", client._database, client._username, client._password, query, measurement) logger.info('2. Execute 1 once again and check files hashes, to be sure no new data was saved during export') logger.info(" Also, you may want count points number with 'wc -l /tmp/%s.csv'", measurement) logger.info("3. Then transform csv file '%s.csv' -> '%s.txt' (line protocol file) with csv2lp() function", measurement, measurement) logger.info(' Also, do any data transformation you want, for example, type conversion, etc') logger.info('4. Drop measurement: DROP MEASUREMENT %s', measurement) logger.info('5. And last, import data back into InfluxDB:') logger.info(" influx -username '%s' -password '%s' -import -pps 10000 -path=%s.txt", client._username, client._password, measurement) logger.info('6. Check new measurement schema with:') logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW FIELD KEYS FROM %s'", client._database, client._username, client._password, measurement) logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW TAG KEYS FROM %s'", client._database, client._username, client._password, measurement) logger.info(" influx -database '%s' -username '%s' -password '%s' -execute 'SHOW TAG VALUES FROM %s WITH KEY IN (...)'", client._database, client._username, client._password, measurement) else: logger.info("Dumping measurement '%s' started...", measurement) logger.info("Start query '%s' with chunk size %d...", query, chunk_size) t0 = time.time() res = client.query(query, chunked=True, chunk_size=chunk_size) t1 = time.time() tdiff = (t1 - t0) logger.info('End query. Time: %ds (%.2fm)', tdiff, (tdiff / 60))<|docstring|>Dump given measurement to csv file<|endoftext|>
78e576e2ab2e7c427e0f05c7e4e77ff8f89fc4b284eaacfe3dca585aee3a773c
def csv2lp(csv_filepath, tags_keys=None, database=None, retention_policy=None): "Transform given csv file into file protocol file. Run example:\n csv2lp('/root/bitfinex_ticker.csv', ['symbol'], 'alfadirect', 'alfadirect')" tags_keys = (tags_keys if tags_keys else []) (path, filename) = os.path.split(csv_filepath) filename_wo_extension = os.path.splitext(os.path.basename(filename))[0] lp_filepath = (((path + '/') + filename_wo_extension) + '.txt') with open(csv_filepath) as csvfile: reader = csv.DictReader(csvfile) with open(lp_filepath, 'w') as lp: if (database and retention_policy): lp.write('# DML\n') lp.write('# CONTEXT-DATABASE: {}\n'.format(database)) lp.write('# CONTEXT-RETENTION-POLICY: {}\n\n'.format(retention_policy)) for row in reader: tag_set = [] for tag_key in tags_keys: tag_set.append('{}={}'.format(tag_key, row[tag_key])) tag_set = ','.join(tag_set) field_set = [] excludes = (['name', 'time'] + tags_keys) for (field_key, field_value) in row.items(): if (field_key not in excludes): field_set.append('{}={}'.format(field_key, field_value)) field_set = ','.join(field_set) name = row['name'] time = row['time'] lp.write('{},{} {} {}\n'.format(name, tag_set, field_set, time))
Transform given csv file into file protocol file. Run example: csv2lp('/root/bitfinex_ticker.csv', ['symbol'], 'alfadirect', 'alfadirect')
dbinflux/dbinflux.py
csv2lp
andyceo/pylibs
1
python
def csv2lp(csv_filepath, tags_keys=None, database=None, retention_policy=None): "Transform given csv file into file protocol file. Run example:\n csv2lp('/root/bitfinex_ticker.csv', ['symbol'], 'alfadirect', 'alfadirect')" tags_keys = (tags_keys if tags_keys else []) (path, filename) = os.path.split(csv_filepath) filename_wo_extension = os.path.splitext(os.path.basename(filename))[0] lp_filepath = (((path + '/') + filename_wo_extension) + '.txt') with open(csv_filepath) as csvfile: reader = csv.DictReader(csvfile) with open(lp_filepath, 'w') as lp: if (database and retention_policy): lp.write('# DML\n') lp.write('# CONTEXT-DATABASE: {}\n'.format(database)) lp.write('# CONTEXT-RETENTION-POLICY: {}\n\n'.format(retention_policy)) for row in reader: tag_set = [] for tag_key in tags_keys: tag_set.append('{}={}'.format(tag_key, row[tag_key])) tag_set = ','.join(tag_set) field_set = [] excludes = (['name', 'time'] + tags_keys) for (field_key, field_value) in row.items(): if (field_key not in excludes): field_set.append('{}={}'.format(field_key, field_value)) field_set = ','.join(field_set) name = row['name'] time = row['time'] lp.write('{},{} {} {}\n'.format(name, tag_set, field_set, time))
def csv2lp(csv_filepath, tags_keys=None, database=None, retention_policy=None): "Transform given csv file into file protocol file. Run example:\n csv2lp('/root/bitfinex_ticker.csv', ['symbol'], 'alfadirect', 'alfadirect')" tags_keys = (tags_keys if tags_keys else []) (path, filename) = os.path.split(csv_filepath) filename_wo_extension = os.path.splitext(os.path.basename(filename))[0] lp_filepath = (((path + '/') + filename_wo_extension) + '.txt') with open(csv_filepath) as csvfile: reader = csv.DictReader(csvfile) with open(lp_filepath, 'w') as lp: if (database and retention_policy): lp.write('# DML\n') lp.write('# CONTEXT-DATABASE: {}\n'.format(database)) lp.write('# CONTEXT-RETENTION-POLICY: {}\n\n'.format(retention_policy)) for row in reader: tag_set = [] for tag_key in tags_keys: tag_set.append('{}={}'.format(tag_key, row[tag_key])) tag_set = ','.join(tag_set) field_set = [] excludes = (['name', 'time'] + tags_keys) for (field_key, field_value) in row.items(): if (field_key not in excludes): field_set.append('{}={}'.format(field_key, field_value)) field_set = ','.join(field_set) name = row['name'] time = row['time'] lp.write('{},{} {} {}\n'.format(name, tag_set, field_set, time))<|docstring|>Transform given csv file into file protocol file. Run example: csv2lp('/root/bitfinex_ticker.csv', ['symbol'], 'alfadirect', 'alfadirect')<|endoftext|>
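To make the behaviour of csv2lp() above concrete, here is a hedged round-trip sketch that writes an invented one-tag CSV and prints the resulting line protocol file; file paths and values are illustrative only:

import csv
from dbinflux import csv2lp  # assumed packaging of the module above

row = {'name': 'ticker', 'time': '1609459200000000000',
       'symbol': 'BTCUSD', 'bid': '28900.1', 'ask': '28900.5'}
with open('/tmp/ticker.csv', 'w', newline='') as f:
    w = csv.DictWriter(f, fieldnames=['name', 'time', 'symbol', 'bid', 'ask'])
    w.writeheader()
    w.writerow(row)
csv2lp('/tmp/ticker.csv', ['symbol'], 'alfadirect', 'alfadirect')
print(open('/tmp/ticker.txt').read())
# last line: ticker,symbol=BTCUSD bid=28900.1,ask=28900.5 1609459200000000000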
01e9d150688cdcd3a9c99ad2412e46ea6e4339eaa021733e988d5eaa7a16cbd3
def move_points(source, dest): "This function helps transfer points from one database (and/or measurement) to another one. Here is the demo\n script using that function:\n\n\n import pylibs\n\n source = {\n 'client': pylibs.connect({\n 'host': 'influxdb_source',\n 'username': 'user1',\n 'password': 'super_secret_password',\n 'database': 'some_database'\n }),\n 'measurement': 'dockerhub',\n 'fields': ['field_1', 'field_2', 'another_field'],\n 'tags': ['tag_1', 'tag_2']\n }\n\n dest = pylibs.connect({\n 'host': 'influxdb_dest',\n 'username': 'user2',\n 'password': 'another_super_secret_password',\n 'database': 'another_database'\n })\n\n pylibs.move_points(source, dest)\n\n\n :param source: Dictionary with source measurement description.\n :param dest: Destination client or dictionary with destination measurement description.\n :return:\n " if (not isinstance(dest, dict)): dest = {'client': dest} if (('client' not in source) or ('client' not in dest)): print('Source and destinations clients must be passed in!') exit(1) if ('measurement' not in source): print('Source measurement must be passed in!') exit(2) elif ('measurement' not in dest): dest['measurement'] = source['measurement'] res = source['client'].query('SELECT * FROM {}'.format(source['measurement'])) points = [] point_template = {'time': None, 'measurement': dest['measurement'], 'tags': {}, 'fields': {}} for point in res.get_points(): point_raw = copy.deepcopy(point_template) point_raw['time'] = point['time'] for meta_key in ['fields', 'tags']: for key in source[meta_key]: point_raw[meta_key][key] = point[key] points.append(point_raw) batch_write_points(dest['client'], points)
This function helps transfer points from one database (and/or measurement) to another one. Here is the demo script using that function: import pylibs source = { 'client': pylibs.connect({ 'host': 'influxdb_source', 'username': 'user1', 'password': 'super_secret_password', 'database': 'some_database' }), 'measurement': 'dockerhub', 'fields': ['field_1', 'field_2', 'another_field'], 'tags': ['tag_1', 'tag_2'] } dest = pylibs.connect({ 'host': 'influxdb_dest', 'username': 'user2', 'password': 'another_super_secret_password', 'database': 'another_database' }) pylibs.move_points(source, dest) :param source: Dictionary with source measurement description. :param dest: Destination client or dictionary with destination measurement description. :return:
dbinflux/dbinflux.py
move_points
andyceo/pylibs
1
python
def move_points(source, dest): "This function helps transfer points from one database (and/or measurement) to another one. Here is the demo\n script using that function:\n\n\n import pylibs\n\n source = {\n 'client': pylibs.connect({\n 'host': 'influxdb_source',\n 'username': 'user1',\n 'password': 'super_secret_password',\n 'database': 'some_database'\n }),\n 'measurement': 'dockerhub',\n 'fields': ['field_1', 'field_2', 'another_field'],\n 'tags': ['tag_1', 'tag_2']\n }\n\n dest = pylibs.connect({\n 'host': 'influxdb_dest',\n 'username': 'user2',\n 'password': 'another_super_secret_password',\n 'database': 'another_database'\n })\n\n pylibs.move_points(source, dest)\n\n\n :param source: Dictionary with source measurement description.\n :param dest: Destination client or dictionary with destination measurement description.\n :return:\n " if (not isinstance(dest, dict)): dest = {'client': dest} if (('client' not in source) or ('client' not in dest)): print('Source and destinations clients must be passed in!') exit(1) if ('measurement' not in source): print('Source measurement must be passed in!') exit(2) elif ('measurement' not in dest): dest['measurement'] = source['measurement'] res = source['client'].query('SELECT * FROM {}'.format(source['measurement'])) points = [] point_template = {'time': None, 'measurement': dest['measurement'], 'tags': {}, 'fields': {}} for point in res.get_points(): point_raw = copy.deepcopy(point_template) point_raw['time'] = point['time'] for meta_key in ['fields', 'tags']: for key in source[meta_key]: point_raw[meta_key][key] = point[key] points.append(point_raw) batch_write_points(dest['client'], points)
def move_points(source, dest): "This function helps transfer points from one database (and/or measurement) to another one. Here is the demo\n script using that function:\n\n\n import pylibs\n\n source = {\n 'client': pylibs.connect({\n 'host': 'influxdb_source',\n 'username': 'user1',\n 'password': 'super_secret_password',\n 'database': 'some_database'\n }),\n 'measurement': 'dockerhub',\n 'fields': ['field_1', 'field_2', 'another_field'],\n 'tags': ['tag_1', 'tag_2']\n }\n\n dest = pylibs.connect({\n 'host': 'influxdb_dest',\n 'username': 'user2',\n 'password': 'another_super_secret_password',\n 'database': 'another_database'\n })\n\n pylibs.move_points(source, dest)\n\n\n :param source: Dictionary with source measurement description.\n :param dest: Destination client or dictionary with destination measurement description.\n :return:\n " if (not isinstance(dest, dict)): dest = {'client': dest} if (('client' not in source) or ('client' not in dest)): print('Source and destinations clients must be passed in!') exit(1) if ('measurement' not in source): print('Source measurement must be passed in!') exit(2) elif ('measurement' not in dest): dest['measurement'] = source['measurement'] res = source['client'].query('SELECT * FROM {}'.format(source['measurement'])) points = [] point_template = {'time': None, 'measurement': dest['measurement'], 'tags': {}, 'fields': {}} for point in res.get_points(): point_raw = copy.deepcopy(point_template) point_raw['time'] = point['time'] for meta_key in ['fields', 'tags']: for key in source[meta_key]: point_raw[meta_key][key] = point[key] points.append(point_raw) batch_write_points(dest['client'], points)<|docstring|>This function helps transfer points from one database (and/or measurement) to another one. Here is the demo script using that function: import pylibs source = { 'client': pylibs.connect({ 'host': 'influxdb_source', 'username': 'user1', 'password': 'super_secret_password', 'database': 'some_database' }), 'measurement': 'dockerhub', 'fields': ['field_1', 'field_2', 'another_field'], 'tags': ['tag_1', 'tag_2'] } dest = pylibs.connect({ 'host': 'influxdb_dest', 'username': 'user2', 'password': 'another_super_secret_password', 'database': 'another_database' }) pylibs.move_points(source, dest) :param source: Dictionary with source measurement description. :param dest: Destination client or dictionary with destination measurement description. :return:<|endoftext|>
453ddb7050f9610be2eb68cfeafb789dd82f3e573e83420882fda541b4bbd6cb
def argparse_add_influxdb_options(parser: argparse.ArgumentParser): 'Add InfluxDB connection parameters to given parser. Also read environment variables for defaults' parser.add_argument('--influxdb-host', metavar='HOST', default=os.environ.get('INFLUXDB_HOST', 'localhost'), help='InfluxDB host name') parser.add_argument('--influxdb-port', metavar='PORT', default=os.environ.get('INFLUXDB_PORT', 8086), help='InfluxDB host port') parser.add_argument('--influxdb-user', metavar='USER', default=os.environ.get('INFLUXDB_USER', None), help='InfluxDB user') parser.add_argument('--influxdb-password', metavar='PASSWORD', default=os.environ.get('INFLUXDB_PASSWORD', None), help='InfluxDB user password') parser.add_argument('--influxdb-password-file', metavar='FILE', default=os.environ.get('INFLUXDB_PASSWORD_FILE', None), help='Filename contains InfluxDB user password') parser.add_argument('--influxdb-database', metavar='DATABASE', default=os.environ.get('INFLUXDB_DATABASE', None), help='InfluxDB database to connect to')
Add InfluxDB connection parameters to given parser. Also read environment variables for defaults
dbinflux/dbinflux.py
argparse_add_influxdb_options
andyceo/pylibs
1
python
def argparse_add_influxdb_options(parser: argparse.ArgumentParser): parser.add_argument('--influxdb-host', metavar='HOST', default=os.environ.get('INFLUXDB_HOST', 'localhost'), help='InfluxDB host name') parser.add_argument('--influxdb-port', metavar='PORT', default=os.environ.get('INFLUXDB_PORT', 8086), help='InfluxDB host port') parser.add_argument('--influxdb-user', metavar='USER', default=os.environ.get('INFLUXDB_USER', None), help='InfluxDB user') parser.add_argument('--influxdb-password', metavar='PASSWORD', default=os.environ.get('INFLUXDB_PASSWORD', None), help='InfluxDB user password') parser.add_argument('--influxdb-password-file', metavar='FILE', default=os.environ.get('INFLUXDB_PASSWORD_FILE', None), help='Filename contains InfluxDB user password') parser.add_argument('--influxdb-database', metavar='DATABASE', default=os.environ.get('INFLUXDB_DATABASE', None), help='InfluxDB database to connect to')
def argparse_add_influxdb_options(parser: argparse.ArgumentParser): parser.add_argument('--influxdb-host', metavar='HOST', default=os.environ.get('INFLUXDB_HOST', 'localhost'), help='InfluxDB host name') parser.add_argument('--influxdb-port', metavar='PORT', default=os.environ.get('INFLUXDB_PORT', 8086), help='InfluxDB host port') parser.add_argument('--influxdb-user', metavar='USER', default=os.environ.get('INFLUXDB_USER', None), help='InfluxDB user') parser.add_argument('--influxdb-password', metavar='PASSWORD', default=os.environ.get('INFLUXDB_PASSWORD', None), help='InfluxDB user password') parser.add_argument('--influxdb-password-file', metavar='FILE', default=os.environ.get('INFLUXDB_PASSWORD_FILE', None), help='Filename contains InfluxDB user password') parser.add_argument('--influxdb-database', metavar='DATABASE', default=os.environ.get('INFLUXDB_DATABASE', None), help='InfluxDB database to connect to')<|docstring|>Add InfluxDB connection parameters to given parser. Also read environment variables for defaults<|endoftext|>
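A short sketch of wiring argparse_add_influxdb_options() above into a CLI; note the defaults are read from INFLUXDB_* environment variables, so the printed values depend on the environment (the host below is a placeholder):

import argparse
from dbinflux import argparse_add_influxdb_options  # assumed packaging

parser = argparse.ArgumentParser()
argparse_add_influxdb_options(parser)
args = parser.parse_args(['--influxdb-host', 'db.example.org'])
print(args.influxdb_host, args.influxdb_port)  # db.example.org 8086 (if INFLUXDB_PORT unset)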
cab6b4d170c1a516c69fef1735a7b0f91e3be47631f441d407eed426e623caf8
def timestamp_to_influxdb_format(timestamp=time.time()) -> int: 'Convert given timestamp (number of seconds) to integer of InfluxDB format (number of nanoseconds).\n @todo: see __main__ section test: fix them\n\n :param timestamp: Datetime in timestamp format (number of seconds that elapsed since\n 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970. Can be string, int or float\n :return: Integer that ready to use in influxdb.client.write_points() function without precision parameter\n ' return round((float(timestamp) * 1000000000))
Convert given timestamp (number of seconds) to integer of InfluxDB format (number of nanoseconds). @todo: see __main__ section test: fix them :param timestamp: Datetime in timestamp format (number of seconds that elapsed since 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970. Can be string, int or float :return: Integer that ready to use in influxdb.client.write_points() function without precision parameter
dbinflux/dbinflux.py
timestamp_to_influxdb_format
andyceo/pylibs
1
python
def timestamp_to_influxdb_format(timestamp=time.time()) -> int: 'Convert given timestamp (number of seconds) to integer of InfluxDB format (number of nanoseconds).\n @todo: see __main__ section test: fix them\n\n :param timestamp: Datetime in timestamp format (number of seconds that elapsed since\n 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970. Can be string, int or float\n :return: Integer that ready to use in influxdb.client.write_points() function without precision parameter\n ' return round((float(timestamp) * 1000000000))
def timestamp_to_influxdb_format(timestamp=time.time()) -> int: 'Convert given timestamp (number of seconds) to integer of InfluxDB format (number of nanoseconds).\n @todo: see __main__ section test: fix them\n\n :param timestamp: Datetime in timestamp format (number of seconds that elapsed since\n 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970. Can be string, int or float\n :return: Integer that ready to use in influxdb.client.write_points() function without precision parameter\n ' return round((float(timestamp) * 1000000000))<|docstring|>Convert given timestamp (number of seconds) to integer of InfluxDB format (number of nanoseconds). @todo: see __main__ section test: fix them :param timestamp: Datetime in timestamp format (number of seconds that elapsed since 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970. Can be string, int or float :return: Integer that ready to use in influxdb.client.write_points() function without precision parameter<|endoftext|>
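A quick check of timestamp_to_influxdb_format() above, converting seconds to integer nanoseconds (both sample products happen to be exactly representable as doubles, so the outputs are exact):

from dbinflux import timestamp_to_influxdb_format  # assumed packaging

print(timestamp_to_influxdb_format(1609459200))      # 1609459200000000000
print(timestamp_to_influxdb_format('1609459200.5'))  # 1609459200500000000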
826172babdf0e393edda5b4c36f785175ac726193e606f3f4f25d3c05080962d
def get_measurements(client: InfluxDBClient, database='') -> list: 'Return the list of measurements in given database' query = 'SHOW MEASUREMENTS' query += (' ON "{}"'.format(database) if database else '') return [_['name'] for _ in client.query(query).get_points()]
Return the list of measurements in given database
dbinflux/dbinflux.py
get_measurements
andyceo/pylibs
1
python
def get_measurements(client: InfluxDBClient, database='') -> list: query = 'SHOW MEASUREMENTS' query += (' ON "{}"'.format(database) if database else '') return [_['name'] for _ in client.query(query).get_points()]
def get_measurements(client: InfluxDBClient, database='') -> list: query = 'SHOW MEASUREMENTS' query += (' ON "{}"'.format(database) if database else '') return [_['name'] for _ in client.query(query).get_points()]<|docstring|>Return the list of measurements in given database<|endoftext|>
91e3eda31913783c0d294ab803242f88716dc9158a9d3465a369e3d3d4fff42c
def get_series(client: InfluxDBClient, database='', measurement='') -> list: 'Return the list of series in given database and measurement' query = 'SHOW SERIES' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return [_['key'] for _ in client.query(query).get_points()]
Return the list of series in given database and measurement
dbinflux/dbinflux.py
get_series
andyceo/pylibs
1
python
def get_series(client: InfluxDBClient, database='', measurement='') -> list: query = 'SHOW SERIES' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return [_['key'] for _ in client.query(query).get_points()]
def get_series(client: InfluxDBClient, database='', measurement='') -> list: query = 'SHOW SERIES' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return [_['key'] for _ in client.query(query).get_points()]<|docstring|>Return the list of series in given database and measurement<|endoftext|>
5b280c6b40cf1b58a1f0397bee608a3653093950d4a1ce0cc62bfdf1c5963c89
def get_fields_keys(client: InfluxDBClient, database='', measurement='') -> dict: 'Return the dictionary of field keys, where key is field name and value is field type, for given database and\n measurement' query = 'SHOW FIELD KEYS' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return {_['fieldKey']: _['fieldType'] for _ in client.query(query).get_points()}
Return the dictionary of field keys, where key is field name and value is field type, for given database and measurement
dbinflux/dbinflux.py
get_fields_keys
andyceo/pylibs
1
python
def get_fields_keys(client: InfluxDBClient, database='', measurement='') -> dict: 'Return the dictionary of field keys, where key is field name and value is field type, for given database and\n measurement' query = 'SHOW FIELD KEYS' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return {_['fieldKey']: _['fieldType'] for _ in client.query(query).get_points()}
def get_fields_keys(client: InfluxDBClient, database='', measurement='') -> dict: 'Return the dictionary of field keys, where key is field name and value is field type, for given database and\n measurement' query = 'SHOW FIELD KEYS' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return {_['fieldKey']: _['fieldType'] for _ in client.query(query).get_points()}<|docstring|>Return the dictionary of field keys, where key is field name and value is field type, for given database and measurement<|endoftext|>
723754f614a41423b82f87b92a839ad739bc8721a6c6006fc7c1ac958ef575af
def get_tag_keys(client: InfluxDBClient, database='', measurement='') -> list: 'Return the list of tag keys in given database and measurement' query = 'SHOW TAG KEYS' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return [_['tagKey'] for _ in client.query(query).get_points()]
Return the list of tag keys in given database and measurement
dbinflux/dbinflux.py
get_tag_keys
andyceo/pylibs
1
python
def get_tag_keys(client: InfluxDBClient, database='', measurement='') -> list: query = 'SHOW TAG KEYS' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return [_['tagKey'] for _ in client.query(query).get_points()]
def get_tag_keys(client: InfluxDBClient, database='', measurement='') -> list: query = 'SHOW TAG KEYS' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') return [_['tagKey'] for _ in client.query(query).get_points()]<|docstring|>Return the list of tag keys in given database and measurement<|endoftext|>
e4f2a4ee6e84357204f053bd450229be14e3af3e9919f1d423a355db6a111d24
def get_tags(client: InfluxDBClient, database='', measurement='') -> dict: 'Return the dictionary of tag keys, where key is tag name and value is a list of tag values, for given database\n and measurement' tags = {} for tag in get_tag_keys(client, database, measurement): query = 'SHOW TAG VALUES' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') query += ' WITH KEY = "{}"'.format(tag) tags[tag] = [_['value'] for _ in client.query(query).get_points()] return tags
Return the dictionary of tag keys, where key is tag name and value is a list of tag values, for given database and measurement
dbinflux/dbinflux.py
get_tags
andyceo/pylibs
1
python
def get_tags(client: InfluxDBClient, database='', measurement='') -> dict: 'Return the dictionary of tag keys, where key is tag name and value is a list of tag values, for given database\n and measurement' tags = {} for tag in get_tag_keys(client, database, measurement): query = 'SHOW TAG VALUES' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') query += ' WITH KEY = "{}"'.format(tag) tags[tag] = [_['value'] for _ in client.query(query).get_points()] return tags
def get_tags(client: InfluxDBClient, database='', measurement='') -> dict: 'Return the dictionary of tag keys, where key is tag name and value is a list of tag values, for given database\n and measurement' tags = {} for tag in get_tag_keys(client, database, measurement): query = 'SHOW TAG VALUES' query += (' ON "{}"'.format(database) if database else '') query += (' FROM "{}"'.format(measurement) if measurement else '') query += ' WITH KEY = "{}"'.format(tag) tags[tag] = [_['value'] for _ in client.query(query).get_points()] return tags<|docstring|>Return the dictionary of tag keys, where key is tag name and value is a list of tag values, for given database and measurement<|endoftext|>
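The schema helpers recorded above (get_measurements, get_series, get_fields_keys, get_tag_keys, get_tags) compose naturally into a rough schema dump; a hedged sketch with placeholder credentials and assumed dbinflux packaging:

from dbinflux import connect, get_measurements, get_fields_keys, get_tags

client = connect({'host': 'localhost', 'username': 'reader',
                  'password': 'secret', 'database': 'metrics'})
for m in get_measurements(client):
    print(m)
    print('  fields:', get_fields_keys(client, measurement=m))
    print('  tags:', get_tags(client, measurement=m))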
29ffa14ffaa0af741a0f50f33cda07a8f202eff1475a3932211f7d886851cb9f
def compare_point_with_db(client: InfluxDBClient, measurement: str, tag_set: dict, ts: int, point: dict) -> dict: 'Get the point from InfluxDB for given measurement, tag set and timestamp, and compare results from InfluxDB\n with given point. Return comparison stats.\n\n @see https://docs.influxdata.com/influxdb/v1.8/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points\n ' query = 'SELECT * FROM "{}" WHERE time = {}'.format(measurement, ts) for (tag_name, v) in tag_set.items(): query += (' AND "{}" = '.format(tag_name) + ('{}'.format(v) if isinstance(v, int) else "'{}'".format(v))) row = [v for v in client.query(query).get_points()] result = {'query': query, 'query_results_count': len(row), 'fields_not_in_db': {}, 'fields_not_equal': {}, 'result': False} if (result['query_results_count'] != 1): return result row = row[0] del row['time'] for (tag_name, _) in tag_set.items(): del row[tag_name] fields_not_in_db = {} fields_not_equal = {} for (field_name, v) in point.items(): if (field_name in row): if (row[field_name] == v): del row[field_name] else: fields_not_equal[field_name] = v else: fields_not_in_db[field_name] = v result['fields_not_in_db'] = fields_not_in_db result['fields_not_equal'] = fields_not_equal result['result'] = ((not fields_not_equal) and (not fields_not_in_db)) return result
Get the point from InfluxDB for given measurement, tag set and timestamp, and compare results from InfluxDB with given point. Return comparison stats. @see https://docs.influxdata.com/influxdb/v1.8/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points
dbinflux/dbinflux.py
compare_point_with_db
andyceo/pylibs
1
python
def compare_point_with_db(client: InfluxDBClient, measurement: str, tag_set: dict, ts: int, point: dict) -> dict: 'Get the point from InfluxDB for given measurement, tag set and timestamp, and compare results from InfluxDB\n with given point. Return comparison stats.\n\n @see https://docs.influxdata.com/influxdb/v1.8/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points\n ' query = 'SELECT * FROM "{}" WHERE time = {}'.format(measurement, ts) for (tag_name, v) in tag_set.items(): query += (' AND "{}" = '.format(tag_name) + ('{}'.format(v) if isinstance(v, int) else "'{}'".format(v))) row = [v for v in client.query(query).get_points()] result = {'query': query, 'query_results_count': len(row), 'fields_not_in_db': {}, 'fields_not_equal': {}, 'result': False} if (result['query_results_count'] != 1): return result row = row[0] del row['time'] for (tag_name, _) in tag_set.items(): del row[tag_name] fields_not_in_db = {} fields_not_equal = {} for (field_name, v) in point.items(): if (field_name in row): if (row[field_name] == v): del row[field_name] else: fields_not_equal[field_name] = v else: fields_not_in_db[field_name] = v result['fields_not_in_db'] = fields_not_in_db result['fields_not_equal'] = fields_not_equal result['result'] = ((not fields_not_equal) and (not fields_not_in_db)) return result
def compare_point_with_db(client: InfluxDBClient, measurement: str, tag_set: dict, ts: int, point: dict) -> dict: 'Get the point from InfluxDB for given measurement, tag set and timestamp, and compare results from InfluxDB\n with given point. Return comparison stats.\n\n @see https://docs.influxdata.com/influxdb/v1.8/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points\n ' query = 'SELECT * FROM "{}" WHERE time = {}'.format(measurement, ts) for (tag_name, v) in tag_set.items(): query += (' AND "{}" = '.format(tag_name) + ('{}'.format(v) if isinstance(v, int) else "'{}'".format(v))) row = [v for v in client.query(query).get_points()] result = {'query': query, 'query_results_count': len(row), 'fields_not_in_db': {}, 'fields_not_equal': {}, 'result': False} if (result['query_results_count'] != 1): return result row = row[0] del row['time'] for (tag_name, _) in tag_set.items(): del row[tag_name] fields_not_in_db = {} fields_not_equal = {} for (field_name, v) in point.items(): if (field_name in row): if (row[field_name] == v): del row[field_name] else: fields_not_equal[field_name] = v else: fields_not_in_db[field_name] = v result['fields_not_in_db'] = fields_not_in_db result['fields_not_equal'] = fields_not_equal result['result'] = ((not fields_not_equal) and (not fields_not_in_db)) return result<|docstring|>Get the point from InfluxDB for given measurement, tag set and timestamp, and compare results from InfluxDB with given point. Return comparison stats. @see https://docs.influxdata.com/influxdb/v1.8/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points<|endoftext|>
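An illustrative way to interpret the stats dictionary returned by compare_point_with_db() above; the measurement, tag set and field values are invented and the import path is an assumption:

from dbinflux import connect, compare_point_with_db

client = connect({'host': 'localhost', 'username': 'reader',
                  'password': 'secret', 'database': 'metrics'})
res = compare_point_with_db(client, 'dockerhub', {'repo': 'library/python'},
                            1609459200000000000, {'pulls': 12345})
if res['query_results_count'] != 1:
    print('no unique point at that timestamp:', res['query'])
elif not res['result']:
    print('differs:', res['fields_not_equal'], 'missing from db:', res['fields_not_in_db'])
else:
    print('point already stored with identical field values')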
6b3deaeaeae88f3872f7f15124f503082ca031774813eb69673155a78c79c11e
def range_extract(lst): 'Yield 2-tuple ranges or 1-tuple single elements from list of increasing ints' lenlst = len(lst) i = 0 while (i < lenlst): low = lst[i] while ((i < (lenlst - 1)) and ((lst[i] + 1) == lst[(i + 1)])): i += 1 hi = lst[i] if ((hi - low) >= 2): (yield (low, hi)) elif ((hi - low) == 1): (yield (low,)) (yield (hi,)) else: (yield (low,)) i += 1
Yield 2-tuple ranges or 1-tuple single elements from list of increasing ints
Task/Range-extraction/Python/range-extraction-1.py
range_extract
mullikine/RosettaCodeData
1
python
def range_extract(lst): lenlst = len(lst) i = 0 while (i < lenlst): low = lst[i] while ((i < (lenlst - 1)) and ((lst[i] + 1) == lst[(i + 1)])): i += 1 hi = lst[i] if ((hi - low) >= 2): (yield (low, hi)) elif ((hi - low) == 1): (yield (low,)) (yield (hi,)) else: (yield (low,)) i += 1
def range_extract(lst): lenlst = len(lst) i = 0 while (i < lenlst): low = lst[i] while ((i < (lenlst - 1)) and ((lst[i] + 1) == lst[(i + 1)])): i += 1 hi = lst[i] if ((hi - low) >= 2): (yield (low, hi)) elif ((hi - low) == 1): (yield (low,)) (yield (hi,)) else: (yield (low,)) i += 1<|docstring|>Yield 2-tuple ranges or 1-tuple single elements from list of increasing ints<|endoftext|>
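A driver in the style of the Rosetta Code range-extraction task, exercising range_extract() above (assumed to be in scope) on the task's standard input list:

lst = [0, 1, 2, 4, 6, 7, 8, 11, 12, 14,
       15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
       25, 27, 28, 29, 30, 31, 32, 33, 35, 36,
       37, 38, 39]
print(','.join('-'.join(str(i) for i in r) for r in range_extract(lst)))
# -> 0-2,4,6-8,11,12,14-25,27-33,35-39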
fe751fdb9546d5a6d250eec35e2d09967fc1d5e716d2579af99eb03f3bc9cbfb
def get_solver_info(): ' Get the information data of the local CP solver that is target by the solver configuration.\n\n This method creates a CP solver to retrieve this information, and end it immediately.\n It returns a dictionary with various information, as in the following example:\n ::\n {\n "AngelVersion" : 5,\n "SourceDate" : "Sep 12 2017",\n "SolverVersion" : "0.0.0.0",\n "IntMin" : -9007199254740991,\n "IntMax" : 9007199254740991,\n "IntervalMin" : -4503599627370494,\n "IntervalMax" : 4503599627370494,\n "AvailableCommands" : ["Exit", "SetCpoModel", "SolveModel", "StartSearch", "SearchNext", "EndSearch", "RefineConflict", "Propagate", "RunSeeds"]\n }\n\n Returns:\n Solver information dictionary, or None if not available.\n ' try: with CpoSolver(CpoModel()) as slvr: if isinstance(slvr.agent, CpoSolverLocal): return slvr.agent.version_info except: pass return None
Get the information data of the local CP solver that is target by the solver configuration. This method creates a CP solver to retrieve this information, and end it immediately. It returns a dictionary with various information, as in the following example: :: { "AngelVersion" : 5, "SourceDate" : "Sep 12 2017", "SolverVersion" : "0.0.0.0", "IntMin" : -9007199254740991, "IntMax" : 9007199254740991, "IntervalMin" : -4503599627370494, "IntervalMax" : 4503599627370494, "AvailableCommands" : ["Exit", "SetCpoModel", "SolveModel", "StartSearch", "SearchNext", "EndSearch", "RefineConflict", "Propagate", "RunSeeds"] } Returns: Solver information dictionary, or None if not available.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
get_solver_info
Infinity8sailor/Quantum-CERN
1
python
def get_solver_info(): ' Get the information data of the local CP solver that is target by the solver configuration.\n\n This method creates a CP solver to retrieve this information, and end it immediately.\n It returns a dictionary with various information, as in the following example:\n ::\n {\n "AngelVersion" : 5,\n "SourceDate" : "Sep 12 2017",\n "SolverVersion" : "0.0.0.0",\n "IntMin" : -9007199254740991,\n "IntMax" : 9007199254740991,\n "IntervalMin" : -4503599627370494,\n "IntervalMax" : 4503599627370494,\n "AvailableCommands" : ["Exit", "SetCpoModel", "SolveModel", "StartSearch", "SearchNext", "EndSearch", "RefineConflict", "Propagate", "RunSeeds"]\n }\n\n Returns:\n Solver information dictionary, or None if not available.\n ' try: with CpoSolver(CpoModel()) as slvr: if isinstance(slvr.agent, CpoSolverLocal): return slvr.agent.version_info except: pass return None
def get_solver_info(): ' Get the information data of the local CP solver that is target by the solver configuration.\n\n This method creates a CP solver to retrieve this information, and end it immediately.\n It returns a dictionary with various information, as in the following example:\n ::\n {\n "AngelVersion" : 5,\n "SourceDate" : "Sep 12 2017",\n "SolverVersion" : "0.0.0.0",\n "IntMin" : -9007199254740991,\n "IntMax" : 9007199254740991,\n "IntervalMin" : -4503599627370494,\n "IntervalMax" : 4503599627370494,\n "AvailableCommands" : ["Exit", "SetCpoModel", "SolveModel", "StartSearch", "SearchNext", "EndSearch", "RefineConflict", "Propagate", "RunSeeds"]\n }\n\n Returns:\n Solver information dictionary, or None if not available.\n ' try: with CpoSolver(CpoModel()) as slvr: if isinstance(slvr.agent, CpoSolverLocal): return slvr.agent.version_info except: pass return None<|docstring|>Get the information data of the local CP solver that is target by the solver configuration. This method creates a CP solver to retrieve this information, and end it immediately. It returns a dictionary with various information, as in the following example: :: { "AngelVersion" : 5, "SourceDate" : "Sep 12 2017", "SolverVersion" : "0.0.0.0", "IntMin" : -9007199254740991, "IntMax" : 9007199254740991, "IntervalMin" : -4503599627370494, "IntervalMax" : 4503599627370494, "AvailableCommands" : ["Exit", "SetCpoModel", "SolveModel", "StartSearch", "SearchNext", "EndSearch", "RefineConflict", "Propagate", "RunSeeds"] } Returns: Solver information dictionary, or None if not available.<|endoftext|>
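A guarded call sketch for get_solver_info() above; it returns None unless a local CP Optimizer executable is reachable through the docplex configuration, so the output depends entirely on the environment:

from docplex.cp.solver.solver_local import get_solver_info

info = get_solver_info()
if info is None:
    print('no local CP Optimizer available')
else:
    print(info.get('SolverVersion'), info.get('AvailableCommands'))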
aa939efe6d7e20153b5c4751fdea750c105c77d64a681f8698b00dccc939bbff
def __init__(self, solver, params, context): ' Create a new solver that solves locally with CP Optimizer Interactive.\n\n Args:\n solver: Parent solver\n params: Solving parameters\n context: Solver context\n Raises:\n CpoException if proxy executable does not exists\n ' self.process = None self.active = True self.timeout_kill = False super(CpoSolverLocal, self).__init__(solver, params, context) xfile = context.execfile if ((xfile is None) or (not is_string(xfile))): raise CpoException("Executable file should be given in 'execfile' context attribute.") if (not os.path.isfile(xfile)): raise CpoException("Executable file '{}' does not exists".format(xfile)) if (not is_exe_file(xfile)): raise CpoException("Executable file '{}' is not executable".format(xfile)) cmd = [context.execfile] if (context.parameters is not None): cmd.extend(context.parameters) context.log(2, "Solver exec command: '", ' '.join(cmd), "'") try: self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, universal_newlines=False) except: raise CpoException("Can not execute command '{}'. Please check availability of required executable file.".format(' '.join(cmd))) self.pout = self.process.stdin self.pin = self.process.stdout self.version_info = None timeout = context.process_start_timeout timer = threading.Timer(timeout, self._process_start_timeout) timer.start() try: (evt, data) = self._read_message() except Exception as e: if self.timeout_kill: raise CpoSolverException('Solver process was too long to start and respond ({} seconds). Process has been killed.'.format(timeout)) raise CpoSolverException('Solver sub-process start failure: {}'.format(e)) timer.cancel() if (evt != EVT_VERSION_INFO): raise CpoSolverException('Unexpected event {} received instead of version info event {}.'.format(evt, EVT_VERSION_INFO)) self.version_info = verinf = json.loads(data) self.available_commands = self.version_info['AvailableCommands'] verinf['AgentModule'] = __name__ context.log(3, "Local solver info: '", verinf, "'") sver = self.version_info.get('SolverVersion') if sver: self.process_infos['SolverVersion'] = sver mver = solver.get_model_format_version() if (sver and mver and (compare_natural(mver, sver) > 0)): raise CpoSolverException('Solver version {} is lower than model format version {}.'.format(sver, mver))
Create a new solver that solves locally with CP Optimizer Interactive. Args: solver: Parent solver params: Solving parameters context: Solver context Raises: CpoException if proxy executable does not exists
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
__init__
Infinity8sailor/Quantum-CERN
1
python
def __init__(self, solver, params, context): ' Create a new solver that solves locally with CP Optimizer Interactive.\n\n Args:\n solver: Parent solver\n params: Solving parameters\n context: Solver context\n Raises:\n CpoException if proxy executable does not exists\n ' self.process = None self.active = True self.timeout_kill = False super(CpoSolverLocal, self).__init__(solver, params, context) xfile = context.execfile if ((xfile is None) or (not is_string(xfile))): raise CpoException("Executable file should be given in 'execfile' context attribute.") if (not os.path.isfile(xfile)): raise CpoException("Executable file '{}' does not exists".format(xfile)) if (not is_exe_file(xfile)): raise CpoException("Executable file '{}' is not executable".format(xfile)) cmd = [context.execfile] if (context.parameters is not None): cmd.extend(context.parameters) context.log(2, "Solver exec command: '", ' '.join(cmd), "'") try: self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, universal_newlines=False) except: raise CpoException("Can not execute command '{}'. Please check availability of required executable file.".format(' '.join(cmd))) self.pout = self.process.stdin self.pin = self.process.stdout self.version_info = None timeout = context.process_start_timeout timer = threading.Timer(timeout, self._process_start_timeout) timer.start() try: (evt, data) = self._read_message() except Exception as e: if self.timeout_kill: raise CpoSolverException('Solver process was too long to start and respond ({} seconds). Process has been killed.'.format(timeout)) raise CpoSolverException('Solver sub-process start failure: {}'.format(e)) timer.cancel() if (evt != EVT_VERSION_INFO): raise CpoSolverException('Unexpected event {} received instead of version info event {}.'.format(evt, EVT_VERSION_INFO)) self.version_info = verinf = json.loads(data) self.available_commands = self.version_info['AvailableCommands'] verinf['AgentModule'] = __name__ context.log(3, "Local solver info: '", verinf, "'") sver = self.version_info.get('SolverVersion') if sver: self.process_infos['SolverVersion'] = sver mver = solver.get_model_format_version() if (sver and mver and (compare_natural(mver, sver) > 0)): raise CpoSolverException('Solver version {} is lower than model format version {}.'.format(sver, mver))
def __init__(self, solver, params, context): ' Create a new solver that solves locally with CP Optimizer Interactive.\n\n Args:\n solver: Parent solver\n params: Solving parameters\n context: Solver context\n Raises:\n CpoException if proxy executable does not exists\n ' self.process = None self.active = True self.timeout_kill = False super(CpoSolverLocal, self).__init__(solver, params, context) xfile = context.execfile if ((xfile is None) or (not is_string(xfile))): raise CpoException("Executable file should be given in 'execfile' context attribute.") if (not os.path.isfile(xfile)): raise CpoException("Executable file '{}' does not exists".format(xfile)) if (not is_exe_file(xfile)): raise CpoException("Executable file '{}' is not executable".format(xfile)) cmd = [context.execfile] if (context.parameters is not None): cmd.extend(context.parameters) context.log(2, "Solver exec command: '", ' '.join(cmd), "'") try: self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, universal_newlines=False) except: raise CpoException("Can not execute command '{}'. Please check availability of required executable file.".format(' '.join(cmd))) self.pout = self.process.stdin self.pin = self.process.stdout self.version_info = None timeout = context.process_start_timeout timer = threading.Timer(timeout, self._process_start_timeout) timer.start() try: (evt, data) = self._read_message() except Exception as e: if self.timeout_kill: raise CpoSolverException('Solver process was too long to start and respond ({} seconds). Process has been killed.'.format(timeout)) raise CpoSolverException('Solver sub-process start failure: {}'.format(e)) timer.cancel() if (evt != EVT_VERSION_INFO): raise CpoSolverException('Unexpected event {} received instead of version info event {}.'.format(evt, EVT_VERSION_INFO)) self.version_info = verinf = json.loads(data) self.available_commands = self.version_info['AvailableCommands'] verinf['AgentModule'] = __name__ context.log(3, "Local solver info: '", verinf, "'") sver = self.version_info.get('SolverVersion') if sver: self.process_infos['SolverVersion'] = sver mver = solver.get_model_format_version() if (sver and mver and (compare_natural(mver, sver) > 0)): raise CpoSolverException('Solver version {} is lower than model format version {}.'.format(sver, mver))<|docstring|>Create a new solver that solves locally with CP Optimizer Interactive. Args: solver: Parent solver params: Solving parameters context: Solver context Raises: CpoException if proxy executable does not exists<|endoftext|>
f24fdfc02cad0494ed3b3d88e8f1c5e8e68feedda1f83832902b12a1ba1adb67
def _process_start_timeout(self): ' Process the raise of start timeout timer ' if (not self.version_info): self.timeout_kill = True self.process.kill()
Process the raise of start timeout timer
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_process_start_timeout
Infinity8sailor/Quantum-CERN
1
python
def _process_start_timeout(self): ' ' if (not self.version_info): self.timeout_kill = True self.process.kill()
def _process_start_timeout(self): ' ' if (not self.version_info): self.timeout_kill = True self.process.kill()<|docstring|>Process the raise of start timeout timer<|endoftext|>
d106b0979071cae791964ab87eb494025d8f29c81e93d83cc99b3f031f4b286c
def solve(self): " Solve the model\n\n According to the value of the context parameter 'verbose', the following information is logged\n if the log output is set:\n * 1: Total time spent to solve the model\n * 2: The process exec file\n * 3: Content of the JSON response\n * 4: Solver traces (if any)\n * 5: Messages sent/receive to/from process\n\n Returns:\n Model solve result,\n object of class :class:`~docplex.cp.solution.CpoSolveResult`.\n " self._init_model_in_solver() self._write_message(CMD_SOLVE_MODEL) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
Solve the model According to the value of the context parameter 'verbose', the following information is logged if the log output is set: * 1: Total time spent to solve the model * 2: The process exec file * 3: Content of the JSON response * 4: Solver traces (if any) * 5: Messages sent/receive to/from process Returns: Model solve result, object of class :class:`~docplex.cp.solution.CpoSolveResult`.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
solve
Infinity8sailor/Quantum-CERN
1
python
def solve(self): " Solve the model\n\n According to the value of the context parameter 'verbose', the following information is logged\n if the log output is set:\n * 1: Total time spent to solve the model\n * 2: The process exec file\n * 3: Content of the JSON response\n * 4: Solver traces (if any)\n * 5: Messages sent/receive to/from process\n\n Returns:\n Model solve result,\n object of class :class:`~docplex.cp.solution.CpoSolveResult`.\n " self._init_model_in_solver() self._write_message(CMD_SOLVE_MODEL) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
def solve(self): " Solve the model\n\n According to the value of the context parameter 'verbose', the following information is logged\n if the log output is set:\n * 1: Total time spent to solve the model\n * 2: The process exec file\n * 3: Content of the JSON response\n * 4: Solver traces (if any)\n * 5: Messages sent/receive to/from process\n\n Returns:\n Model solve result,\n object of class :class:`~docplex.cp.solution.CpoSolveResult`.\n " self._init_model_in_solver() self._write_message(CMD_SOLVE_MODEL) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)<|docstring|>Solve the model According to the value of the context parameter 'verbose', the following information is logged if the log output is set: * 1: Total time spent to solve the model * 2: The process exec file * 3: Content of the JSON response * 4: Solver traces (if any) * 5: Messages sent/receive to/from process Returns: Model solve result, object of class :class:`~docplex.cp.solution.CpoSolveResult`.<|endoftext|>
969e2d0ce2b917465cb6e673e5182b4f8a70a58a43fbb8f011d31613838d52b4
def start_search(self): ' Start a new search. Solutions are retrieved using method search_next().\n ' self._init_model_in_solver() self._write_message(CMD_START_SEARCH)
Start a new search. Solutions are retrieved using method search_next().
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
start_search
Infinity8sailor/Quantum-CERN
1
python
def start_search(self): ' \n ' self._init_model_in_solver() self._write_message(CMD_START_SEARCH)
def start_search(self): ' \n ' self._init_model_in_solver() self._write_message(CMD_START_SEARCH)<|docstring|>Start a new search. Solutions are retrieved using method search_next().<|endoftext|>
efa6143595937d90947228ad118bfda0276ee44f038a3e16ccc564af221a592c
def search_next(self): ' Get the next available solution.\n\n (This method starts search automatically.)\n\n Returns:\n Next model result (type CpoSolveResult)\n ' self._write_message(CMD_SEARCH_NEXT) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
Get the next available solution. (This method starts search automatically.) Returns: Next model result (type CpoSolveResult)
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
search_next
Infinity8sailor/Quantum-CERN
1
python
def search_next(self): ' Get the next available solution.\n\n (This method starts search automatically.)\n\n Returns:\n Next model result (type CpoSolveResult)\n ' self._write_message(CMD_SEARCH_NEXT) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
def search_next(self): ' Get the next available solution.\n\n (This method starts search automatically.)\n\n Returns:\n Next model result (type CpoSolveResult)\n ' self._write_message(CMD_SEARCH_NEXT) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)<|docstring|>Get the next available solution. (This method starts search automatically.) Returns: Next model result (type CpoSolveResult)<|endoftext|>
4a3de908c65c8b0022444bdd2657052c83ffa4c602a359147998b004b33d4316
def end_search(self): ' End current search.\n\n Returns:\n Last (fail) solve result with last solve information (type CpoSolveResult)\n ' self._write_message(CMD_END_SEARCH) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
End current search. Returns: Last (fail) solve result with last solve information (type CpoSolveResult)
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
end_search
Infinity8sailor/Quantum-CERN
1
python
def end_search(self): ' End current search.\n\n Returns:\n Last (fail) solve result with last solve information (type CpoSolveResult)\n ' self._write_message(CMD_END_SEARCH) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
def end_search(self): ' End current search.\n\n Returns:\n Last (fail) solve result with last solve information (type CpoSolveResult)\n ' self._write_message(CMD_END_SEARCH) jsol = self._wait_json_result(EVT_SOLVE_RESULT) return self._create_result_object(CpoSolveResult, jsol)<|docstring|>End current search. Returns: Last (fail) solve result with last solve information (type CpoSolveResult)<|endoftext|>
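The start_search/search_next/end_search methods above are normally driven indirectly through docplex.cp.solver.solver.CpoSolver, which exposes them as an iterator; a hedged end-to-end sketch, assuming docplex and a local solver are installed (the tiny model below is invented for illustration):

from docplex.cp.model import CpoModel, integer_var
from docplex.cp.solver.solver import CpoSolver

mdl = CpoModel()
x = integer_var(0, 2, 'x')
mdl.add(x >= 0)                      # trivially satisfiable: three solutions
with CpoSolver(mdl) as slvr:
    for sol in slvr:                 # each step maps to a search_next() on the agent
        print('x =', sol.get_value(x))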
a01ee5420b896905cb31ab8b14de9a6b2836095d1fea07305d18d286537c98b9
def abort_search(self): ' Abort current search.\n This method is designed to be called by a different thread than the one currently solving.\n ' self.end()
Abort current search. This method is designed to be called by a different thread than the one currently solving.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
abort_search
Infinity8sailor/Quantum-CERN
1
python
def abort_search(self): ' Abort current search.\n This method is designed to be called by a different thread than the one currently solving.\n ' self.end()
def abort_search(self): ' Abort current search.\n This method is designed to be called by a different thread than the one currently solving.\n ' self.end()<|docstring|>Abort current search. This method is designed to be called by a different thread than the one currently solving.<|endoftext|>
573bfe5e9f8ceed39bd63044abff54b9cb4a37f35e656a9733f4778fa2b16da8
def refine_conflict(self): ' This method identifies a minimal conflict for the infeasibility of the current model.\n\n See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.refine_conflict` for details.\n\n Returns:\n Conflict result,\n object of class :class:`~docplex.cp.solution.CpoRefineConflictResult`.\n ' self._init_model_in_solver() pver = self.version_info.get('ProxyVersion') if (self.context.add_conflict_as_cpo and pver and (int(pver) >= 9)): self._write_message(CMD_REFINE_CONFLICT, bytearray([1])) jsol = self._wait_json_result(EVT_CONFLICT_RESULT) cposol = self._wait_event(EVT_CONFLICT_RESULT_CPO) else: self._write_message(CMD_REFINE_CONFLICT) jsol = self._wait_json_result(EVT_CONFLICT_RESULT) cposol = None result = self._create_result_object(CpoRefineConflictResult, jsol) result.cpo_conflict = cposol return result
This method identifies a minimal conflict for the infeasibility of the current model. See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.refine_conflict` for details. Returns: Conflict result, object of class :class:`~docplex.cp.solution.CpoRefineConflictResult`.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
refine_conflict
Infinity8sailor/Quantum-CERN
1
python
def refine_conflict(self): ' This method identifies a minimal conflict for the infeasibility of the current model.\n\n See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.refine_conflict` for details.\n\n Returns:\n Conflict result,\n object of class :class:`~docplex.cp.solution.CpoRefineConflictResult`.\n ' self._init_model_in_solver() pver = self.version_info.get('ProxyVersion') if (self.context.add_conflict_as_cpo and pver and (int(pver) >= 9)): self._write_message(CMD_REFINE_CONFLICT, bytearray([1])) jsol = self._wait_json_result(EVT_CONFLICT_RESULT) cposol = self._wait_event(EVT_CONFLICT_RESULT_CPO) else: self._write_message(CMD_REFINE_CONFLICT) jsol = self._wait_json_result(EVT_CONFLICT_RESULT) cposol = None result = self._create_result_object(CpoRefineConflictResult, jsol) result.cpo_conflict = cposol return result
def refine_conflict(self): ' This method identifies a minimal conflict for the infeasibility of the current model.\n\n See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.refine_conflict` for details.\n\n Returns:\n Conflict result,\n object of class :class:`~docplex.cp.solution.CpoRefineConflictResult`.\n ' self._init_model_in_solver() pver = self.version_info.get('ProxyVersion') if (self.context.add_conflict_as_cpo and pver and (int(pver) >= 9)): self._write_message(CMD_REFINE_CONFLICT, bytearray([1])) jsol = self._wait_json_result(EVT_CONFLICT_RESULT) cposol = self._wait_event(EVT_CONFLICT_RESULT_CPO) else: self._write_message(CMD_REFINE_CONFLICT) jsol = self._wait_json_result(EVT_CONFLICT_RESULT) cposol = None result = self._create_result_object(CpoRefineConflictResult, jsol) result.cpo_conflict = cposol return result<|docstring|>This method identifies a minimal conflict for the infeasibility of the current model. See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.refine_conflict` for details. Returns: Conflict result, object of class :class:`~docplex.cp.solution.CpoRefineConflictResult`.<|endoftext|>
351fa4b8b552455248e9ab2cdcabb2eb78a67ba8bb1a4351b6a5319b91b85a0b
def propagate(self): ' This method invokes the propagation on the current model.\n\n See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.propagate` for details.\n\n Returns:\n Propagation result,\n object of class :class:`~docplex.cp.solution.CpoSolveResult`.\n ' self._init_model_in_solver() self._write_message(CMD_PROPAGATE) jsol = self._wait_json_result(EVT_PROPAGATE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
This method invokes the propagation on the current model. See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.propagate` for details. Returns: Propagation result, object of class :class:`~docplex.cp.solution.CpoSolveResult`.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
propagate
Infinity8sailor/Quantum-CERN
1
python
def propagate(self): ' This method invokes the propagation on the current model.\n\n See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.propagate` for details.\n\n Returns:\n Propagation result,\n object of class :class:`~docplex.cp.solution.CpoSolveResult`.\n ' self._init_model_in_solver() self._write_message(CMD_PROPAGATE) jsol = self._wait_json_result(EVT_PROPAGATE_RESULT) return self._create_result_object(CpoSolveResult, jsol)
def propagate(self): ' This method invokes the propagation on the current model.\n\n See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.propagate` for details.\n\n Returns:\n Propagation result,\n object of class :class:`~docplex.cp.solution.CpoSolveResult`.\n ' self._init_model_in_solver() self._write_message(CMD_PROPAGATE) jsol = self._wait_json_result(EVT_PROPAGATE_RESULT) return self._create_result_object(CpoSolveResult, jsol)<|docstring|>This method invokes the propagation on the current model. See documentation of :meth:`~docplex.cp.solver.solver.CpoSolver.propagate` for details. Returns: Propagation result, object of class :class:`~docplex.cp.solution.CpoSolveResult`.<|endoftext|>
9516ffac8137e156a654502d79029662e8c7210e78a20d903da60450daefb7cf
def run_seeds(self, nbrun): ' This method runs *nbrun* times the CP optimizer search with different random seeds\n and computes statistics from the result of these runs.\n\n Result statistics are displayed on the log output\n that should be activated.\n\n Each run of the solver is stopped according to single solve conditions (TimeLimit for example).\n Total run time is then expected to take *nbruns* times the duration of a single run.\n\n Args:\n nbrun: Number of runs with different seeds.\n Returns:\n Run result, object of class :class:`~docplex.cp.solution.CpoRunResult`.\n Raises:\n CpoNotSupportedException: method not available in local solver.\n ' self._init_model_in_solver() if (CMD_RUN_SEEDS not in self.available_commands): raise CpoNotSupportedException("Method 'run_seeds' is not available in local solver '{}'".format(self.context.execfile)) nbfrm = bytearray(4) encode_integer_big_endian_4(nbrun, nbfrm, 0) self._write_message(CMD_RUN_SEEDS, data=nbfrm) self._wait_event(EVT_RUN_SEEDS_RESULT) return self._create_result_object(CpoRunResult)
This method runs *nbrun* times the CP optimizer search with different random seeds and computes statistics from the result of these runs. Result statistics are displayed on the log output that should be activated. Each run of the solver is stopped according to single solve conditions (TimeLimit for example). Total run time is then expected to take *nbruns* times the duration of a single run. Args: nbrun: Number of runs with different seeds. Returns: Run result, object of class :class:`~docplex.cp.solution.CpoRunResult`. Raises: CpoNotSupportedException: method not available in local solver.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
run_seeds
Infinity8sailor/Quantum-CERN
1
python
def run_seeds(self, nbrun): ' This method runs *nbrun* times the CP optimizer search with different random seeds\n and computes statistics from the result of these runs.\n\n Result statistics are displayed on the log output\n that should be activated.\n\n Each run of the solver is stopped according to single solve conditions (TimeLimit for example).\n Total run time is then expected to take *nbruns* times the duration of a single run.\n\n Args:\n nbrun: Number of runs with different seeds.\n Returns:\n Run result, object of class :class:`~docplex.cp.solution.CpoRunResult`.\n Raises:\n CpoNotSupportedException: method not available in local solver.\n ' self._init_model_in_solver() if (CMD_RUN_SEEDS not in self.available_commands): raise CpoNotSupportedException("Method 'run_seeds' is not available in local solver '{}'".format(self.context.execfile)) nbfrm = bytearray(4) encode_integer_big_endian_4(nbrun, nbfrm, 0) self._write_message(CMD_RUN_SEEDS, data=nbfrm) self._wait_event(EVT_RUN_SEEDS_RESULT) return self._create_result_object(CpoRunResult)
def run_seeds(self, nbrun): ' This method runs *nbrun* times the CP optimizer search with different random seeds\n and computes statistics from the result of these runs.\n\n Result statistics are displayed on the log output\n that should be activated.\n\n Each run of the solver is stopped according to single solve conditions (TimeLimit for example).\n Total run time is then expected to take *nbruns* times the duration of a single run.\n\n Args:\n nbrun: Number of runs with different seeds.\n Returns:\n Run result, object of class :class:`~docplex.cp.solution.CpoRunResult`.\n Raises:\n CpoNotSupportedException: method not available in local solver.\n ' self._init_model_in_solver() if (CMD_RUN_SEEDS not in self.available_commands): raise CpoNotSupportedException("Method 'run_seeds' is not available in local solver '{}'".format(self.context.execfile)) nbfrm = bytearray(4) encode_integer_big_endian_4(nbrun, nbfrm, 0) self._write_message(CMD_RUN_SEEDS, data=nbfrm) self._wait_event(EVT_RUN_SEEDS_RESULT) return self._create_result_object(CpoRunResult)<|docstring|>This method runs *nbrun* times the CP optimizer search with different random seeds and computes statistics from the result of these runs. Result statistics are displayed on the log output that should be activated. Each run of the solver is stopped according to single solve conditions (TimeLimit for example). Total run time is then expected to take *nbruns* times the duration of a single run. Args: nbrun: Number of runs with different seeds. Returns: Run result, object of class :class:`~docplex.cp.solution.CpoRunResult`. Raises: CpoNotSupportedException: method not available in local solver.<|endoftext|>
763473214110331644c98c536ab27f9aa57f4307a375e8a639c5b08e5ca01c2e
def set_explain_failure_tags(self, ltags=None): " This method allows setting the list of failure tags to explain in the next solve.\n\n The failure tags are displayed in the log when the parameter :attr:`~docplex.cp.CpoParameters.LogSearchTags`\n is set to 'On'.\n All existing failure tags previously set are cleared prior to setting the new ones.\n Calling this method with an empty list is then equivalent to just clearing tags.\n\n Args:\n ltags: List of tag ids to explain\n " self._init_model_in_solver() if (MD_SET_FAILURE_TAGS not in self.available_commands): raise CpoNotSupportedException("Method 'set_explain_failure_tags' is not available in local solver '{}'".format(self.context.execfile)) if (ltags is None): ltags = [] elif (not is_array(ltags)): ltags = (ltags,) nbtags = len(ltags) tagfrm = bytearray((4 * (nbtags + 1))) encode_integer_big_endian_4(nbtags, tagfrm, 0) for (i, t) in enumerate(ltags): encode_integer_big_endian_4(t, tagfrm, (4 * (i + 1))) self._write_message(MD_SET_FAILURE_TAGS, data=tagfrm) self._wait_event(EVT_SUCCESS)
This method allows setting the list of failure tags to explain in the next solve. The failure tags are displayed in the log when the parameter :attr:`~docplex.cp.CpoParameters.LogSearchTags` is set to 'On'. All existing failure tags previously set are cleared prior to setting the new ones. Calling this method with an empty list is then equivalent to just clearing tags. Args: ltags: List of tag ids to explain
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
set_explain_failure_tags
Infinity8sailor/Quantum-CERN
1
python
def set_explain_failure_tags(self, ltags=None): " This method allows setting the list of failure tags to explain in the next solve.\n\n The failure tags are displayed in the log when the parameter :attr:`~docplex.cp.CpoParameters.LogSearchTags`\n is set to 'On'.\n All existing failure tags previously set are cleared prior to setting the new ones.\n Calling this method with an empty list is then equivalent to just clearing tags.\n\n Args:\n ltags: List of tag ids to explain\n " self._init_model_in_solver() if (MD_SET_FAILURE_TAGS not in self.available_commands): raise CpoNotSupportedException("Method 'set_explain_failure_tags' is not available in local solver '{}'".format(self.context.execfile)) if (ltags is None): ltags = [] elif (not is_array(ltags)): ltags = (ltags,) nbtags = len(ltags) tagfrm = bytearray((4 * (nbtags + 1))) encode_integer_big_endian_4(nbtags, tagfrm, 0) for (i, t) in enumerate(ltags): encode_integer_big_endian_4(t, tagfrm, (4 * (i + 1))) self._write_message(MD_SET_FAILURE_TAGS, data=tagfrm) self._wait_event(EVT_SUCCESS)
def set_explain_failure_tags(self, ltags=None): " This method allows setting the list of failure tags to explain in the next solve.\n\n The failure tags are displayed in the log when the parameter :attr:`~docplex.cp.CpoParameters.LogSearchTags`\n is set to 'On'.\n All existing failure tags previously set are cleared prior to setting the new ones.\n Calling this method with an empty list is then equivalent to just clearing tags.\n\n Args:\n ltags: List of tag ids to explain\n " self._init_model_in_solver() if (MD_SET_FAILURE_TAGS not in self.available_commands): raise CpoNotSupportedException("Method 'set_explain_failure_tags' is not available in local solver '{}'".format(self.context.execfile)) if (ltags is None): ltags = [] elif (not is_array(ltags)): ltags = (ltags,) nbtags = len(ltags) tagfrm = bytearray((4 * (nbtags + 1))) encode_integer_big_endian_4(nbtags, tagfrm, 0) for (i, t) in enumerate(ltags): encode_integer_big_endian_4(t, tagfrm, (4 * (i + 1))) self._write_message(MD_SET_FAILURE_TAGS, data=tagfrm) self._wait_event(EVT_SUCCESS)<|docstring|>This method allows setting the list of failure tags to explain in the next solve. The failure tags are displayed in the log when the parameter :attr:`~docplex.cp.CpoParameters.LogSearchTags` is set to 'On'. All existing failure tags previously set are cleared prior to setting the new ones. Calling this method with an empty list is then equivalent to just clearing tags. Args: ltags: List of tag ids to explain<|endoftext|>
90921c3c8de1f2470f519e1f927d364281cfba8dd35c632ec03ca4299816cee5
def end(self): ' End solver and release all resources.\n ' if self.active: self.active = False try: self._write_message(CMD_EXIT) except: pass try: self.pout.close() except: pass try: self.pin.close() except: pass try: self.process.kill() except: pass try: self.process.wait() except: pass self.process = None super(CpoSolverLocal, self).end()
End solver and release all resources.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
end
Infinity8sailor/Quantum-CERN
1
python
def end(self): ' \n ' if self.active: self.active = False try: self._write_message(CMD_EXIT) except: pass try: self.pout.close() except: pass try: self.pin.close() except: pass try: self.process.kill() except: pass try: self.process.wait() except: pass self.process = None super(CpoSolverLocal, self).end()
def end(self): ' \n ' if self.active: self.active = False try: self._write_message(CMD_EXIT) except: pass try: self.pout.close() except: pass try: self.pin.close() except: pass try: self.process.kill() except: pass try: self.process.wait() except: pass self.process = None super(CpoSolverLocal, self).end()<|docstring|>End solver and release all resources.<|endoftext|>
a223a4b5574a820f5b705e554fa0bade789219265c01f058d57d4d16a819f438
def _wait_event(self, xevt): ' Wait for a particular event while forwarding logs if any.\n Args:\n xevt: Expected event\n Returns:\n Message data\n Raises:\n SolverException if an error occurs\n ' firsterror = None while True: (evt, data) = self._read_message() if (evt == xevt): return data elif (evt in (EVT_SOLVER_OUT_STREAM, EVT_SOLVER_WARN_STREAM)): if data: if self.log_enabled: self._add_log_data(data) elif (evt == EVT_SOLVER_ERR_STREAM): if data: if (firsterror is None): firsterror = data.replace('\n', '') out = (self.log_output if (self.log_output is not None) else sys.stdout) out.write('ERROR: {}\n'.format(data)) out.flush() elif (evt == EVT_TRACE): self.context.log(4, ('ANGEL TRACE: ' + data)) elif (evt == EVT_ERROR): if (firsterror is not None): data += ((' (' + firsterror) + ')') self.end() raise CpoSolverException(('Solver error: ' + data)) elif (evt == EVT_CALLBACK_EVENT): event = data (evt, data) = self._read_message() assert (evt == EVT_CALLBACK_DATA) res = self._create_result_object(CpoSolveResult, data) self.solver._notify_callback_event(event, res) else: self.end() raise CpoSolverException(('Unknown event received from local solver: ' + str(evt)))
Wait for a particular event while forwarding logs if any. Args: xevt: Expected event Returns: Message data Raises: SolverException if an error occurs
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_wait_event
Infinity8sailor/Quantum-CERN
1
python
def _wait_event(self, xevt): ' Wait for a particular event while forwarding logs if any.\n Args:\n xevt: Expected event\n Returns:\n Message data\n Raises:\n SolverException if an error occurs\n ' firsterror = None while True: (evt, data) = self._read_message() if (evt == xevt): return data elif (evt in (EVT_SOLVER_OUT_STREAM, EVT_SOLVER_WARN_STREAM)): if data: if self.log_enabled: self._add_log_data(data) elif (evt == EVT_SOLVER_ERR_STREAM): if data: if (firsterror is None): firsterror = data.replace('\n', '') out = (self.log_output if (self.log_output is not None) else sys.stdout) out.write('ERROR: {}\n'.format(data)) out.flush() elif (evt == EVT_TRACE): self.context.log(4, ('ANGEL TRACE: ' + data)) elif (evt == EVT_ERROR): if (firsterror is not None): data += ((' (' + firsterror) + ')') self.end() raise CpoSolverException(('Solver error: ' + data)) elif (evt == EVT_CALLBACK_EVENT): event = data (evt, data) = self._read_message() assert (evt == EVT_CALLBACK_DATA) res = self._create_result_object(CpoSolveResult, data) self.solver._notify_callback_event(event, res) else: self.end() raise CpoSolverException(('Unknown event received from local solver: ' + str(evt)))
def _wait_event(self, xevt): ' Wait for a particular event while forwarding logs if any.\n Args:\n xevt: Expected event\n Returns:\n Message data\n Raises:\n SolverException if an error occurs\n ' firsterror = None while True: (evt, data) = self._read_message() if (evt == xevt): return data elif (evt in (EVT_SOLVER_OUT_STREAM, EVT_SOLVER_WARN_STREAM)): if data: if self.log_enabled: self._add_log_data(data) elif (evt == EVT_SOLVER_ERR_STREAM): if data: if (firsterror is None): firsterror = data.replace('\n', '') out = (self.log_output if (self.log_output is not None) else sys.stdout) out.write('ERROR: {}\n'.format(data)) out.flush() elif (evt == EVT_TRACE): self.context.log(4, ('ANGEL TRACE: ' + data)) elif (evt == EVT_ERROR): if (firsterror is not None): data += ((' (' + firsterror) + ')') self.end() raise CpoSolverException(('Solver error: ' + data)) elif (evt == EVT_CALLBACK_EVENT): event = data (evt, data) = self._read_message() assert (evt == EVT_CALLBACK_DATA) res = self._create_result_object(CpoSolveResult, data) self.solver._notify_callback_event(event, res) else: self.end() raise CpoSolverException(('Unknown event received from local solver: ' + str(evt)))<|docstring|>Wait for a particular event while forwarding logs if any. Args: xevt: Expected event Returns: Message data Raises: SolverException if an error occurs<|endoftext|>
187e0f05b5f36aa57986911f80984afe908f07e042f198344d8ee291fd83e6ee
def _wait_json_result(self, evt): ' Wait for a JSON result while forwarding logs if any.\n Args:\n evt: Event to wait for\n Returns:\n JSON solution string, decoded from UTF8\n ' data = self._wait_event(evt) self._set_last_json_result_string(data) self.context.log(3, 'JSON result:\n', data) return self.last_json_result
Wait for a JSON result while forwarding logs if any. Args: evt: Event to wait for Returns: JSON solution string, decoded from UTF8
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_wait_json_result
Infinity8sailor/Quantum-CERN
1
python
def _wait_json_result(self, evt): ' Wait for a JSON result while forwarding logs if any.\n Args:\n evt: Event to wait for\n Returns:\n JSON solution string, decoded from UTF8\n ' data = self._wait_event(evt) self._set_last_json_result_string(data) self.context.log(3, 'JSON result:\n', data) return self.last_json_result
def _wait_json_result(self, evt): ' Wait for a JSON result while forwarding logs if any.\n Args:\n evt: Event to wait for\n Returns:\n JSON solution string, decoded from UTF8\n ' data = self._wait_event(evt) self._set_last_json_result_string(data) self.context.log(3, 'JSON result:\n', data) return self.last_json_result<|docstring|>Wait for a JSON result while forwarding logs if any. Args: evt: Event to wait for Returns: JSON solution string, decoded from UTF8<|endoftext|>
ef2e354219d527cad6919d0b3efbc05224065bc9163bc44b45303a463c44be64
def _write_message(self, cid, data=None): ' Write a message to the solver process\n Args:\n cid: Command name\n data: Data to write, already encoded in UTF8 if required\n ' stime = time.time() cid = cid.encode('utf-8') if is_string(data): data = data.encode('utf-8') nstime = time.time() self.process_infos.incr(CpoProcessInfos.TOTAL_UTF8_ENCODE_TIME, (nstime - stime)) tlen = len(cid) if (data is not None): tlen += (len(data) + 1) if (tlen > 4294967295): raise CpoSolverException('Try to send a message with length {}, greater than {}.'.format(tlen, 4294967295)) frame = bytearray(6) frame[0] = 202 frame[1] = 254 encode_integer_big_endian_4(tlen, frame, 2) self.context.log(5, 'Send message: cmd=', cid, ', tsize=', tlen) if (data is None): frame = (frame + cid) else: frame = (((frame + cid) + bytearray(1)) + data) self.pout.write(frame) self.pout.flush() self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_SEND_TIME, (time.time() - nstime)) self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_SEND_SIZE, len(frame))
Write a message to the solver process Args: cid: Command name data: Data to write, already encoded in UTF8 if required
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_write_message
Infinity8sailor/Quantum-CERN
1
python
def _write_message(self, cid, data=None): ' Write a message to the solver process\n Args:\n cid: Command name\n data: Data to write, already encoded in UTF8 if required\n ' stime = time.time() cid = cid.encode('utf-8') if is_string(data): data = data.encode('utf-8') nstime = time.time() self.process_infos.incr(CpoProcessInfos.TOTAL_UTF8_ENCODE_TIME, (nstime - stime)) tlen = len(cid) if (data is not None): tlen += (len(data) + 1) if (tlen > 4294967295): raise CpoSolverException('Try to send a message with length {}, greater than {}.'.format(tlen, 4294967295)) frame = bytearray(6) frame[0] = 202 frame[1] = 254 encode_integer_big_endian_4(tlen, frame, 2) self.context.log(5, 'Send message: cmd=', cid, ', tsize=', tlen) if (data is None): frame = (frame + cid) else: frame = (((frame + cid) + bytearray(1)) + data) self.pout.write(frame) self.pout.flush() self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_SEND_TIME, (time.time() - nstime)) self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_SEND_SIZE, len(frame))
def _write_message(self, cid, data=None): ' Write a message to the solver process\n Args:\n cid: Command name\n data: Data to write, already encoded in UTF8 if required\n ' stime = time.time() cid = cid.encode('utf-8') if is_string(data): data = data.encode('utf-8') nstime = time.time() self.process_infos.incr(CpoProcessInfos.TOTAL_UTF8_ENCODE_TIME, (nstime - stime)) tlen = len(cid) if (data is not None): tlen += (len(data) + 1) if (tlen > 4294967295): raise CpoSolverException('Try to send a message with length {}, greater than {}.'.format(tlen, 4294967295)) frame = bytearray(6) frame[0] = 202 frame[1] = 254 encode_integer_big_endian_4(tlen, frame, 2) self.context.log(5, 'Send message: cmd=', cid, ', tsize=', tlen) if (data is None): frame = (frame + cid) else: frame = (((frame + cid) + bytearray(1)) + data) self.pout.write(frame) self.pout.flush() self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_SEND_TIME, (time.time() - nstime)) self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_SEND_SIZE, len(frame))<|docstring|>Write a message to the solver process Args: cid: Command name data: Data to write, already encoded in UTF8 if required<|endoftext|>
a394fd67ca9cf1baf44362981f62cfc2179884074c69242c9410a7c3908cbb30
def _read_message(self): ' Read a message from the solver process\n Returns:\n Tuple (evt, data)\n ' frame = self._read_frame(6) if ((frame[0] != 202) or (frame[1] != 254)): erline = (frame + self._read_error_message()) erline = erline.decode() self.end() raise CpoSolverException(('Invalid message header. Possible error generated by solver: ' + erline)) tsize = decode_integer_big_endian_4(frame, 2) data = self._read_frame(tsize) ename = 0 while ((ename < tsize) and (data[ename] != 0)): ename += 1 stime = time.time() if (ename == tsize): evt = data.decode('utf-8') data = None else: evt = data[0:ename].decode('utf-8') data = data[(ename + 1):].decode('utf-8') self.process_infos.incr(CpoProcessInfos.TOTAL_UTF8_DECODE_TIME, (time.time() - stime)) self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_RECEIVE_SIZE, (tsize + 6)) self.context.log(5, 'Read message: ', evt, ", data: '", data, "'") return (evt, data)
Read a message from the solver process Returns: Tuple (evt, data)
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_read_message
Infinity8sailor/Quantum-CERN
1
python
def _read_message(self): ' Read a message from the solver process\n Returns:\n Tuple (evt, data)\n ' frame = self._read_frame(6) if ((frame[0] != 202) or (frame[1] != 254)): erline = (frame + self._read_error_message()) erline = erline.decode() self.end() raise CpoSolverException(('Invalid message header. Possible error generated by solver: ' + erline)) tsize = decode_integer_big_endian_4(frame, 2) data = self._read_frame(tsize) ename = 0 while ((ename < tsize) and (data[ename] != 0)): ename += 1 stime = time.time() if (ename == tsize): evt = data.decode('utf-8') data = None else: evt = data[0:ename].decode('utf-8') data = data[(ename + 1):].decode('utf-8') self.process_infos.incr(CpoProcessInfos.TOTAL_UTF8_DECODE_TIME, (time.time() - stime)) self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_RECEIVE_SIZE, (tsize + 6)) self.context.log(5, 'Read message: ', evt, ", data: '", data, "'") return (evt, data)
def _read_message(self): ' Read a message from the solver process\n Returns:\n Tuple (evt, data)\n ' frame = self._read_frame(6) if ((frame[0] != 202) or (frame[1] != 254)): erline = (frame + self._read_error_message()) erline = erline.decode() self.end() raise CpoSolverException(('Invalid message header. Possible error generated by solver: ' + erline)) tsize = decode_integer_big_endian_4(frame, 2) data = self._read_frame(tsize) ename = 0 while ((ename < tsize) and (data[ename] != 0)): ename += 1 stime = time.time() if (ename == tsize): evt = data.decode('utf-8') data = None else: evt = data[0:ename].decode('utf-8') data = data[(ename + 1):].decode('utf-8') self.process_infos.incr(CpoProcessInfos.TOTAL_UTF8_DECODE_TIME, (time.time() - stime)) self.process_infos.incr(CpoProcessInfos.TOTAL_DATA_RECEIVE_SIZE, (tsize + 6)) self.context.log(5, 'Read message: ', evt, ", data: '", data, "'") return (evt, data)<|docstring|>Read a message from the solver process Returns: Tuple (evt, data)<|endoftext|>
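Both records above rely on helper functions encode_integer_big_endian_4 and decode_integer_big_endian_4, which are referenced but not defined anywhere in this dump. A minimal sketch consistent with how they are called (writing/reading an unsigned 32-bit big-endian integer at a byte offset); the names and signatures come from the calls above, the bodies are an assumption:

def encode_integer_big_endian_4(value, buf, offset):
    # Assumed helper: store 'value' as an unsigned 32-bit big-endian
    # integer into bytearray 'buf' starting at 'offset'.
    buf[offset:offset + 4] = value.to_bytes(4, byteorder='big')

def decode_integer_big_endian_4(buf, offset):
    # Assumed helper: read an unsigned 32-bit big-endian integer
    # from 'buf' starting at 'offset'.
    return int.from_bytes(bytes(buf[offset:offset + 4]), byteorder='big')

# Example mirroring the 6-byte header used by _write_message/_read_message:
frame = bytearray(6)
frame[0], frame[1] = 202, 254              # 0xCA 0xFE magic bytes
encode_integer_big_endian_4(17, frame, 2)  # total message length
assert decode_integer_big_endian_4(frame, 2) == 17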
e6212817ffcbaddd7dd10004218df6f68e8eeb02e7c3c61f04f09ff99b3b76bc
def _read_frame(self, nbb): ' Read a byte frame from input stream\n Args:\n nbb: Number of bytes to read\n Returns:\n Byte array\n ' data = self.pin.read(nbb) if (len(data) != nbb): if (len(data) == 0): if (self.process_infos.get(CpoProcessInfos.TOTAL_DATA_RECEIVE_SIZE, 0) == 0): if IS_WINDOWS: raise CpoSolverException('Nothing to read from local solver process. Possibly not started because cplex dll is not accessible.') else: raise CpoSolverException('Nothing to read from local solver process. Check its availability.') else: try: self.process.wait() rc = self.process.returncode except: rc = 'unknown' raise CpoSolverException('Nothing to read from local solver process. Process seems to have been stopped (rc={}).'.format(rc)) else: raise CpoSolverException('Read only {} bytes when {} was expected.'.format(len(data), nbb)) if IS_PYTHON_2: data = bytearray(data) return data
Read a byte frame from input stream Args: nbb: Number of bytes to read Returns: Byte array
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_read_frame
Infinity8sailor/Quantum-CERN
1
python
def _read_frame(self, nbb): ' Read a byte frame from input stream\n Args:\n nbb: Number of bytes to read\n Returns:\n Byte array\n ' data = self.pin.read(nbb) if (len(data) != nbb): if (len(data) == 0): if (self.process_infos.get(CpoProcessInfos.TOTAL_DATA_RECEIVE_SIZE, 0) == 0): if IS_WINDOWS: raise CpoSolverException('Nothing to read from local solver process. Possibly not started because cplex dll is not accessible.') else: raise CpoSolverException('Nothing to read from local solver process. Check its availability.') else: try: self.process.wait() rc = self.process.returncode except: rc = 'unknown' raise CpoSolverException('Nothing to read from local solver process. Process seems to have been stopped (rc={}).'.format(rc)) else: raise CpoSolverException('Read only {} bytes when {} was expected.'.format(len(data), nbb)) if IS_PYTHON_2: data = bytearray(data) return data
def _read_frame(self, nbb): ' Read a byte frame from input stream\n Args:\n nbb: Number of bytes to read\n Returns:\n Byte array\n ' data = self.pin.read(nbb) if (len(data) != nbb): if (len(data) == 0): if (self.process_infos.get(CpoProcessInfos.TOTAL_DATA_RECEIVE_SIZE, 0) == 0): if IS_WINDOWS: raise CpoSolverException('Nothing to read from local solver process. Possibly not started because cplex dll is not accessible.') else: raise CpoSolverException('Nothing to read from local solver process. Check its availability.') else: try: self.process.wait() rc = self.process.returncode except: rc = 'unknown' raise CpoSolverException('Nothing to read from local solver process. Process seems to have been stopped (rc={}).'.format(rc)) else: raise CpoSolverException('Read only {} bytes when {} was expected.'.format(len(data), nbb)) if IS_PYTHON_2: data = bytearray(data) return data<|docstring|>Read a byte frame from input stream Args: nbb: Number of bytes to read Returns: Byte array<|endoftext|>
e834c5008d48160d8056e849070d147364f5c887d3b3452627940c31fdcee33e
def _read_error_message(self): ' Read stream to search for error line end. Called when wrong input is detected,\n to try to read an "Assertion failed" message for example.\n Returns:\n Byte array\n ' data = [] bv = self.pin.read(1) if IS_PYTHON_2: while ((bv != '') and (bv != '\n')): data.append(ord(bv)) bv = self.pin.read(1) data = bytearray(data) else: while ((bv != b'') and (bv != b'\n')): data.append(ord(bv)) bv = self.pin.read(1) return bytearray(data)
Read stream to search for error line end. Called when wrong input is detected, to try to read an "Assertion failed" message for example. Returns: Byte array
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_read_error_message
Infinity8sailor/Quantum-CERN
1
python
def _read_error_message(self): ' Read stream to search for error line end. Called when wrong input is detected,\n to try to read an "Assertion failed" message for example.\n Returns:\n Byte array\n ' data = [] bv = self.pin.read(1) if IS_PYTHON_2: while ((bv != '') and (bv != '\n')): data.append(ord(bv)) bv = self.pin.read(1) data = bytearray(data) else: while ((bv != b'') and (bv != b'\n')): data.append(ord(bv)) bv = self.pin.read(1) return bytearray(data)
def _read_error_message(self): ' Read stream to search for error line end. Called when wrong input is detected,\n to try to read an "Assertion failed" message for example.\n Returns:\n Byte array\n ' data = [] bv = self.pin.read(1) if IS_PYTHON_2: while ((bv != '') and (bv != '\n')): data.append(ord(bv)) bv = self.pin.read(1) data = bytearray(data) else: while ((bv != b'') and (bv != b'\n')): data.append(ord(bv)) bv = self.pin.read(1) return bytearray(data)<|docstring|>Read stream to search for error line end. Called when wrong input is detected, to try to read an "Assertion failed" message for example. Returns: Byte array<|endoftext|>
e46d7a2298c948de3be8b73f95e356b19161bb195f025d4b6a61f3b20c63f463
def _send_model_to_solver(self, cpostr): ' Send the model to the solver.\n\n Args:\n cpostr: String containing the model in CPO format\n ' self._write_message(CMD_SET_CPO_MODEL, cpostr) self._wait_json_result(EVT_SUCCESS)
Send the model to the solver. Args: cpostr: String containing the model in CPO format
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_send_model_to_solver
Infinity8sailor/Quantum-CERN
1
python
def _send_model_to_solver(self, cpostr): ' Send the model to the solver.\n\n Args:\n cpostr: String containing the model in CPO format\n ' self._write_message(CMD_SET_CPO_MODEL, cpostr) self._wait_json_result(EVT_SUCCESS)
def _send_model_to_solver(self, cpostr): ' Send the model to the solver.\n\n Args:\n cpostr: String containing the model in CPO format\n ' self._write_message(CMD_SET_CPO_MODEL, cpostr) self._wait_json_result(EVT_SUCCESS)<|docstring|>Send the model to the solver. Args: cpostr: String containing the model in CPO format<|endoftext|>
96ba670e8d9a17f017ce2039ab7e6ae9ede1741f3606206d65141cccede88448
def _add_callback_processing(self): ' Add the processing of solver callback.\n ' aver = self.version_info.get('AngelVersion', 0) if (aver < 8): raise CpoSolverException('This version of the CPO solver angel ({}) does not support solver callbacks.'.format(aver)) self._write_message(CMD_ADD_CALLBACK) self._wait_event(EVT_SUCCESS)
Add the processing of solver callback.
venv/Lib/site-packages/docplex/cp/solver/solver_local.py
_add_callback_processing
Infinity8sailor/Quantum-CERN
1
python
def _add_callback_processing(self): ' \n ' aver = self.version_info.get('AngelVersion', 0) if (aver < 8): raise CpoSolverException('This version of the CPO solver angel ({}) does not support solver callbacks.'.format(aver)) self._write_message(CMD_ADD_CALLBACK) self._wait_event(EVT_SUCCESS)
def _add_callback_processing(self): ' \n ' aver = self.version_info.get('AngelVersion', 0) if (aver < 8): raise CpoSolverException('This version of the CPO solver angel ({}) does not support solver callbacks.'.format(aver)) self._write_message(CMD_ADD_CALLBACK) self._wait_event(EVT_SUCCESS)<|docstring|>Add the processing of solver callback.<|endoftext|>
5700c1e2e002afba451a44bef5add284df23723e9361f3802b24f093f59ad60b
def _require_openssl(): 'Check that ``openssl`` is on the PATH.\n\n Assumes :func:`_require_py` has been checked.\n ' if (py.path.local.sysfind('openssl') is None): msg = '``openssl`` command line tool must be installed.' print(msg, file=sys.stderr) sys.exit(1)
Check that ``openssl`` is on the PATH. Assumes :func:`_require_py` has been checked.
convert_key.py
_require_openssl
dhermes/google-cloud-python-on-gae
0
python
def _require_openssl(): 'Check that ``openssl`` is on the PATH.\n\n Assumes :func:`_require_py` has been checked.\n ' if (py.path.local.sysfind('openssl') is None): msg = '``openssl`` command line tool must be installed.' print(msg, file=sys.stderr) sys.exit(1)
def _require_openssl(): 'Check that ``openssl`` is on the PATH.\n\n Assumes :func:`_require_py` has been checked.\n ' if (py.path.local.sysfind('openssl') is None): msg = '``openssl`` command line tool must be installed.' print(msg, file=sys.stderr) sys.exit(1)<|docstring|>Check that ``openssl`` is on the PATH. Assumes :func:`_require_py` has been checked.<|endoftext|>
54413d9c0fbda04b23b88a4a70b997c20b0a8e9cd8bff73fce24329ac9bbf76c
def _pkcs8_filename(pkcs8_pem, base): 'Create / check a PKCS#8 file.\n\n Exits with 1 if the file already exists and differs from\n ``pkcs8_pem``. If the file does not exist, creates it with\n ``pkcs8_pem`` as contents and sets permissions to 0400.\n\n Args:\n pkcs8_pem (str): The contents to be stored (or checked).\n base (str): The base file path (without extension).\n\n Returns:\n str: The filename that was checked / created.\n ' pkcs8_filename = '{}-PKCS8.pem'.format(base) if os.path.exists(pkcs8_filename): with open(pkcs8_filename, 'r') as file_obj: contents = file_obj.read() if (contents != pkcs8_pem): msg = 'PKCS#8 file {} already exists.'.format(pkcs8_filename) print(msg, file=sys.stderr) sys.exit(1) else: with open(pkcs8_filename, 'w') as file_obj: file_obj.write(pkcs8_pem) os.chmod(pkcs8_filename, 256) return pkcs8_filename
Create / check a PKCS#8 file. Exits with 1 if the file already exists and differs from ``pkcs8_pem``. If the file does not exist, creates it with ``pkcs8_pem`` as contents and sets permissions to 0400. Args: pkcs8_pem (str): The contents to be stored (or checked). base (str): The base file path (without extension). Returns: str: The filename that was checked / created.
convert_key.py
_pkcs8_filename
dhermes/google-cloud-python-on-gae
0
python
def _pkcs8_filename(pkcs8_pem, base): 'Create / check a PKCS#8 file.\n\n Exits with 1 if the file already exists and differs from\n ``pkcs8_pem``. If the file does not exist, creates it with\n ``pkcs8_pem`` as contents and sets permissions to 0400.\n\n Args:\n pkcs8_pem (str): The contents to be stored (or checked).\n base (str): The base file path (without extension).\n\n Returns:\n str: The filename that was checked / created.\n ' pkcs8_filename = '{}-PKCS8.pem'.format(base) if os.path.exists(pkcs8_filename): with open(pkcs8_filename, 'r') as file_obj: contents = file_obj.read() if (contents != pkcs8_pem): msg = 'PKCS#8 file {} already exists.'.format(pkcs8_filename) print(msg, file=sys.stderr) sys.exit(1) else: with open(pkcs8_filename, 'w') as file_obj: file_obj.write(pkcs8_pem) os.chmod(pkcs8_filename, 256) return pkcs8_filename
def _pkcs8_filename(pkcs8_pem, base): 'Create / check a PKCS#8 file.\n\n Exits with 1 if the file already exists and differs from\n ``pkcs8_pem``. If the file does not exist, creates it with\n ``pkcs8_pem`` as contents and sets permissions to 0400.\n\n Args:\n pkcs8_pem (str): The contents to be stored (or checked).\n base (str): The base file path (without extension).\n\n Returns:\n str: The filename that was checked / created.\n ' pkcs8_filename = '{}-PKCS8.pem'.format(base) if os.path.exists(pkcs8_filename): with open(pkcs8_filename, 'r') as file_obj: contents = file_obj.read() if (contents != pkcs8_pem): msg = 'PKCS#8 file {} already exists.'.format(pkcs8_filename) print(msg, file=sys.stderr) sys.exit(1) else: with open(pkcs8_filename, 'w') as file_obj: file_obj.write(pkcs8_pem) os.chmod(pkcs8_filename, 256) return pkcs8_filename<|docstring|>Create / check a PKCS#8 file. Exits with 1 if the file already exists and differs from ``pkcs8_pem``. If the file does not exist, creates it with ``pkcs8_pem`` as contents and sets permissions to 0400. Args: pkcs8_pem (str): The contents to be stored (or checked). base (str): The base file path (without extension). Returns: str: The filename that was checked / created.<|endoftext|>
d53e4c4f7acb6c286de441865780a46c43afc7dfb8449459b490c5f4e7081b04
def _pkcs1_verify(pkcs8_filename, pkcs1_filename): 'Verify the contents of an existing PKCS#1 file.\n\n Does so by using ``openssl rsa`` to print to stdout and\n then checking against contents.\n\n Exits with 1 if:\n\n * The ``openssl`` command fails\n * The ``pkcs1_filename`` contents differ from what was produced\n by ``openssl``\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to check against.\n ' cmd = ('openssl', 'rsa', '-in', pkcs8_filename) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return_code = process.wait() if (return_code != 0): msg = 'Failed checking contents of {} against openssl.'.format(pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1) cmd_output = process.stdout.read().decode('utf-8') with open(pkcs1_filename, 'r') as file_obj: expected_contents = file_obj.read() if (cmd_output != expected_contents): msg = 'PKCS#1 file {} already exists.'.format(pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1)
Verify the contents of an existing PKCS#1 file. Does so by using ``openssl rsa`` to print to stdout and then checking against contents. Exits with 1 if: * The ``openssl`` command fails * The ``pkcs1_filename`` contents differ from what was produced by ``openssl`` Args: pkcs8_filename (str): The PKCS#8 file to be converted. pkcs1_filename (str): The PKCS#1 file to check against.
convert_key.py
_pkcs1_verify
dhermes/google-cloud-python-on-gae
0
python
def _pkcs1_verify(pkcs8_filename, pkcs1_filename): 'Verify the contents of an existing PKCS#1 file.\n\n Does so by using ``openssl rsa`` to print to stdout and\n then checking against contents.\n\n Exits with 1 if:\n\n * The ``openssl`` command fails\n * The ``pkcs1_filename`` contents differ from what was produced\n by ``openssl``\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to check against.\n ' cmd = ('openssl', 'rsa', '-in', pkcs8_filename) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return_code = process.wait() if (return_code != 0): msg = 'Failed checking contents of {} against openssl.'.format(pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1) cmd_output = process.stdout.read().decode('utf-8') with open(pkcs1_filename, 'r') as file_obj: expected_contents = file_obj.read() if (cmd_output != expected_contents): msg = 'PKCS#1 file {} already exists.'.format(pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1)
def _pkcs1_verify(pkcs8_filename, pkcs1_filename): 'Verify the contents of an existing PKCS#1 file.\n\n Does so by using ``openssl rsa`` to print to stdout and\n then checking against contents.\n\n Exits with 1 if:\n\n * The ``openssl`` command fails\n * The ``pkcs1_filename`` contents differ from what was produced\n by ``openssl``\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to check against.\n ' cmd = ('openssl', 'rsa', '-in', pkcs8_filename) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return_code = process.wait() if (return_code != 0): msg = 'Failed checking contents of {} against openssl.'.format(pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1) cmd_output = process.stdout.read().decode('utf-8') with open(pkcs1_filename, 'r') as file_obj: expected_contents = file_obj.read() if (cmd_output != expected_contents): msg = 'PKCS#1 file {} already exists.'.format(pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1)<|docstring|>Verify the contents of an existing PKCS#1 file. Does so by using ``openssl rsa`` to print to stdout and then checking against contents. Exits with 1 if: * The ``openssl`` command fails * The ``pkcs1_filename`` contents differ from what was produced by ``openssl`` Args: pkcs8_filename (str): The PKCS#8 file to be converted. pkcs1_filename (str): The PKCS#1 file to check against.<|endoftext|>
d0e03c4d37929592a2e405281a96e746718459f2d61beed7e846c4c2579f6c6e
def _pkcs1_create(pkcs8_filename, pkcs1_filename): 'Create a PKCS#1 file from a PKCS#8 file.\n\n Does so by using ``openssl rsa -in * -out *``.\n\n Exits with 1 if the ``openssl`` command fails.\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to be created.\n ' cmd = ('openssl', 'rsa', '-in', pkcs8_filename, '-out', pkcs1_filename) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return_code = process.wait() if (return_code != 0): msg = 'Failed to convert {} to {} with openssl.'.format(pkcs8_filename, pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1)
Create a PKCS#1 file from a PKCS#8 file. Does so by using ``openssl rsa -in * -out *``. Exits with 1 if the ``openssl`` command fails. Args: pkcs8_filename (str): The PKCS#8 file to be converted. pkcs1_filename (str): The PKCS#1 file to be created.
convert_key.py
_pkcs1_create
dhermes/google-cloud-python-on-gae
0
python
def _pkcs1_create(pkcs8_filename, pkcs1_filename): 'Create a PKCS#1 file from a PKCS#8 file.\n\n Does so by using ``openssl rsa -in * -out *``.\n\n Exits with 1 if the ``openssl`` command fails.\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to be created.\n ' cmd = ('openssl', 'rsa', '-in', pkcs8_filename, '-out', pkcs1_filename) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return_code = process.wait() if (return_code != 0): msg = 'Failed to convert {} to {} with openssl.'.format(pkcs8_filename, pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1)
def _pkcs1_create(pkcs8_filename, pkcs1_filename): 'Create a PKCS#1 file from a PKCS#8 file.\n\n Does so by using ``openssl rsa -in * -out *``.\n\n Exits with 1 if the ``openssl`` command fails.\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to be created.\n ' cmd = ('openssl', 'rsa', '-in', pkcs8_filename, '-out', pkcs1_filename) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return_code = process.wait() if (return_code != 0): msg = 'Failed to convert {} to {} with openssl.'.format(pkcs8_filename, pkcs1_filename) print(msg, file=sys.stderr) sys.exit(1)<|docstring|>Create a PKCS#1 file from a PKCS#8 file. Does so by using ``openssl rsa -in * -out *``. Exits with 1 if the ``openssl`` command fails. Args: pkcs8_filename (str): The PKCS#8 file to be converted. pkcs1_filename (str): The PKCS#1 file to be created.<|endoftext|>
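The three PKCS helper records above (_pkcs8_filename, _pkcs1_verify, _pkcs1_create) suggest a small convert-or-verify flow. A hypothetical driver tying them together; the function name, the '{}.pem' output naming, and the arguments are illustrative assumptions, not part of the original convert_key.py records:

import os

def convert_service_account_key(pkcs8_pem, base):
    # Hypothetical driver: write or check the PKCS#8 file, then create
    # the PKCS#1 file if it is missing, or verify it if it exists.
    pkcs8_filename = _pkcs8_filename(pkcs8_pem, base)
    pkcs1_filename = '{}.pem'.format(base)  # assumed naming convention
    if os.path.exists(pkcs1_filename):
        _pkcs1_verify(pkcs8_filename, pkcs1_filename)
    else:
        _pkcs1_create(pkcs8_filename, pkcs1_filename)
    return pkcs1_filename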
85cf99de762a9652ece190be9466a3e4b5f52805ac15e5ccd44f1c71eb98f9a9
def transcribe(fp, decoder): '\n Performs STT, transcribing an audio file and returning the result.\n\n Arguments:\n fp -- a file object containing audio data\n ' fp.seek(44) data = fp.read() decoder.start_utt() decoder.process_raw(data, False, True) decoder.end_utt() result = decoder.hyp() transcribed = [result] print(transcribed[0]) return transcribed
Performs STT, transcribing an audio file and returning the result. Arguments: fp -- a file object containing audio data
jasper_test.py
transcribe
codebhendi/alfred-bot
0
python
def transcribe(fp, decoder): '\n Performs STT, transcribing an audio file and returning the result.\n\n Arguments:\n fp -- a file object containing audio data\n ' fp.seek(44) data = fp.read() decoder.start_utt() decoder.process_raw(data, False, True) decoder.end_utt() result = decoder.hyp() transcribed = [result] print(transcribed[0]) return transcribed
def transcribe(fp, decoder): '\n Performs STT, transcribing an audio file and returning the result.\n\n Arguments:\n fp -- a file object containing audio data\n ' fp.seek(44) data = fp.read() decoder.start_utt() decoder.process_raw(data, False, True) decoder.end_utt() result = decoder.hyp() transcribed = [result] print(transcribed[0]) return transcribed<|docstring|>Performs STT, transcribing an audio file and returning the result. Arguments: fp -- a file object containing audio data<|endoftext|>
508c6737808bf43ed5d3150d737afb0e8d17bad5f56b9a69bda5804b933b15ee
def generate_CompetitionSince(all_data: pd.DataFrame, drop=True): "Generate (inplace) a feature 'CompetitionSince' which counts the months (in integer) since\n when the competition started.\n Fills missing values with -1000.\n Creates a new boolean column 'Competition_missing' highlighting the missing values.\n " mask = (~ all_data.CompetitionOpenSinceYear.isna()) year = all_data.loc[(mask, 'CompetitionOpenSinceYear')].astype(np.int).astype(str) month = all_data.loc[(mask, 'CompetitionOpenSinceMonth')].astype(np.int).apply('{:02d}'.format) now_date = all_data.loc[(mask, 'Date')] CompetitionSince = (now_date.dt.to_period('M') - pd.to_datetime(((year + '-') + month), format='%Y-%m').dt.to_period('M')) CompetitionSince = CompetitionSince.apply((lambda x: x.n)) all_data.loc[(mask, 'CompetitionSince')] = CompetitionSince all_data.loc[:, 'Competition_missing'] = all_data.CompetitionSince.isna() all_data.CompetitionSince.fillna((- 1000), inplace=True) if drop: all_data.drop(labels=['CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear'], axis=1, inplace=True)
Generate (inplace) a feature 'CompetitionSince' which counts the months (in integer) since when the competition started. Fills missing values with -1000. Creates a new boolean column 'Competition_missing' highlighting the missing values.
feature_engineering.py
generate_CompetitionSince
ChristopherSD/dsr-minicomp
0
python
def generate_CompetitionSince(all_data: pd.DataFrame, drop=True): "Generate (inplace) a feature 'CompetitionSince' which counts the months (in integer) since\n when the competition started.\n Fills missing values with -1000.\n Creates a new boolean column 'Competition_missing' highlighting the missing values.\n " mask = (~ all_data.CompetitionOpenSinceYear.isna()) year = all_data.loc[(mask, 'CompetitionOpenSinceYear')].astype(np.int).astype(str) month = all_data.loc[(mask, 'CompetitionOpenSinceMonth')].astype(np.int).apply('{:02d}'.format) now_date = all_data.loc[(mask, 'Date')] CompetitionSince = (now_date.dt.to_period('M') - pd.to_datetime(((year + '-') + month), format='%Y-%m').dt.to_period('M')) CompetitionSince = CompetitionSince.apply((lambda x: x.n)) all_data.loc[(mask, 'CompetitionSince')] = CompetitionSince all_data.loc[:, 'Competition_missing'] = all_data.CompetitionSince.isna() all_data.CompetitionSince.fillna((- 1000), inplace=True) if drop: all_data.drop(labels=['CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear'], axis=1, inplace=True)
def generate_CompetitionSince(all_data: pd.DataFrame, drop=True): "Generate (inplace) a feature 'CompetitionSince' which counts the months (in integer) since\n when the competition started.\n Fills missing values with -1000.\n Creates a new boolean column 'Competition_missing' highlighting the missing values.\n " mask = (~ all_data.CompetitionOpenSinceYear.isna()) year = all_data.loc[(mask, 'CompetitionOpenSinceYear')].astype(np.int).astype(str) month = all_data.loc[(mask, 'CompetitionOpenSinceMonth')].astype(np.int).apply('{:02d}'.format) now_date = all_data.loc[(mask, 'Date')] CompetitionSince = (now_date.dt.to_period('M') - pd.to_datetime(((year + '-') + month), format='%Y-%m').dt.to_period('M')) CompetitionSince = CompetitionSince.apply((lambda x: x.n)) all_data.loc[(mask, 'CompetitionSince')] = CompetitionSince all_data.loc[:, 'Competition_missing'] = all_data.CompetitionSince.isna() all_data.CompetitionSince.fillna((- 1000), inplace=True) if drop: all_data.drop(labels=['CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear'], axis=1, inplace=True)<|docstring|>Generate (inplace) a feature 'CompetitionSince' which counts the months (in integer) since when the competition started. Fills missing values with -1000. Creates a new boolean column 'Competition_missing' highlighting the missing values.<|endoftext|>
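A toy run of generate_CompetitionSince (data invented for illustration): a row dated 2015-07 with competition open since 2014-09 yields a month difference of 10, while a row with missing competition dates gets -1000 and the missing flag:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    'Date': pd.to_datetime(['2015-07-31', '2015-07-31']),
    'CompetitionOpenSinceYear': [2014.0, np.nan],
    'CompetitionOpenSinceMonth': [9.0, np.nan],
})
generate_CompetitionSince(df)
print(df.CompetitionSince.tolist())     # [10.0, -1000.0]
print(df.Competition_missing.tolist())  # [False, True]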
4bd87e0f8abb556ec7e166a9ce3fbfdbccb98e3a939994b7ce5734208e7ed646
def one_hot_encoder_fit_transform(df: pd.DataFrame, col_name: str): '\n Function to fit and transform column in DataFrame with OneHotEncoder\n\n Args:\n df - DataFrame to transform\n col_name: name of the column that has to be transformed\n Returns:\n input DataFrame with concatenated, transformed column\n ' enc = OneHotEncoder(handle_unknown='ignore', sparse=False) enc.fit(df[col_name].values.reshape((- 1), 1)) return (one_hot_encoder_transform(df, col_name, enc), enc)
Function to fit and transform column in DataFrame with OneHotEncoder Args: df - DataFrame to transform col_name: name of the column that has to be transformed Returns: input DataFrame with concatenated, transformed column
feature_engineering.py
one_hot_encoder_fit_transform
ChristopherSD/dsr-minicomp
0
python
def one_hot_encoder_fit_transform(df: pd.DataFrame, col_name: str): '\n Function to fit and transform column in DataFrame with OneHotEncoder\n\n Args:\n df - DataFrame to transform\n col_name: name of the column that has to be transformed\n Returns:\n input DataFrame with concatenated, transformed column\n ' enc = OneHotEncoder(handle_unknown='ignore', sparse=False) enc.fit(df[col_name].values.reshape((- 1), 1)) return (one_hot_encoder_transform(df, col_name, enc), enc)
def one_hot_encoder_fit_transform(df: pd.DataFrame, col_name: str): '\n Function to fit and transform column in DataFrame with OneHotEncoder\n\n Args:\n df - DataFrame to transform\n col_name: name of the column that has to be transformed\n Returns:\n input DataFrame with concatenated, transformed column\n ' enc = OneHotEncoder(handle_unknown='ignore', sparse=False) enc.fit(df[col_name].values.reshape((- 1), 1)) return (one_hot_encoder_transform(df, col_name, enc), enc)<|docstring|>Function to fit and transform column in DataFrame with OneHotEncoder Args: df - DataFrame to transform col_name: name of the column that has to be transformed Returns: input DataFrame with concatenated, transformed column<|endoftext|>
6da5840082e646830a3039eb63ef0a29f37aca2e7370530fba15cd0885fb3ad9
def one_hot_encoder_transform(df: pd.DataFrame, col_name: str, enc): '\n Function to fit and transform column in DataFrame with OneHotEncoder\n\n Args:\n df: DataFrame to transform\n col_name: name of the column that has to be transformed\n enc: instance of fitted OneHotEncoder\n Returns:\n input DataFrame with concatenated, transformed column\n ' encoded_column = pd.DataFrame(enc.transform(df[col_name].values.reshape((- 1), 1)), columns=[((col_name + '_') + str(item)) for item in range(len(enc.categories_[0]))], index=df.index) return pd.concat([df, encoded_column], axis=1).drop(col_name, axis=1)
Function to fit and transform column in DataFrame with OneHotEncoder Args: df: DataFrame to transform col_name: name of the column that has to be transformed enc: instance of fitted OneHotEncoder Returns: input DataFrame with concatenated, transformed column
feature_engineering.py
one_hot_encoder_transform
ChristopherSD/dsr-minicomp
0
python
def one_hot_encoder_transform(df: pd.DataFrame, col_name: str, enc): '\n Function to fit and transform column in DataFrame with OneHotEncoder\n\n Args:\n df: DataFrame to transform\n col_name: name of the column that has to be transformed\n enc: instance of fitted OneHotEncoder\n Returns:\n input DataFrame with concatenated, transformed column\n ' encoded_column = pd.DataFrame(enc.transform(df[col_name].values.reshape((- 1), 1)), columns=[((col_name + '_') + str(item)) for item in range(len(enc.categories_[0]))], index=df.index) return pd.concat([df, encoded_column], axis=1).drop(col_name, axis=1)
def one_hot_encoder_transform(df: pd.DataFrame, col_name: str, enc): '\n Function to fit and transform column in DataFrame with OneHotEncoder\n\n Args:\n df: DataFrame to transform\n col_name: name of the column that has to be transformed\n enc: instance of fitted OneHotEncoder\n Returns:\n input DataFrame with concatenated, transformed column\n ' encoded_column = pd.DataFrame(enc.transform(df[col_name].values.reshape((- 1), 1)), columns=[((col_name + '_') + str(item)) for item in range(len(enc.categories_[0]))], index=df.index) return pd.concat([df, encoded_column], axis=1).drop(col_name, axis=1)<|docstring|>Function to fit and transform column in DataFrame with OneHotEncoder Args: df: DataFrame to transform col_name: name of the column that has to be transformed enc: instance of fitted OneHotEncoder Returns: input DataFrame with concatenated, transformed column<|endoftext|>
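A short usage sketch for the two one-hot helper records above (column name and values invented; note they assume a scikit-learn version where OneHotEncoder still accepts the sparse keyword). With handle_unknown='ignore', a category unseen at fit time encodes to all zeros:

import pandas as pd

train = pd.DataFrame({'StoreType': ['a', 'b', 'a']})
train_enc, enc = one_hot_encoder_fit_transform(train, 'StoreType')
# 'StoreType' is replaced by columns StoreType_0 and StoreType_1.

test = pd.DataFrame({'StoreType': ['b', 'c']})  # 'c' was never seen
test_enc = one_hot_encoder_transform(test, 'StoreType', enc)
print(test_enc.values.tolist())                 # [[0.0, 1.0], [0.0, 0.0]]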
50a1cb2d99cd10bf6c0d0a6cf83cc6747abc48cb72efc28704c7955cd0643883
def is_StateHoliday(df): 'Generates a new boolean column indicating whether it is a StateHoliday or not\n ' return (((df.StateHoliday == 'a') | (df.StateHoliday == 'b')) | (df.StateHoliday == 'c'))
Generates a new boolean column indicating whether it is a StateHoliday or not
feature_engineering.py
is_StateHoliday
ChristopherSD/dsr-minicomp
0
python
def is_StateHoliday(df): '\n ' return (((df.StateHoliday == 'a') | (df.StateHoliday == 'b')) | (df.StateHoliday == 'c'))
def is_StateHoliday(df): '\n ' return (((df.StateHoliday == 'a') | (df.StateHoliday == 'b')) | (df.StateHoliday == 'c'))<|docstring|>Generates a new boolean column indicating whether it is a StateHoliday or not<|endoftext|>
165949eb107c9cdbc8323628a6566047b6d428bef4ba32fd53d3f747529bcff4
def is_SchoolHoliday(df): 'Generates a new boolean column indicating whether it is a SchoolHoliday or not\n ' return ((((df.SchoolHoliday == '1') | (df.SchoolHoliday == 1)) | (df.SchoolHoliday == '1.0')) | (df.SchoolHoliday == 1.0))
Generates a new boolean column indicating whether it is a SchoolHoliday or not
feature_engineering.py
is_SchoolHoliday
ChristopherSD/dsr-minicomp
0
python
def is_SchoolHoliday(df): '\n ' return ((((df.SchoolHoliday == '1') | (df.SchoolHoliday == 1)) | (df.SchoolHoliday == '1.0')) | (df.SchoolHoliday == 1.0))
def is_SchoolHoliday(df): '\n ' return ((((df.SchoolHoliday == '1') | (df.SchoolHoliday == 1)) | (df.SchoolHoliday == '1.0')) | (df.SchoolHoliday == 1.0))<|docstring|>Generates a new boolean column indicating whether it is a SchoolHoliday or not<|endoftext|>
64a1b2c1b057d20523727c87388c1e8fca88e8ac527f456aa9110edf02f7db8f
def log_transform(inp: pd.Series): '\n Function to log transform - takes care of negative and 0 values.\n\n Args:\n inp - pd.Series to log transform\n Returns:\n transformed pd.Series\n ' x = pd.Series() x = ((inp - inp.min()) + 1) return np.log(x)
Function to log transform - takes care of negative and 0 values. Args: inp - pd.Series to log transform Returns: transformed pd.Series
feature_engineering.py
log_transform
ChristopherSD/dsr-minicomp
0
python
def log_transform(inp: pd.Series): '\n    Function to log transform - takes care of negative and 0 values.\n\n    Args:\n        inp - pd.Series to log transform\n    Returns:\n        transformed pd.Series\n    ' x = ((inp - inp.min()) + 1) return np.log(x)
def log_transform(inp: pd.Series): '\n    Function to log transform - takes care of negative and 0 values.\n\n    Args:\n        inp - pd.Series to log transform\n    Returns:\n        transformed pd.Series\n    ' x = ((inp - inp.min()) + 1) return np.log(x)<|docstring|>Function to log transform - takes care of negative and 0 values. Args: inp - pd.Series to log transform Returns: transformed pd.Series<|endoftext|>
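Why the shift in log_transform matters: a bare np.log would return -inf at 0 and nan below it. A quick check with made-up numbers:

import numpy as np
import pandas as pd

s = pd.Series([-5.0, 0.0, 95.0])
shifted = (s - s.min()) + 1  # the minimum maps to 1, so log never sees a non-positive value
print(np.log(shifted))       # [log(1)=0.0, log(6), log(101)]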
a78b252ffb0944b77676d1b12eb431b65a4bf353c9121b01a3edb777e4227d68
def generate_PromoStarted(all_data: pd.DataFrame, drop=True, itvl_col='PromoInterval'): "Generate (inplace) a boolean feature 'PromoStarted' indicating whether the row's date falls\n    in a month listed in its PromoInterval.\n    " new_col_name = 'PromoStarted' promo_started = all_data.apply(is_in_promo_month, axis=1) all_data[new_col_name] = promo_started if drop: all_data.drop(labels=[itvl_col], axis=1, inplace=True)
Generate (inplace) a boolean feature 'PromoStarted' indicating whether the row's date falls in a month listed in its PromoInterval.
feature_engineering.py
generate_PromoStarted
ChristopherSD/dsr-minicomp
0
python
def generate_PromoStarted(all_data: pd.DataFrame, drop=True, itvl_col='PromoInterval'): "Generate (inplace) a boolean feature 'PromoStarted' indicating whether the row's date falls\n    in a month listed in its PromoInterval.\n    " new_col_name = 'PromoStarted' promo_started = all_data.apply(is_in_promo_month, axis=1) all_data[new_col_name] = promo_started if drop: all_data.drop(labels=[itvl_col], axis=1, inplace=True)
def generate_PromoStarted(all_data: pd.DataFrame, drop=True, itvl_col='PromoInterval'): "Generate (inplace) a boolean feature 'PromoStarted' indicating whether the row's date falls\n    in a month listed in its PromoInterval.\n    " new_col_name = 'PromoStarted' promo_started = all_data.apply(is_in_promo_month, axis=1) all_data[new_col_name] = promo_started if drop: all_data.drop(labels=[itvl_col], axis=1, inplace=True)<|docstring|>Generate (inplace) a boolean feature 'PromoStarted' indicating whether the row's date falls in a month listed in its PromoInterval.<|endoftext|>
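is_in_promo_month is defined elsewhere in the repository, so the stand-in below is only a guess at its behaviour (a row counts as in-promo when the date's month abbreviation appears in PromoInterval); treat this as a sketch, not the repo's actual logic:

import pandas as pd

def is_in_promo_month(row):
    # hypothetical stand-in for the repo's helper
    if pd.isna(row['PromoInterval']):
        return False
    return row['Date'].strftime('%b') in row['PromoInterval'].split(',')

df = pd.DataFrame({'Date': pd.to_datetime(['2015-01-05', '2015-02-05']),
                   'PromoInterval': ['Jan,Apr,Jul,Oct'] * 2})
generate_PromoStarted(df)
print(df['PromoStarted'].tolist())  # [True, False]; PromoInterval is dropped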
dcc61565b9d85ec221badedd94e73a05716d08793a939a11658092e56e267fe5
def generate_Promo2SinceNWeeks(all_data: pd.DataFrame, drop=True): "Generate (inplace) a feature 'Promo2SinceNWeeks' which counts the weeks (in integer) since\n    when a Promo2 started.\n    Fills missing values with -1000.\n    Creates a new boolean column 'Promo2SinceNWeeks_missing' highlighting the missing values.\n    " mask = (~ all_data.Promo2SinceYear.isna()) year = all_data.loc[(mask, 'Promo2SinceYear')].astype(int).astype(str) week = all_data.loc[(mask, 'Promo2SinceWeek')].astype(int).apply('{:02d}'.format) now_date = all_data.loc[(mask, 'Date')] Promo2SinceNWeeks = (now_date.dt.to_period('W') - pd.to_datetime((((year + '-') + week) + '0'), format='%Y-%W%w').dt.to_period('W')) Promo2SinceNWeeks = Promo2SinceNWeeks.apply((lambda x: x.n)) all_data.loc[(mask, 'Promo2SinceNWeeks')] = Promo2SinceNWeeks all_data.loc[:, 'Promo2SinceNWeeks_missing'] = all_data.Promo2SinceYear.isna() all_data.Promo2SinceNWeeks.fillna((- 1000), inplace=True) if drop: all_data.drop(labels=['Promo2SinceYear', 'Promo2SinceWeek'], axis=1, inplace=True)
Generate (inplace) a feature 'Promo2SinceNWeeks' which counts the weeks (in integer) since when a Promo2 started. Fills missing values with -1000. Creates a new boolean column 'Promo2SinceNWeeks_missing' highlighting the missing values.
feature_engineering.py
generate_Promo2SinceNWeeks
ChristopherSD/dsr-minicomp
0
python
def generate_Promo2SinceNWeeks(all_data: pd.DataFrame, drop=True): "Generate (inplace) a feature 'Promo2SinceNWeeks' which counts the weeks (in integer) since\n    when a Promo2 started.\n    Fills missing values with -1000.\n    Creates a new boolean column 'Promo2SinceNWeeks_missing' highlighting the missing values.\n    " mask = (~ all_data.Promo2SinceYear.isna()) year = all_data.loc[(mask, 'Promo2SinceYear')].astype(int).astype(str) week = all_data.loc[(mask, 'Promo2SinceWeek')].astype(int).apply('{:02d}'.format) now_date = all_data.loc[(mask, 'Date')] Promo2SinceNWeeks = (now_date.dt.to_period('W') - pd.to_datetime((((year + '-') + week) + '0'), format='%Y-%W%w').dt.to_period('W')) Promo2SinceNWeeks = Promo2SinceNWeeks.apply((lambda x: x.n)) all_data.loc[(mask, 'Promo2SinceNWeeks')] = Promo2SinceNWeeks all_data.loc[:, 'Promo2SinceNWeeks_missing'] = all_data.Promo2SinceYear.isna() all_data.Promo2SinceNWeeks.fillna((- 1000), inplace=True) if drop: all_data.drop(labels=['Promo2SinceYear', 'Promo2SinceWeek'], axis=1, inplace=True)
def generate_Promo2SinceNWeeks(all_data: pd.DataFrame, drop=True): "Generate (inplace) a feature 'Promo2SinceNWeeks' which counts the weeks (in integer) since\n    when a Promo2 started.\n    Fills missing values with -1000.\n    Creates a new boolean column 'Promo2SinceNWeeks_missing' highlighting the missing values.\n    " mask = (~ all_data.Promo2SinceYear.isna()) year = all_data.loc[(mask, 'Promo2SinceYear')].astype(int).astype(str) week = all_data.loc[(mask, 'Promo2SinceWeek')].astype(int).apply('{:02d}'.format) now_date = all_data.loc[(mask, 'Date')] Promo2SinceNWeeks = (now_date.dt.to_period('W') - pd.to_datetime((((year + '-') + week) + '0'), format='%Y-%W%w').dt.to_period('W')) Promo2SinceNWeeks = Promo2SinceNWeeks.apply((lambda x: x.n)) all_data.loc[(mask, 'Promo2SinceNWeeks')] = Promo2SinceNWeeks all_data.loc[:, 'Promo2SinceNWeeks_missing'] = all_data.Promo2SinceYear.isna() all_data.Promo2SinceNWeeks.fillna((- 1000), inplace=True) if drop: all_data.drop(labels=['Promo2SinceYear', 'Promo2SinceWeek'], axis=1, inplace=True)<|docstring|>Generate (inplace) a feature 'Promo2SinceNWeeks' which counts the weeks (in integer) since when a Promo2 started. Fills missing values with -1000. Creates a new boolean column 'Promo2SinceNWeeks_missing' highlighting the missing values.<|endoftext|>
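The core of the week arithmetic above, in isolation: the '%Y-%W%w' format with weekday digit 0 pins a (year, week) pair to a concrete date, and subtracting weekly Periods yields an offset whose .n is the integer week count (on recent pandas). The dates here are arbitrary:

import pandas as pd

start = pd.to_datetime('2014-400', format='%Y-%W%w')  # year 2014, week 40, weekday 0
now = pd.to_datetime('2015-07-31')
delta = now.to_period('W') - start.to_period('W')
print(delta.n)  # whole weeks elapsed between the two dates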
0c19440ed5ff0c63111ffe6d684a3c6a943097f645b1f78a4ad366543f6fcf21
def generate_col_month(df): 'Generates a new feature "month"\n ' month = df.Date.dt.month return month
Generates a new feature "month"
feature_engineering.py
generate_col_month
ChristopherSD/dsr-minicomp
0
python
def generate_col_month(df): '\n ' month = df.Date.dt.month return month
def generate_col_month(df): '\n ' month = df.Date.dt.month return month<|docstring|>Generates a new feature "month"<|endoftext|>
ff742b6e4c8e42cd97c7d9a57057129a9653e2a67711661418ee1f03164a6856
def target_encode_Stores(df, enc=None): 'Target encode the Store variable using the category_encoders module\n\n    Args:\n        df: Data\n        enc: Existing Encoder / if None retrain new encoder\n    ' target = df['Sales'].values stores = df['Store'].astype(str) if (not enc): print('Fit TargetEncoder...') enc = TargetEncoder() new_store = enc.fit_transform(stores, target) else: print('Transform using existing TargetEncoder...') new_store = enc.transform(stores, target) df.loc[:, 'Store'] = new_store return (new_store, enc)
Target encode the Store variable using the category_encoders module Args: df: Data enc: Existing Encoder / if None retrain new encoder
feature_engineering.py
target_encode_Stores
ChristopherSD/dsr-minicomp
0
python
def target_encode_Stores(df, enc=None): 'Target encode the Store variable using the category_encoders module\n\n    Args:\n        df: Data\n        enc: Existing Encoder / if None retrain new encoder\n    ' target = df['Sales'].values stores = df['Store'].astype(str) if (not enc): print('Fit TargetEncoder...') enc = TargetEncoder() new_store = enc.fit_transform(stores, target) else: print('Transform using existing TargetEncoder...') new_store = enc.transform(stores, target) df.loc[:, 'Store'] = new_store return (new_store, enc)
def target_encode_Stores(df, enc=None): 'Target encode the Store variable using the category_encoders module\n\n    Args:\n        df: Data\n        enc: Existing Encoder / if None retrain new encoder\n    ' target = df['Sales'].values stores = df['Store'].astype(str) if (not enc): print('Fit TargetEncoder...') enc = TargetEncoder() new_store = enc.fit_transform(stores, target) else: print('Transform using existing TargetEncoder...') new_store = enc.transform(stores, target) df.loc[:, 'Store'] = new_store return (new_store, enc)<|docstring|>Target encode the Store variable using the category_encoders module Args: df: Data enc: Existing Encoder / if None retrain new encoder<|endoftext|>
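Usage sketch for the store encoder: fit once on the training frame, then pass the fitted encoder back in for the test frame so test targets never shape the mapping. The tiny frames are illustrative only:

import pandas as pd

train = pd.DataFrame({'Store': [1, 1, 2, 2], 'Sales': [5000, 5200, 7000, 7400]})
test = pd.DataFrame({'Store': [1, 2], 'Sales': [5100, 7300]})
_, enc = target_encode_Stores(train)  # fits a new TargetEncoder in place
target_encode_Stores(test, enc)       # reuses the fitted encoder
print(train['Store'].tolist(), test['Store'].tolist())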
73da9aa9963ac63a0c695117408263d7f183d0c9b54022e45ed7ddc2487c4398
def target_encode_custom(df: pd.DataFrame, name: str, enc=None): 'Target encode the given column using the category_encoders module\n\n    Args:\n        df: Data\n        name (str): name of the column to encode\n        enc: Existing Encoder / if None retrain new encoder\n    ' target = df['Sales'].values stores = df[name].astype(str) if (not enc): print('Fit TargetEncoder...') enc = TargetEncoder() new_store = enc.fit_transform(stores, target) else: print('Transform using existing TargetEncoder...') new_store = enc.transform(stores, target) df.loc[:, name] = new_store return (new_store, enc)
Target encode the given column using the category_encoders module Args: df: Data name (str): name of the column to encode enc: Existing Encoder / if None retrain new encoder
feature_engineering.py
target_encode_custom
ChristopherSD/dsr-minicomp
0
python
def target_encode_custom(df: pd.DataFrame, name: str, enc=None): 'Target encode the given column using the category_encoders module\n\n    Args:\n        df: Data\n        name (str): name of the column to encode\n        enc: Existing Encoder / if None retrain new encoder\n    ' target = df['Sales'].values stores = df[name].astype(str) if (not enc): print('Fit TargetEncoder...') enc = TargetEncoder() new_store = enc.fit_transform(stores, target) else: print('Transform using existing TargetEncoder...') new_store = enc.transform(stores, target) df.loc[:, name] = new_store return (new_store, enc)
def target_encode_custom(df: pd.DataFrame, name: str, enc=None): 'Target encode the given column using the category_encoders module\n\n    Args:\n        df: Data\n        name (str): name of the column to encode\n        enc: Existing Encoder / if None retrain new encoder\n    ' target = df['Sales'].values stores = df[name].astype(str) if (not enc): print('Fit TargetEncoder...') enc = TargetEncoder() new_store = enc.fit_transform(stores, target) else: print('Transform using existing TargetEncoder...') new_store = enc.transform(stores, target) df.loc[:, name] = new_store return (new_store, enc)<|docstring|>Target encode the given column using the category_encoders module Args: df: Data name (str): name of the column to encode enc: Existing Encoder / if None retrain new encoder<|endoftext|>
c00f0e978d084a3ea0fe4845ef49fd8d1f1ec7697013944c70a0108d7ae8ebb3
def generate_cyclic_feature_month(df): 'Generates cyclic sin/cos features for the month\n    ' sin_month = np.sin((((df.Date.dt.month / 12) * 2) * np.pi)) cos_month = np.cos((((df.Date.dt.month / 12) * 2) * np.pi)) sin_month = sin_month.reindex(df.index) cos_month = cos_month.reindex(df.index) return (sin_month, cos_month)
Generates cyclic sin/cos features for the month
feature_engineering.py
generate_cyclic_feature_month
ChristopherSD/dsr-minicomp
0
python
def generate_cyclic_feature_month(df): '\n ' sin_month = np.sin((((df.Date.dt.month / 12) * 2) * np.pi)) cos_month = np.cos((((df.Date.dt.month / 12) * 2) * np.pi)) sin_month = sin_month.reindex(df.index) cos_month = cos_month.reindex(df.index) return (sin_month, cos_month)
def generate_cyclic_feature_month(df): '\n    ' sin_month = np.sin((((df.Date.dt.month / 12) * 2) * np.pi)) cos_month = np.cos((((df.Date.dt.month / 12) * 2) * np.pi)) sin_month = sin_month.reindex(df.index) cos_month = cos_month.reindex(df.index) return (sin_month, cos_month)<|docstring|>Generates cyclic sin/cos features for the month<|endoftext|>
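The point of the sin/cos pair above: December (12) and January (1) are eleven apart as integers but adjacent on the unit circle. A quick check in plain numpy:

import numpy as np

months = np.array([1, 6, 12])
sin_m = np.sin(months / 12 * 2 * np.pi)
cos_m = np.cos(months / 12 * 2 * np.pi)
print(np.hypot(sin_m[0] - sin_m[2], cos_m[0] - cos_m[2]))  # Jan-Dec distance, ~0.52
print(np.hypot(sin_m[0] - sin_m[1], cos_m[0] - cos_m[1]))  # Jan-Jun distance, ~1.93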
49df06f2f337e0cba0c08326a7be31046c25267c603f5d81c16cdca3eecd22f1
def generate_cyclic_feature_week(df): 'Generates cyclic sin/cos features for the week\n    ' sin_week = np.sin((((df.Date.dt.week / 52) * 2) * np.pi)) cos_week = np.cos((((df.Date.dt.week / 52) * 2) * np.pi)) sin_week = sin_week.reindex(df.index) cos_week = cos_week.reindex(df.index) return (sin_week, cos_week)
Generates cyclic sin/cos features for the week
feature_engineering.py
generate_cyclic_feature_week
ChristopherSD/dsr-minicomp
0
python
def generate_cyclic_feature_week(df): '\n ' sin_week = np.sin((((df.Date.dt.week / 52) * 2) * np.pi)) cos_week = np.cos((((df.Date.dt.week / 52) * 2) * np.pi)) sin_week = sin_week.reindex(df.index) cos_week = cos_week.reindex(df.index) return (sin_week, cos_week)
def generate_cyclic_feature_week(df): '\n    ' sin_week = np.sin((((df.Date.dt.week / 52) * 2) * np.pi)) cos_week = np.cos((((df.Date.dt.week / 52) * 2) * np.pi)) sin_week = sin_week.reindex(df.index) cos_week = cos_week.reindex(df.index) return (sin_week, cos_week)<|docstring|>Generates cyclic sin/cos features for the week<|endoftext|>
1ebaa270c20b3b515317feb1c9d76ccfc8589ecd03249e4a3658dd312430579e
def my_impute_data(data): "Custom function for Michael's Model\n " df = data.copy() df.Promo2.fillna('unknown', inplace=True) impute_competition_distance = df.CompetitionDistance.median() df.CompetitionDistance.fillna(impute_competition_distance, inplace=True) generate_CompetitionSince(df) generate_Promo2SinceNWeeks(df) df.drop(labels=['PromoInterval'], axis=1, inplace=True) new_col = df[['StateHoliday']].apply((lambda x: (x['StateHoliday'] if (x['StateHoliday'] in ['a', 'b', 'c']) else '0')), axis=1) df.StateHoliday = new_col return df
Custom function for Michael's Model
feature_engineering.py
my_impute_data
ChristopherSD/dsr-minicomp
0
python
def my_impute_data(data): "\n " df = data.copy() df.Promo2.fillna('unknown', inplace=True) impute_competition_distance = df.CompetitionDistance.median() df.CompetitionDistance.fillna(impute_competition_distance, inplace=True) generate_CompetitionSince(df) generate_Promo2SinceNWeeks(df) df.drop(labels=['PromoInterval'], axis=1, inplace=True) new_col = df[['StateHoliday']].apply((lambda x: (x['StateHoliday'] if (x['StateHoliday'] in ['a', 'b', 'c']) else '0')), axis=1) df.StateHoliday = new_col return df
def my_impute_data(data): "\n " df = data.copy() df.Promo2.fillna('unknown', inplace=True) impute_competition_distance = df.CompetitionDistance.median() df.CompetitionDistance.fillna(impute_competition_distance, inplace=True) generate_CompetitionSince(df) generate_Promo2SinceNWeeks(df) df.drop(labels=['PromoInterval'], axis=1, inplace=True) new_col = df[['StateHoliday']].apply((lambda x: (x['StateHoliday'] if (x['StateHoliday'] in ['a', 'b', 'c']) else '0')), axis=1) df.StateHoliday = new_col return df<|docstring|>Custom function for Michael's Model<|endoftext|>
81f899948c53fd98c8dfdd5f67dc8d0c7e20a104e798c04e49e85cfd833738ff
def massive_onehot(input_df, enc=None): "Apply OneHotEncoder to columns:\n    'Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'\n    " data = input_df.copy() cat_features = ['Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'] for c in cat_features: data[c] = data[c].replace(0, '0').replace(1, '1') if (not enc): print('Fit OneHotEncoder...') enc = OneHotEncoder() new_cat = pd.DataFrame(enc.fit_transform(data[cat_features].astype(str)).toarray()) else: print('Transform using existing OneHotEncoder...') new_cat = pd.DataFrame(enc.transform(data[cat_features].astype(str)).toarray()) new_cat.columns = enc.get_feature_names() new_cat.index = data.index data = data.join(new_cat).drop(cat_features, axis=1) return (data, enc)
Apply OneHotEncoder to columns: 'Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'
feature_engineering.py
massive_onehot
ChristopherSD/dsr-minicomp
0
python
def massive_onehot(input_df, enc=None): "Apply OneHotEncoder to columns:\n    'Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'\n    " data = input_df.copy() cat_features = ['Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'] for c in cat_features: data[c] = data[c].replace(0, '0').replace(1, '1') if (not enc): print('Fit OneHotEncoder...') enc = OneHotEncoder() new_cat = pd.DataFrame(enc.fit_transform(data[cat_features].astype(str)).toarray()) else: print('Transform using existing OneHotEncoder...') new_cat = pd.DataFrame(enc.transform(data[cat_features].astype(str)).toarray()) new_cat.columns = enc.get_feature_names() new_cat.index = data.index data = data.join(new_cat).drop(cat_features, axis=1) return (data, enc)
def massive_onehot(input_df, enc=None): "Apply OneHotEncoder to columns:\n    'Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'\n    " data = input_df.copy() cat_features = ['Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'] for c in cat_features: data[c] = data[c].replace(0, '0').replace(1, '1') if (not enc): print('Fit OneHotEncoder...') enc = OneHotEncoder() new_cat = pd.DataFrame(enc.fit_transform(data[cat_features].astype(str)).toarray()) else: print('Transform using existing OneHotEncoder...') new_cat = pd.DataFrame(enc.transform(data[cat_features].astype(str)).toarray()) new_cat.columns = enc.get_feature_names() new_cat.index = data.index data = data.join(new_cat).drop(cat_features, axis=1) return (data, enc)<|docstring|>Apply OneHotEncoder to columns: 'Promo', 'SchoolHoliday', 'StateHoliday', 'StoreType', 'Assortment', 'Promo2'<|endoftext|>
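How the two-argument pattern in massive_onehot is meant to be used (train_df and test_df are assumed frames containing the six listed columns): fit the encoder once on the training split and reuse it, so both frames end up with one shared column layout. Note that the default OneHotEncoder raises on categories unseen during fit:

train_encoded, enc = massive_onehot(train_df)   # fits and transforms
test_encoded, _ = massive_onehot(test_df, enc)  # transform only
assert list(train_encoded.columns) == list(test_encoded.columns)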
0fbf4d1bc6844db3dcad3db163d75bf74cfa33fcffcf12efd7f7b395647178c7
def my_preprocess_data(data, oneh_enc=None, target_enc=None): " Data preprocessing function for Michael's Model\n " data = create_basetable(data) data = my_impute_data(data) (data, oneh_enc) = massive_onehot(data, oneh_enc) (new_store, target_enc) = target_encode_Stores(data, target_enc) (sin_month, cos_month) = generate_cyclic_feature_month(data) data['sin_month'] = sin_month data['cos_month'] = cos_month data = data.drop(['Date'], axis=1) return (data, oneh_enc, target_enc)
Data preprocessing function for Michael's Model
feature_engineering.py
my_preprocess_data
ChristopherSD/dsr-minicomp
0
python
def my_preprocess_data(data, oneh_enc=None, target_enc=None): " \n " data = create_basetable(data) data = my_impute_data(data) (data, oneh_enc) = massive_onehot(data, oneh_enc) (new_store, target_enc) = target_encode_Stores(data, target_enc) (sin_month, cos_month) = generate_cyclic_feature_month(data) data['sin_month'] = sin_month data['cos_month'] = cos_month data = data.drop(['Date'], axis=1) return (data, oneh_enc, target_enc)
def my_preprocess_data(data, oneh_enc=None, target_enc=None): " \n " data = create_basetable(data) data = my_impute_data(data) (data, oneh_enc) = massive_onehot(data, oneh_enc) (new_store, target_enc) = target_encode_Stores(data, target_enc) (sin_month, cos_month) = generate_cyclic_feature_month(data) data['sin_month'] = sin_month data['cos_month'] = cos_month data = data.drop(['Date'], axis=1) return (data, oneh_enc, target_enc)<|docstring|>Data preprocessing function for Michael's Model<|endoftext|>
f88027b082a39984d0da0ee03f0fc3fef33b0c65838f88804d5c707910eb9d3e
def prepare_data_for_model_ti(): "\n Data pre-processing for Tom's model\n " train_vars = ['DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2', 'Competition_missing', 'Promo2SinceNWeeks_missing', 'PromoStarted', 'Store', 'sin_week', 'cos_week', 'sin_month', 'cos_month', 'Customers_log', 'CompetitionDistance_log', 'CompetitionSince_log', 'Promo2SinceNWeeks_log'] enc_var_list = ['DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2'] raw_train = get_all_train_data() raw_test = get_all_test_data() df_train_imputed = create_basetable(raw_train, {'Store': 0, 'StoreType': 'unknown', 'SchoolHoliday': 'unknown', 'Assortment': 'unknown', 'StateHoliday': 'unknown', 'DayOfWeek': 'unknown', 'Promo': 'unknown', 'Promo2': 'unknown', 'CompetitionDistance': (- 1)}) df_test_imputed = create_basetable(raw_test, {'Store': 0, 'StoreType': 'unknown', 'SchoolHoliday': 'unknown', 'Assortment': 'unknown', 'StateHoliday': 'unknown', 'DayOfWeek': 'unknown', 'Promo': 'unknown', 'Promo2': 'unknown', 'CompetitionDistance': (- 1)}) df_train = df_train_imputed.copy() df_test = df_test_imputed.copy() y_test = df_test['Sales'] custom_transformer_ti(df_train) custom_transformer_ti(df_test) (new_col, target_encoder) = target_encode_Stores(df_train) df_train['Store'] = new_col (new_col, _) = target_encode_Stores(df_test, target_encoder) df_test['Store'] = new_col df_train = df_train[train_vars] df_test = df_test[train_vars] for col in enc_var_list: (df_train, one_hot_encoder) = one_hot_encoder_fit_transform(df_train, col) df_test = one_hot_encoder_transform(df_test, col, one_hot_encoder) return (df_test, y_test)
Data pre-processing for Tom's model
feature_engineering.py
prepare_data_for_model_ti
ChristopherSD/dsr-minicomp
0
python
def prepare_data_for_model_ti(): "\n \n " train_vars = ['DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2', 'Competition_missing', 'Promo2SinceNWeeks_missing', 'PromoStarted', 'Store', 'sin_week', 'cos_week', 'sin_month', 'cos_month', 'Customers_log', 'CompetitionDistance_log', 'CompetitionSince_log', 'Promo2SinceNWeeks_log'] enc_var_list = ['DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2'] raw_train = get_all_train_data() raw_test = get_all_test_data() df_train_imputed = create_basetable(raw_train, {'Store': 0, 'StoreType': 'unknown', 'SchoolHoliday': 'unknown', 'Assortment': 'unknown', 'StateHoliday': 'unknown', 'DayOfWeek': 'unknown', 'Promo': 'unknown', 'Promo2': 'unknown', 'CompetitionDistance': (- 1)}) df_test_imputed = create_basetable(raw_test, {'Store': 0, 'StoreType': 'unknown', 'SchoolHoliday': 'unknown', 'Assortment': 'unknown', 'StateHoliday': 'unknown', 'DayOfWeek': 'unknown', 'Promo': 'unknown', 'Promo2': 'unknown', 'CompetitionDistance': (- 1)}) df_train = df_train_imputed.copy() df_test = df_test_imputed.copy() y_test = df_test['Sales'] custom_transformer_ti(df_train) custom_transformer_ti(df_test) (new_col, target_encoder) = target_encode_Stores(df_train) df_train['Store'] = new_col (new_col, _) = target_encode_Stores(df_test, target_encoder) df_test['Store'] = new_col df_train = df_train[train_vars] df_test = df_test[train_vars] for col in enc_var_list: (df_train, one_hot_encoder) = one_hot_encoder_fit_transform(df_train, col) df_test = one_hot_encoder_transform(df_test, col, one_hot_encoder) return (df_test, y_test)
def prepare_data_for_model_ti(): "\n \n " train_vars = ['DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2', 'Competition_missing', 'Promo2SinceNWeeks_missing', 'PromoStarted', 'Store', 'sin_week', 'cos_week', 'sin_month', 'cos_month', 'Customers_log', 'CompetitionDistance_log', 'CompetitionSince_log', 'Promo2SinceNWeeks_log'] enc_var_list = ['DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2'] raw_train = get_all_train_data() raw_test = get_all_test_data() df_train_imputed = create_basetable(raw_train, {'Store': 0, 'StoreType': 'unknown', 'SchoolHoliday': 'unknown', 'Assortment': 'unknown', 'StateHoliday': 'unknown', 'DayOfWeek': 'unknown', 'Promo': 'unknown', 'Promo2': 'unknown', 'CompetitionDistance': (- 1)}) df_test_imputed = create_basetable(raw_test, {'Store': 0, 'StoreType': 'unknown', 'SchoolHoliday': 'unknown', 'Assortment': 'unknown', 'StateHoliday': 'unknown', 'DayOfWeek': 'unknown', 'Promo': 'unknown', 'Promo2': 'unknown', 'CompetitionDistance': (- 1)}) df_train = df_train_imputed.copy() df_test = df_test_imputed.copy() y_test = df_test['Sales'] custom_transformer_ti(df_train) custom_transformer_ti(df_test) (new_col, target_encoder) = target_encode_Stores(df_train) df_train['Store'] = new_col (new_col, _) = target_encode_Stores(df_test, target_encoder) df_test['Store'] = new_col df_train = df_train[train_vars] df_test = df_test[train_vars] for col in enc_var_list: (df_train, one_hot_encoder) = one_hot_encoder_fit_transform(df_train, col) df_test = one_hot_encoder_transform(df_test, col, one_hot_encoder) return (df_test, y_test)<|docstring|>Data pre-processing for Tom's model<|endoftext|>
d2c6c47edc2f071e7f97214d557796821f32d0b0a39ca7863f7dd47cddf59e0e
def custom_transformer_ti(df): "\n Pre-processing pipeline for Tom's model\n " col_to_str = ['Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2'] col_to_log_transform = ['Customers', 'CompetitionDistance', 'CompetitionSince', 'Promo2SinceNWeeks'] generate_CompetitionSince(df) generate_Promo2SinceNWeeks(df) generate_PromoStarted(df) df['Month'] = df['Date'].dt.month df['Week'] = df['Date'].dt.week for col in col_to_log_transform: df[(col + '_log')] = log_transform(df[col]) (sin_month, cos_month) = generate_cyclic_feature_month(df) df['sin_month'] = sin_month df['cos_month'] = cos_month (sin_week, cos_week) = generate_cyclic_feature_week(df) df['sin_week'] = sin_week df['cos_week'] = cos_week df['Promo'] = df['Promo'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['Promo2'] = df['Promo2'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['SchoolHoliday'] = df['SchoolHoliday'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['StateHoliday'] = df['StateHoliday'].replace({'0': 'n'}).astype(str) for col in col_to_str: df[col] = df[col].astype(str)
Pre-processing pipeline for Tom's model
feature_engineering.py
custom_transformer_ti
ChristopherSD/dsr-minicomp
0
python
def custom_transformer_ti(df): "\n \n " col_to_str = ['Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2'] col_to_log_transform = ['Customers', 'CompetitionDistance', 'CompetitionSince', 'Promo2SinceNWeeks'] generate_CompetitionSince(df) generate_Promo2SinceNWeeks(df) generate_PromoStarted(df) df['Month'] = df['Date'].dt.month df['Week'] = df['Date'].dt.week for col in col_to_log_transform: df[(col + '_log')] = log_transform(df[col]) (sin_month, cos_month) = generate_cyclic_feature_month(df) df['sin_month'] = sin_month df['cos_month'] = cos_month (sin_week, cos_week) = generate_cyclic_feature_week(df) df['sin_week'] = sin_week df['cos_week'] = cos_week df['Promo'] = df['Promo'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['Promo2'] = df['Promo2'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['SchoolHoliday'] = df['SchoolHoliday'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['StateHoliday'] = df['StateHoliday'].replace({'0': 'n'}).astype(str) for col in col_to_str: df[col] = df[col].astype(str)
def custom_transformer_ti(df): "\n \n " col_to_str = ['Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'Promo2'] col_to_log_transform = ['Customers', 'CompetitionDistance', 'CompetitionSince', 'Promo2SinceNWeeks'] generate_CompetitionSince(df) generate_Promo2SinceNWeeks(df) generate_PromoStarted(df) df['Month'] = df['Date'].dt.month df['Week'] = df['Date'].dt.week for col in col_to_log_transform: df[(col + '_log')] = log_transform(df[col]) (sin_month, cos_month) = generate_cyclic_feature_month(df) df['sin_month'] = sin_month df['cos_month'] = cos_month (sin_week, cos_week) = generate_cyclic_feature_week(df) df['sin_week'] = sin_week df['cos_week'] = cos_week df['Promo'] = df['Promo'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['Promo2'] = df['Promo2'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['SchoolHoliday'] = df['SchoolHoliday'].replace({0.0: 'n', 0: 'n', 1.0: 'y', 1: 'y'}).astype(str) df['StateHoliday'] = df['StateHoliday'].replace({'0': 'n'}).astype(str) for col in col_to_str: df[col] = df[col].astype(str)<|docstring|>Pre-processing pipeline for Tom's model<|endoftext|>
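One portability note on the pipeline above: Series.dt.week was deprecated in pandas 1.1 and removed in 2.0, so on modern pandas the Week column needs the isocalendar equivalent, e.g. for a frame like the ones here:

df['Week'] = df['Date'].dt.isocalendar().week.astype(int)  # replaces df['Date'].dt.week on pandas >= 2.0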