Dataset columns (name, type, observed value range):

  body_hash               string   length 64
  body                    string   length 23 to 109k
  docstring               string   length 1 to 57k
  path                    string   length 4 to 198
  name                    string   length 1 to 115
  repository_name         string   length 7 to 111
  repository_stars        float64  0 to 191k
  lang                    string   1 distinct value
  body_without_docstring  string   length 14 to 108k
  unified                 string   length 45 to 133k
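The flattened records below are easier to handle programmatically than to read inline. As a minimal sketch of how such a dump could be loaded and inspected (assuming it is published as a Hugging Face dataset; the identifier 'user/python-docstring-corpus' is a hypothetical placeholder, not the real name):

# Minimal sketch: load the corpus and inspect one record.
# NOTE: 'user/python-docstring-corpus' is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset('user/python-docstring-corpus', split='train')
row = ds[0]
print(row['repository_name'], row['path'], row['name'])
print(row['docstring'])               # natural-language summary
print(row['body_without_docstring'])  # function source with the docstring stripped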
86e5c4db03fcdf9a27ba6804dc2d28b947e70e53e115bb84bcfc8e9df661499d
def is_consistent_transaction(self, tx: Transaction) -> bool:
    """
    Check if the transaction is consistent.

    E.g. check that the agent state has enough money if it is a buyer
    or enough holdings if it is a seller.

    :param tx: the transaction
    :return: True if the transaction is legal wrt the current state, False otherwise.
    """
    result = self.agent_address in [tx.sender_address, tx.counterparty_address]
    result = result and tx.is_single_currency
    if not result:
        return result
    if all(amount == 0 for amount in tx.amount_by_currency_id.values()) and all(
        quantity == 0 for quantity in tx.quantities_by_good_id.values()
    ):
        result = False
    elif all(amount <= 0 for amount in tx.amount_by_currency_id.values()) and all(
        quantity >= 0 for quantity in tx.quantities_by_good_id.values()
    ):
        if self.agent_address == tx.sender_address:
            result = result and (
                self.amount_by_currency_id[tx.currency_id] >= tx.sender_payable_amount
            )
        elif self.agent_address == tx.counterparty_address:
            result = result and all(
                self.quantities_by_good_id[good_id] >= quantity
                for good_id, quantity in tx.quantities_by_good_id.items()
            )
    elif all(amount >= 0 for amount in tx.amount_by_currency_id.values()) and all(
        quantity <= 0 for quantity in tx.quantities_by_good_id.values()
    ):
        if self.agent_address == tx.sender_address:
            result = result and all(
                self.quantities_by_good_id[good_id] >= -quantity
                for good_id, quantity in tx.quantities_by_good_id.items()
            )
        elif self.agent_address == tx.counterparty_address:
            result = result and (
                self.amount_by_currency_id[tx.currency_id] >= tx.counterparty_payable_amount
            )
    else:
        result = False
    return result
Check if the transaction is consistent. E.g. check that the agent state has enough money if it is a buyer or enough holdings if it is a seller. :param tx: the transaction :return: True if the transaction is legal wrt the current state, False otherwise.
packages/fetchai/skills/tac_control/game.py
is_consistent_transaction
bryanchriswhite/agents-aea
126
python
4d9c2ea6e9fb131b3929924f0922ae8355f927543c0b693fedc9ce9e235b6d53
def apply(self, transactions: List[Transaction]) -> 'AgentState':
    """
    Apply a list of transactions to the current state.

    :param transactions: the sequence of transaction.
    :return: the final state.
    """
    new_state = copy.copy(self)
    for tx in transactions:
        new_state.update(tx)
    return new_state
Apply a list of transactions to the current state. :param transactions: the sequence of transaction. :return: the final state.
packages/fetchai/skills/tac_control/game.py
apply
bryanchriswhite/agents-aea
126
python
3f08c71bdfa77f32cbcb07c7f10bc920fc75fc9daa4cb3df3f17c9a99f611f95
def update(self, tx: Transaction) -> None:
    """
    Update the agent state from a transaction.

    :param tx: the transaction.
    """
    enforce(self.is_consistent_transaction(tx), 'Inconsistent transaction.')
    new_amount_by_currency_id = self.amount_by_currency_id
    if self.agent_address == tx.sender_address:
        for currency_id, amount in tx.amount_by_currency_id.items():
            new_amount_by_currency_id[currency_id] += amount
    elif self.agent_address == tx.counterparty_address:
        for currency_id, amount in tx.amount_by_currency_id.items():
            new_amount_by_currency_id[currency_id] -= amount
    self._amount_by_currency_id = new_amount_by_currency_id
    new_quantities_by_good_id = self.quantities_by_good_id
    for good_id, quantity in tx.quantities_by_good_id.items():
        if self.agent_address == tx.sender_address:
            new_quantities_by_good_id[good_id] += quantity
        elif self.agent_address == tx.counterparty_address:
            new_quantities_by_good_id[good_id] -= quantity
    self._quantities_by_good_id = new_quantities_by_good_id
Update the agent state from a transaction. :param tx: the transaction.
packages/fetchai/skills/tac_control/game.py
update
bryanchriswhite/agents-aea
126
python
576505c45b5e3bb31ac196f54632b1e08f2488580d5c6b09103e00e333434e3b
def __copy__(self) -> 'AgentState':
    """Copy the object."""
    return AgentState(
        self.agent_address,
        self.amount_by_currency_id,
        self.exchange_params_by_currency_id,
        self.quantities_by_good_id,
        self.utility_params_by_good_id,
    )
Copy the object.
packages/fetchai/skills/tac_control/game.py
__copy__
bryanchriswhite/agents-aea
126
python
ee61f34631a0fd1a78a2ded8df99c21849d6cd4539e70f987244ec8941f1d587
def __str__(self) -> str:
    """From object to string."""
    return 'AgentState{}'.format(
        pprint.pformat(
            {
                'agent_address': self.agent_address,
                'amount_by_currency_id': self.amount_by_currency_id,
                'exchange_params_by_currency_id': self.exchange_params_by_currency_id,
                'quantities_by_good_id': self.quantities_by_good_id,
                'utility_params_by_good_id': self.utility_params_by_good_id,
            }
        )
    )
From object to string.
packages/fetchai/skills/tac_control/game.py
__str__
bryanchriswhite/agents-aea
126
python
37df2b35d9caa45aa28060859a3f5329a96bc880982ecebf9aaa900731fff0c5
def __eq__(self, other: Any) -> bool:
    """Compare equality of two instances of the class."""
    return (
        isinstance(other, AgentState)
        and self.agent_address == other.agent_address
        and self.amount_by_currency_id == other.amount_by_currency_id
        and self.exchange_params_by_currency_id == other.exchange_params_by_currency_id
        and self.quantities_by_good_id == other.quantities_by_good_id
        and self.utility_params_by_good_id == other.utility_params_by_good_id
    )
Compare equality of two instances of the class.
packages/fetchai/skills/tac_control/game.py
__eq__
bryanchriswhite/agents-aea
126
python
93d04a5315cd880c4e31fad177afd38fa0d13e900aa3417a7be30a5407a21fce
def __init__(self) -> None:
    """Instantiate the transaction class."""
    self._confirmed = {}
    self._confirmed_per_agent = {}
Instantiate the transaction class.
packages/fetchai/skills/tac_control/game.py
__init__
bryanchriswhite/agents-aea
126
python
1d5600f5e0ecf9a76101ca52f9fd2b329b1169dfaaae7062997e1f6a07b61a82
@property
def confirmed(self) -> Dict[datetime.datetime, Transaction]:
    """Get the confirmed transactions."""
    return self._confirmed
Get the confirmed transactions.
packages/fetchai/skills/tac_control/game.py
confirmed
bryanchriswhite/agents-aea
126
python
1044bf4618565490216d650ad7cc9e0d9ce95028921e059405799e5a64ea7b87
@property
def confirmed_per_agent(self) -> Dict[Address, Dict[datetime.datetime, Transaction]]:
    """Get the confirmed transactions by agent."""
    return self._confirmed_per_agent
Get the confirmed transactions by agent.
packages/fetchai/skills/tac_control/game.py
confirmed_per_agent
bryanchriswhite/agents-aea
126
python
918811fd5f3635e67c05cdbf63859a79b4816d70076566b6db7dfcb874d84aeb
def add(self, transaction: Transaction) -> None:
    """
    Add a confirmed transaction.

    :param transaction: the transaction
    """
    now = datetime.datetime.now()
    self._confirmed[now] = transaction
    if self._confirmed_per_agent.get(transaction.sender_address) is None:
        self._confirmed_per_agent[transaction.sender_address] = {}
    self._confirmed_per_agent[transaction.sender_address][now] = transaction
    if self._confirmed_per_agent.get(transaction.counterparty_address) is None:
        self._confirmed_per_agent[transaction.counterparty_address] = {}
    self._confirmed_per_agent[transaction.counterparty_address][now] = transaction
Add a confirmed transaction. :param transaction: the transaction
packages/fetchai/skills/tac_control/game.py
add
bryanchriswhite/agents-aea
126
python
5a4dc86bbaadcc519146b599ab62da57518b43150ad49e89b9582cc140ff6065
def __init__(self) -> None:
    """Instantiate the registration class."""
    self._agent_addr_to_name = {}
Instantiate the registration class.
packages/fetchai/skills/tac_control/game.py
__init__
bryanchriswhite/agents-aea
126
python
ac8644e59379acf87dc4f478745a9214ccd26b9b298ba377f511bbb2b788bde1
@property
def agent_addr_to_name(self) -> Dict[str, str]:
    """Get the registered agent addresses and their names."""
    return self._agent_addr_to_name
Get the registered agent addresses and their names.
packages/fetchai/skills/tac_control/game.py
agent_addr_to_name
bryanchriswhite/agents-aea
126
python
d7b18dbf6c5b0375332d75e089f9ea1b0056ce4f71093ef1f917b6c53a892a07
@property
def nb_agents(self) -> int:
    """Get the number of registered agents."""
    return len(self._agent_addr_to_name)
Get the number of registered agents.
packages/fetchai/skills/tac_control/game.py
nb_agents
bryanchriswhite/agents-aea
126
python
7af81e4d9ef1f5398ccede6e51f54a56b5da9ec568e6388e0fd13b034c9fe468
def register_agent(self, agent_addr: Address, agent_name: str) -> None:
    """
    Register an agent.

    :param agent_addr: the Address of the agent
    :param agent_name: the name of the agent
    """
    self._agent_addr_to_name[agent_addr] = agent_name
Register an agent. :param agent_addr: the Address of the agent :param agent_name: the name of the agent
packages/fetchai/skills/tac_control/game.py
register_agent
bryanchriswhite/agents-aea
126
python
d90f2eb76156c3240068d3a84aacf687963968f8df0bcef13accf261d00cd510
def unregister_agent(self, agent_addr: Address) -> None:
    """
    Register an agent.

    :param agent_addr: the Address of the agent
    """
    self._agent_addr_to_name.pop(agent_addr)
Register an agent. :param agent_addr: the Address of the agent
packages/fetchai/skills/tac_control/game.py
unregister_agent
bryanchriswhite/agents-aea
126
python
a6cfc1218c0ad505e8e8d617d72b9f514061bfee7aed521062f8223a43947aef
def __init__(self, **kwargs: Any) -> None:
    """Instantiate the search class."""
    super().__init__(**kwargs)
    self._phase = Phase.PRE_GAME
    self._registration = Registration()
    self._conf = None
    self._initialization = None
    self._initial_agent_states = None
    self._current_agent_states = None
    self._transactions = Transactions()
    self._already_minted_agents = []
    self._is_allowed_to_mint = True
    self.is_registered_agent = False
Instantiate the search class.
packages/fetchai/skills/tac_control/game.py
__init__
bryanchriswhite/agents-aea
126
python
6c7cf583b0a98bc4f2e1c0a7d48ee797a479d8e126eb22ecaeadd91cc8b4ac5d
@property
def phase(self) -> Phase:
    """Get the game phase."""
    return self._phase
Get the game phase.
packages/fetchai/skills/tac_control/game.py
phase
bryanchriswhite/agents-aea
126
python
ce89d74e9701446c3668446226f3e792291da085f8564d99813abca044c9dafa
@phase.setter
def phase(self, phase: Phase) -> None:
    """Set the game phase."""
    self.context.logger.debug('Game phase set to: {}'.format(phase))
    self._phase = phase
Set the game phase.
packages/fetchai/skills/tac_control/game.py
phase
bryanchriswhite/agents-aea
126
python
114f90dcbfa54e1384d01d3156ccf67eb0682afde9d44004ef3e8974b0d0ed7b
@property
def registration(self) -> Registration:
    """Get the registration."""
    return self._registration
Get the registration.
packages/fetchai/skills/tac_control/game.py
registration
bryanchriswhite/agents-aea
126
python
a8f0a7b4b0f3d4e1908838aa8a080fa12ea4664e0fd156784e33bed0e03b6bfb
@property
def conf(self) -> Configuration:
    """Get game configuration."""
    if self._conf is None:
        raise AEAEnforceError('Call create before calling configuration.')
    return self._conf
Get game configuration.
packages/fetchai/skills/tac_control/game.py
conf
bryanchriswhite/agents-aea
126
python
a71de6ba8104093fb51e7d8371d8945cf823d8a3daeaa5c783b3a077156e7320
@property
def initialization(self) -> Initialization:
    """Get game initialization."""
    if self._initialization is None:
        raise AEAEnforceError('Call create before calling initialization.')
    return self._initialization
Get game initialization.
packages/fetchai/skills/tac_control/game.py
initialization
bryanchriswhite/agents-aea
126
python
031b29d0daa7db9e5e2421c28d2d0c09e5e554a239f3e6e42874e7cca741c5cf
@property
def initial_agent_states(self) -> Dict[str, AgentState]:
    """Get initial state of each agent."""
    if self._initial_agent_states is None:
        raise AEAEnforceError('Call create before calling initial_agent_states.')
    return self._initial_agent_states
Get initial state of each agent.
packages/fetchai/skills/tac_control/game.py
initial_agent_states
bryanchriswhite/agents-aea
126
python
2a04f897886f39c7499a65acf298c0bd1002e793da7aac2ba6c9544d1993a49f
@property
def current_agent_states(self) -> Dict[str, AgentState]:
    """Get current state of each agent."""
    if self._current_agent_states is None:
        raise AEAEnforceError('Call create before calling current_agent_states.')
    return self._current_agent_states
Get current state of each agent.
packages/fetchai/skills/tac_control/game.py
current_agent_states
bryanchriswhite/agents-aea
126
python
bbe5b84c38d272a26f54810fdf05eca7cbe0e7604fd249015c65d8551a534b38
@property
def transactions(self) -> Transactions:
    """Get the transactions."""
    return self._transactions
Get the transactions.
packages/fetchai/skills/tac_control/game.py
transactions
bryanchriswhite/agents-aea
126
python
31898f1aaa1cac53db6f18b924d8dfbc42f421bde765211adb4e001d8febc6f0
def create(self) -> None:
    """Create a game."""
    enforce(self.phase != Phase.GAME, 'A game phase is already active.')
    self._phase = Phase.GAME_SETUP
    self._generate()
Create a game.
packages/fetchai/skills/tac_control/game.py
create
bryanchriswhite/agents-aea
126
python
a1dc7c0dd810d6777e56cf1e973deece0c53542e0855e8875b81d0250248a553
@property
def is_allowed_to_mint(self) -> bool:
    """Get is allowed to mint."""
    return self._is_allowed_to_mint
Get is allowed to mint.
packages/fetchai/skills/tac_control/game.py
is_allowed_to_mint
bryanchriswhite/agents-aea
126
python
1048e4e825cf6edfb006e2dff536764672e5a45f051c412e283f68818c6b5db3
@is_allowed_to_mint.setter
def is_allowed_to_mint(self, is_allowed_to_mint: bool) -> None:
    """Get is allowed to mint."""
    self._is_allowed_to_mint = is_allowed_to_mint
Get is allowed to mint.
packages/fetchai/skills/tac_control/game.py
is_allowed_to_mint
bryanchriswhite/agents-aea
126
python
b88053b3e1e4b9b7087da01195bf6d7ce44067c207275bcce5b12dcb8246c6b5
def get_next_agent_state_for_minting(self) -> Optional[AgentState]:
    """Get next agent state for token minting."""
    result = None
    for agent_addr, agent_state in self.initial_agent_states.items():
        if agent_addr in self._already_minted_agents:
            continue
        self._already_minted_agents.append(agent_addr)
        result = agent_state
        break
    return result
Get next agent state for token minting.
packages/fetchai/skills/tac_control/game.py
get_next_agent_state_for_minting
bryanchriswhite/agents-aea
126
python
5484b3c51f94bc3771caa274c2ca6406ff4274b06985914e719e21577d73891c
def _generate(self) -> None:
    """Generate a TAC game."""
    parameters = cast(Parameters, self.context.parameters)
    self._conf = Configuration(
        parameters.version_id,
        parameters.tx_fee,
        self.registration.agent_addr_to_name,
        parameters.currency_id_to_name,
        parameters.good_id_to_name,
    )
    scaling_factor = determine_scaling_factor(parameters.money_endowment)
    agent_addr_to_currency_endowments = generate_currency_endowments(
        list(self.conf.agent_addr_to_name.keys()),
        list(self.conf.currency_id_to_name.keys()),
        parameters.money_endowment,
    )
    agent_addr_to_exchange_params = generate_exchange_params(
        list(self.conf.agent_addr_to_name.keys()),
        list(self.conf.currency_id_to_name.keys()),
    )
    agent_addr_to_good_endowments = generate_good_endowments(
        list(self.conf.agent_addr_to_name.keys()),
        list(self.conf.good_id_to_name.keys()),
        parameters.base_good_endowment,
        parameters.lower_bound_factor,
        parameters.upper_bound_factor,
    )
    agent_addr_to_utility_params = generate_utility_params(
        list(self.conf.agent_addr_to_name.keys()),
        list(self.conf.good_id_to_name.keys()),
        scaling_factor,
    )
    (
        good_id_to_eq_prices,
        agent_addr_to_eq_good_holdings,
        agent_addr_to_eq_currency_holdings,
    ) = generate_equilibrium_prices_and_holdings(
        agent_addr_to_good_endowments,
        agent_addr_to_utility_params,
        agent_addr_to_currency_endowments,
        agent_addr_to_exchange_params,
        scaling_factor,
    )
    self._initialization = Initialization(
        agent_addr_to_currency_endowments,
        agent_addr_to_exchange_params,
        agent_addr_to_good_endowments,
        agent_addr_to_utility_params,
        good_id_to_eq_prices,
        agent_addr_to_eq_good_holdings,
        agent_addr_to_eq_currency_holdings,
    )
    self._initial_agent_states = dict(
        (agent_addr, AgentState(
            agent_addr,
            self.initialization.agent_addr_to_currency_endowments[agent_addr],
            self.initialization.agent_addr_to_exchange_params[agent_addr],
            self.initialization.agent_addr_to_good_endowments[agent_addr],
            self.initialization.agent_addr_to_utility_params[agent_addr],
        ))
        for agent_addr in self.conf.agent_addr_to_name.keys()
    )
    self._current_agent_states = dict(
        (agent_addr, AgentState(
            agent_addr,
            self.initialization.agent_addr_to_currency_endowments[agent_addr],
            self.initialization.agent_addr_to_exchange_params[agent_addr],
            self.initialization.agent_addr_to_good_endowments[agent_addr],
            self.initialization.agent_addr_to_utility_params[agent_addr],
        ))
        for agent_addr in self.conf.agent_addr_to_name.keys()
    )
Generate a TAC game.
packages/fetchai/skills/tac_control/game.py
_generate
bryanchriswhite/agents-aea
126
python
613279ddbfcddf841a43c6be6ab3ca149f4d0aec665561e35067ecb9293c65e4
@property
def holdings_summary(self) -> str:
    """Get holdings summary (a string representing the holdings for every agent)."""
    result = '\n' + 'Current good & money allocation & score: \n'
    for agent_addr, agent_state in self.current_agent_states.items():
        result = result + '- ' + self.conf.agent_addr_to_name[agent_addr] + ':' + '\n'
        for good_id, quantity in agent_state.quantities_by_good_id.items():
            result += '    ' + self.conf.good_id_to_name[good_id] + ': ' + str(quantity) + '\n'
        for currency_id, amount in agent_state.amount_by_currency_id.items():
            result += '    ' + self.conf.currency_id_to_name[currency_id] + ': ' + str(amount) + '\n'
        result += '    score: ' + str(round(agent_state.get_score(), 2)) + '\n'
    result = result + '\n'
    return result
Get holdings summary (a string representing the holdings for every agent).
packages/fetchai/skills/tac_control/game.py
holdings_summary
bryanchriswhite/agents-aea
126
python
dca92a353b80c60f6bda13dcdb3989ea22b8156bfed256b160def85196c52e11
@property
def equilibrium_summary(self) -> str:
    """Get equilibrium summary."""
    result = '\n' + 'Equilibrium prices: \n'
    for good_id, eq_price in self.initialization.good_id_to_eq_prices.items():
        result = result + self.conf.good_id_to_name[good_id] + ' ' + str(eq_price) + '\n'
    result = result + '\n'
    result = result + 'Equilibrium good allocation: \n'
    for agent_addr, eq_allocations in self.initialization.agent_addr_to_eq_good_holdings.items():
        result = result + '- ' + self.conf.agent_addr_to_name[agent_addr] + ':\n'
        for good_id, quantity in eq_allocations.items():
            result = result + '    ' + self.conf.good_id_to_name[good_id] + ': ' + str(quantity) + '\n'
    result = result + '\n'
    result = result + 'Equilibrium money allocation: \n'
    for agent_addr, eq_allocations in self.initialization.agent_addr_to_eq_currency_holdings.items():
        result = result + '- ' + self.conf.agent_addr_to_name[agent_addr] + ':\n'
        for currency_id, quantity in eq_allocations.items():
            result = result + '    ' + self.conf.currency_id_to_name[currency_id] + ': ' + str(quantity) + '\n'
    result = result + '\n'
    return result
Get equilibrium summary.
packages/fetchai/skills/tac_control/game.py
equilibrium_summary
bryanchriswhite/agents-aea
126
python
ab3ada7309fde0db9ee2d4af258d1606457d57cc55e33b910d5202e0f0b1e19d
def is_transaction_valid(self, tx: Transaction) -> bool:
    """
    Check whether the transaction is signed correctly and valid given the state of the game.

    :param tx: the transaction.
    :return: True if the transaction is valid, False otherwise.
    :raises: AEAEnforceError: if the data in the transaction are not allowed (e.g. negative amount).
    """
    sender_state = self.current_agent_states[tx.sender_address]
    counterparty_state = self.current_agent_states[tx.counterparty_address]
    result = tx.has_matching_signatures()
    result = result and sender_state.is_consistent_transaction(tx)
    result = result and counterparty_state.is_consistent_transaction(tx)
    return result
Check whether the transaction is signed correctly and valid given the state of the game. :param tx: the transaction. :return: True if the transaction is valid, False otherwise. :raises: AEAEnforceError: if the data in the transaction are not allowed (e.g. negative amount).
packages/fetchai/skills/tac_control/game.py
is_transaction_valid
bryanchriswhite/agents-aea
126
python
d5ad0c75abea4f56949c22c055bc807c4a4eb5fe92a8dc634587a28a79eb4067
def settle_transaction(self, tx: Transaction) -> None:
    """
    Settle a valid transaction.

    :param tx: the game transaction.
    :raises: AEAEnforceError if the transaction is not valid.
    """
    if self._current_agent_states is None:
        raise AEAEnforceError('Call create before calling current_agent_states.')
    enforce(self.is_transaction_valid(tx), 'Transaction is not valid.')
    sender_state = self.current_agent_states[tx.sender_address]
    counterparty_state = self.current_agent_states[tx.counterparty_address]
    new_sender_state = sender_state.apply([tx])
    new_counterparty_state = counterparty_state.apply([tx])
    self.transactions.add(tx)
    self._current_agent_states.update({tx.sender_address: new_sender_state})
    self._current_agent_states.update({tx.counterparty_address: new_counterparty_state})
Settle a valid transaction. :param tx: the game transaction. :raises: AEAEnforceError if the transaction is not valid.
packages/fetchai/skills/tac_control/game.py
settle_transaction
bryanchriswhite/agents-aea
126
python
94efd01da7d1df9b693f379d3ac0f14ced8a2620a31e4d06c7f3da53301d6b8e
def get_location_description(self) -> Description:
    """
    Get the location description.

    :return: a description of the agent's location
    """
    description = Description(
        self.context.parameters.agent_location, data_model=AGENT_LOCATION_MODEL
    )
    return description
Get the location description. :return: a description of the agent's location
packages/fetchai/skills/tac_control/game.py
get_location_description
bryanchriswhite/agents-aea
126
python
4389f5d5a3cf2914e8c0ab07bbbfeecfe658f6ac553ed6d27c84a523d9972070
def get_register_tac_description(self) -> Description:
    """Get the tac description for registering."""
    description = Description(
        self.context.parameters.set_service_data, data_model=AGENT_SET_SERVICE_MODEL
    )
    return description
Get the tac description for registering.
packages/fetchai/skills/tac_control/game.py
get_register_tac_description
bryanchriswhite/agents-aea
126
python
4003d9e9a43e8840fe95b2a7264f96051f6557cb736e2fcc8b0753925475f46f
def get_register_personality_description(self) -> Description:
    """
    Get the register personality description.

    :return: a description of the personality
    """
    description = Description(
        self.context.parameters.set_personality_data, data_model=AGENT_PERSONALITY_MODEL
    )
    return description
Get the register personality description. :return: a description of the personality
packages/fetchai/skills/tac_control/game.py
get_register_personality_description
bryanchriswhite/agents-aea
126
python
b8ec62da2a205f270222f91511de5bbff9e146d663c0b513749da73aea9c9e4d
def get_register_classification_description(self) -> Description:
    """
    Get the register classification description.

    :return: a description of the classification
    """
    description = Description(
        self.context.parameters.set_classification, data_model=AGENT_PERSONALITY_MODEL
    )
    return description
Get the register classification description. :return: a description of the classification
packages/fetchai/skills/tac_control/game.py
get_register_classification_description
bryanchriswhite/agents-aea
126
python
0dae63cd56cb2fd7cf34aa19102e30a23135aade4262356defedc6e15c5a2efa
def get_unregister_tac_description(self) -> Description:
    """Get the tac description for unregistering."""
    description = Description(
        self.context.parameters.remove_service_data, data_model=AGENT_REMOVE_SERVICE_MODEL
    )
    return description
Get the tac description for unregistering.
packages/fetchai/skills/tac_control/game.py
get_unregister_tac_description
bryanchriswhite/agents-aea
126
python
927c3209022aa00fa703928fd2ca0671e9ebddb1b566c165ceae58893fc5a24c
def rollout(process_index: int, rollout_config: RolloutConfig, config: CNNConfigBase) -> None:
    """Produce episode data using epsilon-greedy.
    Args:
        process_index: Index of process.
        rollout_config: Configuration of rollout.
        config: Configuration of DQN agent.
    """
    assert rollout_config.out.exists()
    if process_index == 0:
        pprint.pprint(asdict(rollout_config))
        pprint.pprint(asdict(config))
    policy = Policy(config=config)
    if rollout_config.snap:
        assert rollout_config.snap.exists()
        learner = Learner(config=config)
        learner.load_online_model(rollout_config.snap)
        policy.update_model_param(learner.get_model_param(), only_online_model=True)
    explorer = Explorer(
        action_size=config.model.action_size,
        init_eps=rollout_config.eps,
        init_beta=config.intrinsic_reward.reward_ratio,
        use_intrinsic_reward=False,
        use_ucb=False,
        apply_value_scaling=config.apply_value_scaling,
    )
    rollout_logger = RolloutLogger(
        out_dir=rollout_config.out,
        render=rollout_config.render,
        filename_header=str(process_index),
    )
    env = make_env(config.env)
    render = lambda: env.render(mode='rgb_array')
    for i in range(rollout_config.num_episode):
        print(f'process: {process_index} # episode = {i}')
        obs = env.reset()
        done = False
        rollout_logger.on_reset(obs, render() if rollout_config.render else None)
        while not done:
            prediction, intrinsic_reward = policy.infer([obs])
            q_e, q_i = prediction.as_numpy_tuple()
            action = explorer.select_action(q_e[0])
            obs, reward, done, info = env.step(action)
            rollout_logger.on_step(
                action=action,
                q_e=q_e[0].tolist(),
                q_i=q_i[0].tolist() if q_i is not None else None,
                intrinsic_reward=intrinsic_reward[0] if intrinsic_reward is not None else None,
                obs=obs,
                reward=reward,
                info=info,
                done=done,
                image_frame=render() if rollout_config.render else None,
            )
Produce episode data using epsilon-greedy. Args: process_index: Index of process. rollout_config: Configuration of rollout. config: Configuration of DQN agent.
bin/rollout_cnn.py
rollout
DwangoMediaVillage/marltas_core
9
python
def rollout(process_index: int, rollout_config: RolloutConfig, config: CNNConfigBase) -> None: 'Produce episode data using epsilon-greedy.\n Args:\n process_index: Index of process.\n rollout_config: Configuration of rollout.\n config: Configuration of DQN agent.\n ' assert rollout_config.out.exists() if (process_index == 0): pprint.pprint(asdict(rollout_config)) pprint.pprint(asdict(config)) policy = Policy(config=config) if rollout_config.snap: assert rollout_config.snap.exists() learner = Learner(config=config) learner.load_online_model(rollout_config.snap) policy.update_model_param(learner.get_model_param(), only_online_model=True) explorer = Explorer(action_size=config.model.action_size, init_eps=rollout_config.eps, init_beta=config.intrinsic_reward.reward_ratio, use_intrinsic_reward=False, use_ucb=False, apply_value_scaling=config.apply_value_scaling) rollout_logger = RolloutLogger(out_dir=rollout_config.out, render=rollout_config.render, filename_header=str(process_index)) env = make_env(config.env) render = (lambda : env.render(mode='rgb_array')) for i in range(rollout_config.num_episode): print(f'process: {process_index} # episode = {i}') obs = env.reset() done = False rollout_logger.on_reset(obs, (render() if rollout_config.render else None)) while (not done): (prediction, intrinsic_reward) = policy.infer([obs]) (q_e, q_i) = prediction.as_numpy_tuple() action = explorer.select_action(q_e[0]) (obs, reward, done, info) = env.step(action) rollout_logger.on_step(action=action, q_e=q_e[0].tolist(), q_i=(q_i[0].tolist() if (q_i is not None) else None), intrinsic_reward=(intrinsic_reward[0] if (intrinsic_reward is not None) else None), obs=obs, reward=reward, info=info, done=done, image_frame=(render() if rollout_config.render else None))
def rollout(process_index: int, rollout_config: RolloutConfig, config: CNNConfigBase) -> None: 'Produce episode data using epsilon-greedy.\n Args:\n process_index: Index of process.\n rollout_config: Configuration of rollout.\n config: Configuration of DQN agent.\n ' assert rollout_config.out.exists() if (process_index == 0): pprint.pprint(asdict(rollout_config)) pprint.pprint(asdict(config)) policy = Policy(config=config) if rollout_config.snap: assert rollout_config.snap.exists() learner = Learner(config=config) learner.load_online_model(rollout_config.snap) policy.update_model_param(learner.get_model_param(), only_online_model=True) explorer = Explorer(action_size=config.model.action_size, init_eps=rollout_config.eps, init_beta=config.intrinsic_reward.reward_ratio, use_intrinsic_reward=False, use_ucb=False, apply_value_scaling=config.apply_value_scaling) rollout_logger = RolloutLogger(out_dir=rollout_config.out, render=rollout_config.render, filename_header=str(process_index)) env = make_env(config.env) render = (lambda : env.render(mode='rgb_array')) for i in range(rollout_config.num_episode): print(f'process: {process_index} # episode = {i}') obs = env.reset() done = False rollout_logger.on_reset(obs, (render() if rollout_config.render else None)) while (not done): (prediction, intrinsic_reward) = policy.infer([obs]) (q_e, q_i) = prediction.as_numpy_tuple() action = explorer.select_action(q_e[0]) (obs, reward, done, info) = env.step(action) rollout_logger.on_step(action=action, q_e=q_e[0].tolist(), q_i=(q_i[0].tolist() if (q_i is not None) else None), intrinsic_reward=(intrinsic_reward[0] if (intrinsic_reward is not None) else None), obs=obs, reward=reward, info=info, done=done, image_frame=(render() if rollout_config.render else None))<|docstring|>Produce episode data using epsilon-greedy. Args: process_index: Index of process. rollout_config: Configuration of rollout. config: Configuration of DQN agent.<|endoftext|>
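The rollout above delegates action choice to the project's Explorer. As an illustration of the epsilon-greedy rule it applies, here is a self-contained NumPy sketch (not the marltas_core API):

import numpy as np

def epsilon_greedy(q_values: np.ndarray, eps: float, rng: np.random.Generator) -> int:
    # With probability eps pick a random action, otherwise the greedy one.
    if rng.random() < eps:
        return int(rng.integers(len(q_values)))
    return int(np.argmax(q_values))

rng = np.random.default_rng(0)
q_e = np.array([0.1, 0.7, 0.2])
actions = [epsilon_greedy(q_e, eps=0.1, rng=rng) for _ in range(5)]
print(actions)  # mostly action 1, occasionally a random action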
bafaec14ccb55ea0ee323b4e99848b5b3d254375ab2eb5f35fcd67e3deb1689d
def main(rollout_config: RolloutConfig, config: CNNConfigBase) -> None: 'Kick `rollout` using Multiprocessing' if (rollout_config.num_process == 1): rollout(process_index=0, rollout_config=rollout_config, config=config) else: import functools import multiprocessing with multiprocessing.Pool(processes=rollout_config.num_process) as pool: [p for p in pool.imap_unordered(functools.partial(rollout, rollout_config=rollout_config, config=config), range(rollout_config.num_process))]
Kick `rollout` using Multiprocessing
bin/rollout_cnn.py
main
DwangoMediaVillage/marltas_core
9
python
def main(rollout_config: RolloutConfig, config: CNNConfigBase) -> None: if (rollout_config.num_process == 1): rollout(process_index=0, rollout_config=rollout_config, config=config) else: import functools import multiprocessing with multiprocessing.Pool(processes=rollout_config.num_process) as pool: [p for p in pool.imap_unordered(functools.partial(rollout, rollout_config=rollout_config, config=config), range(rollout_config.num_process))]
def main(rollout_config: RolloutConfig, config: CNNConfigBase) -> None: if (rollout_config.num_process == 1): rollout(process_index=0, rollout_config=rollout_config, config=config) else: import functools import multiprocessing with multiprocessing.Pool(processes=rollout_config.num_process) as pool: [p for p in pool.imap_unordered(functools.partial(rollout, rollout_config=rollout_config, config=config), range(rollout_config.num_process))]<|docstring|>Kick `rollout` using Multiprocessing<|endoftext|>
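main() above fans rollout out over several processes with functools.partial and multiprocessing.Pool.imap_unordered. A self-contained sketch of the same pattern, with a stand-in worker in place of the real rollout:

import functools
import multiprocessing

def worker(process_index: int, tag: str) -> str:
    # Stand-in for rollout(process_index, rollout_config=..., config=...).
    return f"{tag}-{process_index}"

if __name__ == "__main__":
    func = functools.partial(worker, tag="episode")
    with multiprocessing.Pool(processes=4) as pool:
        # imap_unordered yields results as workers finish, in arbitrary order.
        results = list(pool.imap_unordered(func, range(4)))
    print(sorted(results))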
6810871e14cd94b043a6a034f78303d9db04560ff1449f5b6340fc8e805dfdb9
def __init__(self, min_freq, max_freq, min_duty=0, max_duty=100): 'Create a PWM provider' self.__logger = logging.getLogger('hw.pwm.pwm-provider') self._duty = 0 self._min_duty = min_duty self._max_duty = max_duty self._min_freq = min_freq self._max_freq = max_freq
Create a PWM provider
src/hw/pwm/pwm_provider.py
__init__
geoff-coppertop/train-turnout-control-python
0
python
def __init__(self, min_freq, max_freq, min_duty=0, max_duty=100): self.__logger = logging.getLogger('hw.pwm.pwm-provider') self._duty = 0 self._min_duty = min_duty self._max_duty = max_duty self._min_freq = min_freq self._max_freq = max_freq
def __init__(self, min_freq, max_freq, min_duty=0, max_duty=100): self.__logger = logging.getLogger('hw.pwm.pwm-provider') self._duty = 0 self._min_duty = min_duty self._max_duty = max_duty self._min_freq = min_freq self._max_freq = max_freq<|docstring|>Create a PWM provider<|endoftext|>
c846483d7d8ab5f8c3d52ad5512f59392bcc5905ebceba14f026c1b01f6392c0
def set_duty(self, duty): 'Set the duty cycle of the PWM provider' self.__logger.info('Duty Cycle: %d', duty) self._duty = duty
Set the duty cycle of the PWM provider
src/hw/pwm/pwm_provider.py
set_duty
geoff-coppertop/train-turnout-control-python
0
python
def set_duty(self, duty): self.__logger.info('Duty Cycle: %d', duty) self._duty = duty
def set_duty(self, duty): self.__logger.info('Duty Cycle: %d', duty) self._duty = duty<|docstring|>Set the duty cycle of the PWM provider<|endoftext|>
8fe1ba7a4b538686254e12988c9e034ac7bb2d422778c38483a8e2ac56615acf
def set_freq(self, freq): 'Set the frequency of the PWM provider' raise NotImplementedError("You're trying to use an abstract method to get frequency.")
Set the frequency of the PWM provider
src/hw/pwm/pwm_provider.py
set_freq
geoff-coppertop/train-turnout-control-python
0
python
def set_freq(self, freq): raise NotImplementedError("You're trying to use an abstract method to get frequency.")
def set_freq(self, freq): raise NotImplementedError("You're trying to use an abstract method to get frequency.")<|docstring|>Set the frequency of the PWM provider<|endoftext|>
24db20a14cb56af2be891309ed9130be5af2e9be743352a45a188336bf0e54f2
def turn_on(self): 'Turn on the PWM provider at the set duty cycle' raise NotImplementedError("You're trying to use an abstract method to turn the output on.")
Turn on the PWM provider at the set duty cycle
src/hw/pwm/pwm_provider.py
turn_on
geoff-coppertop/train-turnout-control-python
0
python
def turn_on(self): raise NotImplementedError("You're trying to use an abstract method to turn the output on.")
def turn_on(self): raise NotImplementedError("You're trying to use an abstract method to turn the output on.")<|docstring|>Turn on the PWM provider at the set duty cycle<|endoftext|>
8f1203d86c6efeb4343c2cd4e833f3f9045b42d7bd7893f55aff8daca24010fc
def turn_off(self): 'Turn off the PWM provider' raise NotImplementedError("You're trying to use an abstract method to turn the output off.")
Turn off the PWM provider
src/hw/pwm/pwm_provider.py
turn_off
geoff-coppertop/train-turnout-control-python
0
python
def turn_off(self): raise NotImplementedError("You're trying to use an abstract method to turn the output off.")
def turn_off(self): raise NotImplementedError("You're trying to use an abstract method to turn the output off.")<|docstring|>Turn off the PWM provider<|endoftext|>
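set_freq, turn_on and turn_off above all raise NotImplementedError, so the provider acts as an abstract base. A hypothetical concrete subclass might look like the sketch below; the base class name PWMProvider is assumed from the file name and is not shown in the records above.

import logging

class DummyPWMProvider(PWMProvider):  # PWMProvider = the abstract base defined above (assumed name)
    def __init__(self, min_freq, max_freq):
        super().__init__(min_freq, max_freq)
        self._freq = min_freq
        self._on = False

    def set_freq(self, freq):
        # Clamp the requested frequency into the supported range.
        self._freq = max(self._min_freq, min(self._max_freq, freq))

    def turn_on(self):
        self._on = True
        logging.getLogger(__name__).info("PWM on: %d Hz at %d%% duty", self._freq, self._duty)

    def turn_off(self):
        self._on = False
        logging.getLogger(__name__).info("PWM off")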
02e5c1ce9ec1e0deb315442ebbc19b718fbcdb1cd3bab734c4bd072c85f7057e
def load_transaction_items(filename, date_column='order_date', order_id_column='order_id', customer_id_column='customer_id', sku_column='sku', quantity_column='quantity', unit_price_column='unit_price'): "Load a CSV of transactional item data, sets standard column names, and calculates line price.\n\n Args:\n filename (str): Filename and path of CSV file containing transaction items.\n date_column (str, optional): Name of order date column, default is order_date\n order_id_column (str, optional): Name of order ID column, default is order_id\n customer_id_column (str, optional): Name of customer ID column, default is customer_id\n sku_column (str, optional): Name of SKU column, default is sku\n quantity_column (int, optional): Name of quantity column, default is quantity\n unit_price_column (float, optional): Name of unit price column, default is unit_price\n\n Usage:\n transaction_items = rt.load_data('data/input/transaction_items_non_standard_names.csv',\n date_column='InvoiceDate',\n order_id_column='OrderId',\n customer_id_column='CustomerId',\n sku_column='VariantId',\n quantity_column='Qty',\n unit_price_column='Price'\n )\n\n Returns:\n A Pandas dataframe containing the same data with the column names changed to the\n standardised names used throughout RetailTools, if they do not already match, and\n the order_date column correctly set as a datetime column. If the user provides a\n CSV file in which the column names are already set to these values, it it not a\n requirement to provide them.\n\n " df = pd.read_csv(filename, parse_dates=[date_column]) df = df.rename(columns={date_column: 'order_date', order_id_column: 'order_id', customer_id_column: 'customer_id', sku_column: 'sku', quantity_column: 'quantity', unit_price_column: 'unit_price'}) df['line_price'] = round((df['quantity'] * df['unit_price']), 2) return df
Load a CSV of transactional item data, sets standard column names, and calculates line price. Args: filename (str): Filename and path of CSV file containing transaction items. date_column (str, optional): Name of order date column, default is order_date order_id_column (str, optional): Name of order ID column, default is order_id customer_id_column (str, optional): Name of customer ID column, default is customer_id sku_column (str, optional): Name of SKU column, default is sku quantity_column (int, optional): Name of quantity column, default is quantity unit_price_column (float, optional): Name of unit price column, default is unit_price Usage: transaction_items = rt.load_data('data/input/transaction_items_non_standard_names.csv', date_column='InvoiceDate', order_id_column='OrderId', customer_id_column='CustomerId', sku_column='VariantId', quantity_column='Qty', unit_price_column='Price' ) Returns: A Pandas dataframe containing the same data with the column names changed to the standardised names used throughout RetailTools, if they do not already match, and the order_date column correctly set as a datetime column. If the user provides a CSV file in which the column names are already set to these values, it is not a requirement to provide them.
ecommercetools/utilities/tools.py
load_transaction_items
admariner/ecommercetools
43
python
def load_transaction_items(filename, date_column='order_date', order_id_column='order_id', customer_id_column='customer_id', sku_column='sku', quantity_column='quantity', unit_price_column='unit_price'): "Load a CSV of transactional item data, sets standard column names, and calculates line price.\n\n Args:\n filename (str): Filename and path of CSV file containing transaction items.\n date_column (str, optional): Name of order date column, default is order_date\n order_id_column (str, optional): Name of order ID column, default is order_id\n customer_id_column (str, optional): Name of customer ID column, default is customer_id\n sku_column (str, optional): Name of SKU column, default is sku\n quantity_column (int, optional): Name of quantity column, default is quantity\n unit_price_column (float, optional): Name of unit price column, default is unit_price\n\n Usage:\n transaction_items = rt.load_data('data/input/transaction_items_non_standard_names.csv',\n date_column='InvoiceDate',\n order_id_column='OrderId',\n customer_id_column='CustomerId',\n sku_column='VariantId',\n quantity_column='Qty',\n unit_price_column='Price'\n )\n\n Returns:\n A Pandas dataframe containing the same data with the column names changed to the\n standardised names used throughout RetailTools, if they do not already match, and\n the order_date column correctly set as a datetime column. If the user provides a\n CSV file in which the column names are already set to these values, it it not a\n requirement to provide them.\n\n " df = pd.read_csv(filename, parse_dates=[date_column]) df = df.rename(columns={date_column: 'order_date', order_id_column: 'order_id', customer_id_column: 'customer_id', sku_column: 'sku', quantity_column: 'quantity', unit_price_column: 'unit_price'}) df['line_price'] = round((df['quantity'] * df['unit_price']), 2) return df
def load_transaction_items(filename, date_column='order_date', order_id_column='order_id', customer_id_column='customer_id', sku_column='sku', quantity_column='quantity', unit_price_column='unit_price'): "Load a CSV of transactional item data, sets standard column names, and calculates line price.\n\n Args:\n filename (str): Filename and path of CSV file containing transaction items.\n date_column (str, optional): Name of order date column, default is order_date\n order_id_column (str, optional): Name of order ID column, default is order_id\n customer_id_column (str, optional): Name of customer ID column, default is customer_id\n sku_column (str, optional): Name of SKU column, default is sku\n quantity_column (int, optional): Name of quantity column, default is quantity\n unit_price_column (float, optional): Name of unit price column, default is unit_price\n\n Usage:\n transaction_items = rt.load_data('data/input/transaction_items_non_standard_names.csv',\n date_column='InvoiceDate',\n order_id_column='OrderId',\n customer_id_column='CustomerId',\n sku_column='VariantId',\n quantity_column='Qty',\n unit_price_column='Price'\n )\n\n Returns:\n A Pandas dataframe containing the same data with the column names changed to the\n standardised names used throughout RetailTools, if they do not already match, and\n the order_date column correctly set as a datetime column. If the user provides a\n CSV file in which the column names are already set to these values, it it not a\n requirement to provide them.\n\n " df = pd.read_csv(filename, parse_dates=[date_column]) df = df.rename(columns={date_column: 'order_date', order_id_column: 'order_id', customer_id_column: 'customer_id', sku_column: 'sku', quantity_column: 'quantity', unit_price_column: 'unit_price'}) df['line_price'] = round((df['quantity'] * df['unit_price']), 2) return df<|docstring|>Load a CSV of transactional item data, sets standard column names, and calculates line price. Args: filename (str): Filename and path of CSV file containing transaction items. date_column (str, optional): Name of order date column, default is order_date order_id_column (str, optional): Name of order ID column, default is order_id customer_id_column (str, optional): Name of customer ID column, default is customer_id sku_column (str, optional): Name of SKU column, default is sku quantity_column (int, optional): Name of quantity column, default is quantity unit_price_column (float, optional): Name of unit price column, default is unit_price Usage: transaction_items = rt.load_data('data/input/transaction_items_non_standard_names.csv', date_column='InvoiceDate', order_id_column='OrderId', customer_id_column='CustomerId', sku_column='VariantId', quantity_column='Qty', unit_price_column='Price' ) Returns: A Pandas dataframe containing the same data with the column names changed to the standardised names used throughout RetailTools, if they do not already match, and the order_date column correctly set as a datetime column. If the user provides a CSV file in which the column names are already set to these values, it it not a requirement to provide them.<|endoftext|>
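Because pandas.read_csv also accepts file-like objects, the loader can be exercised without a CSV on disk. A usage sketch, assuming the function is importable from ecommercetools.utilities.tools as the path suggests:

import io
from ecommercetools.utilities.tools import load_transaction_items  # assumed import path

csv = io.StringIO(
    "InvoiceDate,OrderId,CustomerId,VariantId,Qty,Price\n"
    "2021-01-05,1001,C1,SKU-A,2,9.99\n"
    "2021-01-06,1002,C2,SKU-B,1,24.50\n"
)
df = load_transaction_items(
    csv,
    date_column="InvoiceDate",
    order_id_column="OrderId",
    customer_id_column="CustomerId",
    sku_column="VariantId",
    quantity_column="Qty",
    unit_price_column="Price",
)
# line_price is quantity * unit_price, e.g. 2 * 9.99 = 19.98
print(df[["order_id", "quantity", "unit_price", "line_price"]])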
02b94a0def17014b5280450ee95fe81edeb998069282f6d8ae0251e6c84ac0db
def load_sample_data(): 'Load the Online Retail dataset of transaction items and format for use within EcommerceTools functions.\n\n :return: Pandas dataframe.\n ' df = pd.read_csv('https://raw.githubusercontent.com/databricks/Spark-The-Definitive-Guide/master/data/retail-data/all/online-retail-dataset.csv', names=['order_id', 'sku', 'description', 'quantity', 'order_date', 'unit_price', 'customer_id', 'country'], skiprows=1, parse_dates=['order_date']) df['line_price'] = (df['unit_price'] * df['quantity']) return df
Load the Online Retail dataset of transaction items and format for use within EcommerceTools functions. :return: Pandas dataframe.
ecommercetools/utilities/tools.py
load_sample_data
admariner/ecommercetools
43
python
def load_sample_data(): 'Load the Online Retail dataset of transaction items and format for use within EcommerceTools functions.\n\n :return: Pandas dataframe.\n ' df = pd.read_csv('https://raw.githubusercontent.com/databricks/Spark-The-Definitive-Guide/master/data/retail-data/all/online-retail-dataset.csv', names=['order_id', 'sku', 'description', 'quantity', 'order_date', 'unit_price', 'customer_id', 'country'], skiprows=1, parse_dates=['order_date']) df['line_price'] = (df['unit_price'] * df['quantity']) return df
def load_sample_data(): 'Load the Online Retail dataset of transaction items and format for use within EcommerceTools functions.\n\n :return: Pandas dataframe.\n ' df = pd.read_csv('https://raw.githubusercontent.com/databricks/Spark-The-Definitive-Guide/master/data/retail-data/all/online-retail-dataset.csv', names=['order_id', 'sku', 'description', 'quantity', 'order_date', 'unit_price', 'customer_id', 'country'], skiprows=1, parse_dates=['order_date']) df['line_price'] = (df['unit_price'] * df['quantity']) return df<|docstring|>Load the Online Retail dataset of transaction items and format for use within EcommerceTools functions. :return: Pandas dataframe.<|endoftext|>
c7124420044b487d9af1df5f274af85b43a7f4f01b102730a3f421cf26ff92ae
def get_cumulative_count(df, group_column, count_column, sort_column): "Get the cumulative count of a column based on a GroupBy.\n\n Args:\n df (object): Pandas DataFrame.\n group_column (string): Column to group by.\n count_column (string): Column to count.\n sort_column (string): Column to sort by.\n\n Returns:\n Cumulative count of the column.\n\n Usage:\n df['running_total'] = get_cumulative_count(df, 'customer_id', 'order_id', 'date_created')\n " df = df.sort_values(by=sort_column, ascending=True) return df.groupby([group_column])[count_column].cumcount()
Get the cumulative count of a column based on a GroupBy. Args: df (object): Pandas DataFrame. group_column (string): Column to group by. count_column (string): Column to count. sort_column (string): Column to sort by. Returns: Cumulative count of the column. Usage: df['running_total'] = get_cumulative_count(df, 'customer_id', 'order_id', 'date_created')
ecommercetools/utilities/tools.py
get_cumulative_count
admariner/ecommercetools
43
python
def get_cumulative_count(df, group_column, count_column, sort_column): "Get the cumulative count of a column based on a GroupBy.\n\n Args:\n df (object): Pandas DataFrame.\n group_column (string): Column to group by.\n count_column (string): Column to count.\n sort_column (string): Column to sort by.\n\n Returns:\n Cumulative count of the column.\n\n Usage:\n df['running_total'] = get_cumulative_count(df, 'customer_id', 'order_id', 'date_created')\n " df = df.sort_values(by=sort_column, ascending=True) return df.groupby([group_column])[count_column].cumcount()
def get_cumulative_count(df, group_column, count_column, sort_column): "Get the cumulative count of a column based on a GroupBy.\n\n Args:\n df (object): Pandas DataFrame.\n group_column (string): Column to group by.\n count_column (string): Column to count.\n sort_column (string): Column to sort by.\n\n Returns:\n Cumulative count of the column.\n\n Usage:\n df['running_total'] = get_cumulative_count(df, 'customer_id', 'order_id', 'date_created')\n " df = df.sort_values(by=sort_column, ascending=True) return df.groupby([group_column])[count_column].cumcount()<|docstring|>Get the cumulative count of a column based on a GroupBy. Args: df (object): Pandas DataFrame. group_column (string): Column to group by. count_column (string): Column to count. sort_column (string): Column to sort by. Returns: Cumulative count of the column. Usage: df['running_total'] = get_cumulative_count(df, 'customer_id', 'order_id', 'date_created')<|endoftext|>
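The helper wraps a sort followed by groupby().cumcount(). The underlying pandas idiom on a toy frame:

import pandas as pd

df = pd.DataFrame({
    "customer_id": ["A", "A", "B", "A"],
    "order_id": [10, 11, 12, 13],
    "date_created": ["2021-01-01", "2021-01-03", "2021-01-02", "2021-01-05"],
})
df = df.sort_values(by="date_created", ascending=True)
# 0 for a customer's first order, 1 for the second, and so on.
df["running_total"] = df.groupby(["customer_id"])["order_id"].cumcount()
print(df)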
1d8c2ba7b859927a05590afacc9dc56dd5eac7a3a04f9e666da9b6bc9c1614d1
def get_previous_value(df, group_column, value_column): 'Group by a column and return the previous value of another column and assign value to a new column.\n\n Args:\n df (object): Pandas DataFrame.\n group_column (str): Column name to group by\n value_column (str): Column value to return.\n\n Returns:\n Original DataFrame with new column containing previous value of named column.\n ' df = df.copy() df = df.sort_values(by=[value_column], ascending=False) return df.groupby([group_column])[value_column].shift((- 1))
Group by a column and return the previous value of another column and assign value to a new column. Args: df (object): Pandas DataFrame. group_column (str): Column name to group by value_column (str): Column value to return. Returns: Original DataFrame with new column containing previous value of named column.
ecommercetools/utilities/tools.py
get_previous_value
admariner/ecommercetools
43
python
def get_previous_value(df, group_column, value_column): 'Group by a column and return the previous value of another column and assign value to a new column.\n\n Args:\n df (object): Pandas DataFrame.\n group_column (str): Column name to group by\n value_column (str): Column value to return.\n\n Returns:\n Original DataFrame with new column containing previous value of named column.\n ' df = df.copy() df = df.sort_values(by=[value_column], ascending=False) return df.groupby([group_column])[value_column].shift((- 1))
def get_previous_value(df, group_column, value_column): 'Group by a column and return the previous value of another column and assign value to a new column.\n\n Args:\n df (object): Pandas DataFrame.\n group_column (str): Column name to group by\n value_column (str): Column value to return.\n\n Returns:\n Original DataFrame with new column containing previous value of named column.\n ' df = df.copy() df = df.sort_values(by=[value_column], ascending=False) return df.groupby([group_column])[value_column].shift((- 1))<|docstring|>Group by a column and return the previous value of another column and assign value to a new column. Args: df (object): Pandas DataFrame. group_column (str): Column name to group by value_column (str): Column value to return. Returns: Original DataFrame with new column containing previous value of named column.<|endoftext|>
113fd97b90007a471d4ec9ec54d27a84dc5600a5a081bc6cd07267cf5334bf4b
def get_days_since_date(df, before_datetime, after_datetime): 'Return a new column containing the difference between two dates in days.\n\n Args:\n df (object): Pandas DataFrame.\n before_datetime (datetime): Earliest datetime (will convert value)\n after_datetime (datetime): Latest datetime (will convert value)\n\n Returns:\n New column value\n ' df = df.copy() df[before_datetime] = pd.to_datetime(df[before_datetime]) df[after_datetime] = pd.to_datetime(df[after_datetime]) diff = (df[after_datetime] - df[before_datetime]) return round((diff / np.timedelta64(1, 'D'))).fillna(0).astype(int)
Return a new column containing the difference between two dates in days. Args: df (object): Pandas DataFrame. before_datetime (datetime): Earliest datetime (will convert value) after_datetime (datetime): Latest datetime (will convert value) Returns: New column value
ecommercetools/utilities/tools.py
get_days_since_date
admariner/ecommercetools
43
python
def get_days_since_date(df, before_datetime, after_datetime): 'Return a new column containing the difference between two dates in days.\n\n Args:\n df (object): Pandas DataFrame.\n before_datetime (datetime): Earliest datetime (will convert value)\n after_datetime (datetime): Latest datetime (will convert value)\n\n Returns:\n New column value\n ' df = df.copy() df[before_datetime] = pd.to_datetime(df[before_datetime]) df[after_datetime] = pd.to_datetime(df[after_datetime]) diff = (df[after_datetime] - df[before_datetime]) return round((diff / np.timedelta64(1, 'D'))).fillna(0).astype(int)
def get_days_since_date(df, before_datetime, after_datetime): 'Return a new column containing the difference between two dates in days.\n\n Args:\n df (object): Pandas DataFrame.\n before_datetime (datetime): Earliest datetime (will convert value)\n after_datetime (datetime): Latest datetime (will convert value)\n\n Returns:\n New column value\n ' df = df.copy() df[before_datetime] = pd.to_datetime(df[before_datetime]) df[after_datetime] = pd.to_datetime(df[after_datetime]) diff = (df[after_datetime] - df[before_datetime]) return round((diff / np.timedelta64(1, 'D'))).fillna(0).astype(int)<|docstring|>Return a new column containing the difference between two dates in days. Args: df (object): Pandas DataFrame. before_datetime (datetime): Earliest datetime (will convert value) after_datetime (datetime): Latest datetime (will convert value) Returns: New column value<|endoftext|>
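get_days_since_date converts both columns to datetimes and divides the difference by numpy.timedelta64(1, 'D'). The same idiom in isolation:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "first_order": ["2021-01-01", "2021-03-15"],
    "last_order": ["2021-01-31", "2021-03-20"],
})
df["first_order"] = pd.to_datetime(df["first_order"])
df["last_order"] = pd.to_datetime(df["last_order"])
diff = df["last_order"] - df["first_order"]
# Dividing by one day expresses the timedelta as a float number of days.
df["days_between"] = round(diff / np.timedelta64(1, "D")).fillna(0).astype(int)
print(df)  # 30 and 5 days respectively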
15c43953238561822beca353f260b4197503097999e7c6858e8e5c186c78214f
def date_subtract(date, days): 'Given a date, subtract a specified number of days, and return the date.\n\n Args:\n date (datetime): Original date to subtract from.\n days (int): Number of days to subtract from date.\n\n Return:\n subtracted_date (datetime): Original date with days subtracted.\n ' return (pd.to_datetime(date) - timedelta(days=days))
Given a date, subtract a specified number of days, and return the date. Args: date (datetime): Original date to subtract from. days (int): Number of days to subtract from date. Return: subtracted_date (datetime): Original date with days subtracted.
ecommercetools/utilities/tools.py
date_subtract
admariner/ecommercetools
43
python
def date_subtract(date, days): 'Given a date, subtract a specified number of days, and return the date.\n\n Args:\n date (datetime): Original date to subtract from.\n days (int): Number of days to subtract from date.\n\n Return:\n subtracted_date (datetime): Original date with days subtracted.\n ' return (pd.to_datetime(date) - timedelta(days=days))
def date_subtract(date, days): 'Given a date, subtract a specified number of days, and return the date.\n\n Args:\n date (datetime): Original date to subtract from.\n days (int): Number of days to subtract from date.\n\n Return:\n subtracted_date (datetime): Original date with days subtracted.\n ' return (pd.to_datetime(date) - timedelta(days=days))<|docstring|>Given a date, subtract a specified number of days, and return the date. Args: date (datetime): Original date to subtract from. days (int): Number of days to subtract from date. Return: subtracted_date (datetime): Original date with days subtracted.<|endoftext|>
fef76b8a902876b8dca166b49ca0369ab1f884329b81b8bc929776bac8901089
def select_last_x_days(df, date_column='order_date', days=365): 'Select the last X days from a Pandas dataframe.\n\n Args:\n df (object): Pandas dataframe containing time series data.\n date_column (str, optional): Name of column containing date. Default is order_date.\n days (int, optional): Number of days to subtract from current date. Default is 365.\n\n Returns:\n df (object): Filtered dataframe containing only records from the past X days.\n ' subtracted_date = date_subtract(datetime.today(), days) df = df[(df[date_column] >= subtracted_date)] return df
Select the last X days from a Pandas dataframe. Args: df (object): Pandas dataframe containing time series data. date_column (str, optional): Name of column containing date. Default is order_date. days (int, optional): Number of days to subtract from current date. Default is 365. Returns: df (object): Filtered dataframe containing only records from the past X days.
ecommercetools/utilities/tools.py
select_last_x_days
admariner/ecommercetools
43
python
def select_last_x_days(df, date_column='order_date', days=365): 'Select the last X days from a Pandas dataframe.\n\n Args:\n df (object): Pandas dataframe containing time series data.\n date_column (str, optional): Name of column containing date. Default is order_date.\n days (int, optional): Number of days to subtract from current date. Default is 365.\n\n Returns:\n df (object): Filtered dataframe containing only records from the past X days.\n ' subtracted_date = date_subtract(datetime.today(), days) df = df[(df[date_column] >= subtracted_date)] return df
def select_last_x_days(df, date_column='order_date', days=365): 'Select the last X days from a Pandas dataframe.\n\n Args:\n df (object): Pandas dataframe containing time series data.\n date_column (str, optional): Name of column containing date. Default is order_date.\n days (int, optional): Number of days to subtract from current date. Default is 365.\n\n Returns:\n df (object): Filtered dataframe containing only records from the past X days.\n ' subtracted_date = date_subtract(datetime.today(), days) df = df[(df[date_column] >= subtracted_date)] return df<|docstring|>Select the last X days from a Pandas dataframe. Args: df (object): Pandas dataframe containing time series data. date_column (str, optional): Name of column containing date. Default is order_date. days (int, optional): Number of days to subtract from current date. Default is 365. Returns: df (object): Filtered dataframe containing only records from the past X days.<|endoftext|>
12d9a94a335fc187263591be8a197f3bee15d2b78b84415eb69115793229f039
async def execute(self, query): "Execute an EdgeQL command (or commands).\n\n Example:\n\n .. code-block:: pycon\n\n >>> await con.execute('''\n ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 };\n ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x };\n ... ''')\n " (await self._protocol.simple_query(query))
Execute an EdgeQL command (or commands). Example: .. code-block:: pycon >>> await con.execute(''' ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 }; ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x }; ... ''')
edgedb/asyncio_con.py
execute
sanketsaurav/edgedb-python
0
python
async def execute(self, query): "Execute an EdgeQL command (or commands).\n\n Example:\n\n .. code-block:: pycon\n\n >>> await con.execute('''\n ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 };\n ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x };\n ... ''')\n " (await self._protocol.simple_query(query))
async def execute(self, query): "Execute an EdgeQL command (or commands).\n\n Example:\n\n .. code-block:: pycon\n\n >>> await con.execute('''\n ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 };\n ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x };\n ... ''')\n " (await self._protocol.simple_query(query))<|docstring|>Execute an EdgeQL command (or commands). Example: .. code-block:: pycon >>> await con.execute(''' ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 }; ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x }; ... ''')<|endoftext|>
e63da4dc729aba2cb62d9f558978250278f6374a551d227203d85f76a2a45285
def _reward(self, action, goal_speed) -> float: '\n The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions.\n :param action: the last action performed\n :return: the corresponding reward\n ' scaled_speed = utils.lmap(self.vehicle.speed, self.config['reward_speed_range'], [0, 1]) action_speed = action[0] steering = abs(action[1]) speed_difference = utils.lmap(abs((self.vehicle.speed - goal_speed)), [0, 15], [0, 1]) reward = (((((self.config['collision_reward'] * self.vehicle.crashed) + (self.config['high_speed_reward'] * np.clip(scaled_speed, 0, 1))) + (self.config['acceleration reward'] * action_speed)) + (self.config['lane_change_reward'] * steering)) + (self.config['goal_diffrence_reward'] * speed_difference)) reward = utils.lmap(reward, [(((self.config['collision_reward'] - self.config['acceleration reward']) + self.config['lane_change_reward']) + self.config['goal_diffrence_reward']), (self.config['high_speed_reward'] + self.config['acceleration reward'])], [0, 1]) reward = (0 if (not self.vehicle.on_road) else reward) return reward
The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions. :param action: the last action performed :return: the corresponding reward
highway_env/envs/highway_meta.py
_reward
hoaklee/highway-env
0
python
def _reward(self, action, goal_speed) -> float: '\n The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions.\n :param action: the last action performed\n :return: the corresponding reward\n ' scaled_speed = utils.lmap(self.vehicle.speed, self.config['reward_speed_range'], [0, 1]) action_speed = action[0] steering = abs(action[1]) speed_difference = utils.lmap(abs((self.vehicle.speed - goal_speed)), [0, 15], [0, 1]) reward = (((((self.config['collision_reward'] * self.vehicle.crashed) + (self.config['high_speed_reward'] * np.clip(scaled_speed, 0, 1))) + (self.config['acceleration reward'] * action_speed)) + (self.config['lane_change_reward'] * steering)) + (self.config['goal_diffrence_reward'] * speed_difference)) reward = utils.lmap(reward, [(((self.config['collision_reward'] - self.config['acceleration reward']) + self.config['lane_change_reward']) + self.config['goal_diffrence_reward']), (self.config['high_speed_reward'] + self.config['acceleration reward'])], [0, 1]) reward = (0 if (not self.vehicle.on_road) else reward) return reward
def _reward(self, action, goal_speed) -> float: '\n The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions.\n :param action: the last action performed\n :return: the corresponding reward\n ' scaled_speed = utils.lmap(self.vehicle.speed, self.config['reward_speed_range'], [0, 1]) action_speed = action[0] steering = abs(action[1]) speed_difference = utils.lmap(abs((self.vehicle.speed - goal_speed)), [0, 15], [0, 1]) reward = (((((self.config['collision_reward'] * self.vehicle.crashed) + (self.config['high_speed_reward'] * np.clip(scaled_speed, 0, 1))) + (self.config['acceleration reward'] * action_speed)) + (self.config['lane_change_reward'] * steering)) + (self.config['goal_diffrence_reward'] * speed_difference)) reward = utils.lmap(reward, [(((self.config['collision_reward'] - self.config['acceleration reward']) + self.config['lane_change_reward']) + self.config['goal_diffrence_reward']), (self.config['high_speed_reward'] + self.config['acceleration reward'])], [0, 1]) reward = (0 if (not self.vehicle.on_road) else reward) return reward<|docstring|>The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions. :param action: the last action performed :return: the corresponding reward<|endoftext|>
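The reward relies on utils.lmap, highway-env's linear map between value ranges. A minimal sketch of that mapping (a re-implementation assumed to match how it is used here, without clipping):

def lmap(v: float, x: list, y: list) -> float:
    # Linearly map v from the range [x0, x1] onto [y0, y1].
    return y[0] + (v - x[0]) * (y[1] - y[0]) / (x[1] - x[0])

# e.g. a speed of 25 in a reward_speed_range of [20, 30] scales to 0.5
print(lmap(25, [20, 30], [0, 1]))   # 0.5
# and a speed difference of 3 m/s in [0, 15] scales to 0.2
print(lmap(3, [0, 15], [0, 1]))     # 0.2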
e76e73884d4bde7fb3da48e1cbe1f62fa1baeb272491e4f3e2c57644fc238a20
def step(self, action): '\n Perform an action and step the environment dynamics.\n The action is executed by the ego-vehicle, and all other vehicles on the road performs their default behaviour\n for several simulation timesteps until the next decision making step.\n\n :param action: the action performed by the ego-vehicle\n :return: a tuple (observation, reward, terminal, info)\n ' if ((self.road is None) or (self.vehicle is None)): raise NotImplementedError('The road and vehicle must be initialized in the environment implementation') self.steps += 1 self._simulate(action) obs = self.observation_type.observe() reward = self._reward(action, self._goal) terminal = self._is_terminal() info = self._info(obs, action) return (obs, reward, terminal, info)
Perform an action and step the environment dynamics. The action is executed by the ego-vehicle, and all other vehicles on the road performs their default behaviour for several simulation timesteps until the next decision making step. :param action: the action performed by the ego-vehicle :return: a tuple (observation, reward, terminal, info)
highway_env/envs/highway_meta.py
step
hoaklee/highway-env
0
python
def step(self, action): '\n Perform an action and step the environment dynamics.\n The action is executed by the ego-vehicle, and all other vehicles on the road performs their default behaviour\n for several simulation timesteps until the next decision making step.\n\n :param action: the action performed by the ego-vehicle\n :return: a tuple (observation, reward, terminal, info)\n ' if ((self.road is None) or (self.vehicle is None)): raise NotImplementedError('The road and vehicle must be initialized in the environment implementation') self.steps += 1 self._simulate(action) obs = self.observation_type.observe() reward = self._reward(action, self._goal) terminal = self._is_terminal() info = self._info(obs, action) return (obs, reward, terminal, info)
def step(self, action): '\n Perform an action and step the environment dynamics.\n The action is executed by the ego-vehicle, and all other vehicles on the road performs their default behaviour\n for several simulation timesteps until the next decision making step.\n\n :param action: the action performed by the ego-vehicle\n :return: a tuple (observation, reward, terminal, info)\n ' if ((self.road is None) or (self.vehicle is None)): raise NotImplementedError('The road and vehicle must be initialized in the environment implementation') self.steps += 1 self._simulate(action) obs = self.observation_type.observe() reward = self._reward(action, self._goal) terminal = self._is_terminal() info = self._info(obs, action) return (obs, reward, terminal, info)<|docstring|>Perform an action and step the environment dynamics. The action is executed by the ego-vehicle, and all other vehicles on the road performs their default behaviour for several simulation timesteps until the next decision making step. :param action: the action performed by the ego-vehicle :return: a tuple (observation, reward, terminal, info)<|endoftext|>
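step() follows the classic Gym contract of returning (observation, reward, terminal, info). A generic episode loop against any environment with that contract; the continuous [acceleration, steering] action format is an assumption taken from the reward above.

import numpy as np

def run_episode(env, policy, max_steps: int = 1000) -> float:
    # policy maps an observation to an action, e.g. [acceleration, steering].
    obs = env.reset()
    total_reward, done, steps = 0.0, False, 0
    while not done and steps < max_steps:
        action = policy(obs)
        obs, reward, done, info = env.step(action)
        total_reward += reward
        steps += 1
    return total_reward

# e.g. a trivial policy that always coasts straight ahead:
# run_episode(env, lambda obs: np.array([0.0, 0.0]))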
d70892e18f098030e4655d1cbca76eb4cd652ae120233aa8805067b7d8ec8f76
def _info(self, obs, action): '\n Return a dictionary of additional information\n\n :param obs: current observation\n :param action: current action\n :return: info dict\n ' info = {'speed': self.vehicle.speed, 'crashed': self.vehicle.crashed, 'action': action} try: info['cost'] = self._cost(action) info['goal'] = self._task except NotImplementedError: pass return info
Return a dictionary of additional information :param obs: current observation :param action: current action :return: info dict
highway_env/envs/highway_meta.py
_info
hoaklee/highway-env
0
python
def _info(self, obs, action): '\n Return a dictionary of additional information\n\n :param obs: current observation\n :param action: current action\n :return: info dict\n ' info = {'speed': self.vehicle.speed, 'crashed': self.vehicle.crashed, 'action': action} try: info['cost'] = self._cost(action) info['goal'] = self._task except NotImplementedError: pass return info
def _info(self, obs, action): '\n Return a dictionary of additional information\n\n :param obs: current observation\n :param action: current action\n :return: info dict\n ' info = {'speed': self.vehicle.speed, 'crashed': self.vehicle.crashed, 'action': action} try: info['cost'] = self._cost(action) info['goal'] = self._task except NotImplementedError: pass return info<|docstring|>Return a dictionary of additional information :param obs: current observation :param action: current action :return: info dict<|endoftext|>
e84f7ef99b9ed7b0ffac525153a69199360567de5fdf754a191d9f943a452701
def __init__(self, x, y): 'Stores x and y' self.x = x self.y = y
Stores x and y
src/bricks/types/point.py
__init__
SandroWissmann/Bricks-Py
0
python
def __init__(self, x, y): self.x = x self.y = y
def __init__(self, x, y): self.x = x self.y = y<|docstring|>Stores x and y<|endoftext|>
17072ba21ce036071af0dc3df59d39fcf8e8c7a0ce0d391e7357d731005de3f8
def load_config(self, config): 'Load a configuration.\n\n Uses ``config`` if not None. Otherwise the method will try to load\n the config from a Flask configuration variable (named using the\n ``config_name`` attribute). Last it will use the config provided in\n ``default_config``.\n\n :param config: A service configuration or None.\n ' return (config or load_or_import_from_config(self.config_name, default=self.default_config))
Load a configuration. Uses ``config`` if not None. Otherwise the method will try to load the config from a Flask configuration variable (named using the ``config_name`` attribute). Last it will use the config provided in ``default_config``. :param config: A service configuration or None.
invenio_records_resources/config.py
load_config
diegodelemos/invenio-records-resources
0
python
def load_config(self, config): 'Load a configuration.\n\n Uses ``config`` if not None. Otherwise the method will try to load\n the config from a Flask configuration variable (named using the\n ``config_name`` attribute). Last it will use the config provided in\n ``default_config``.\n\n :param config: A service configuration or None.\n ' return (config or load_or_import_from_config(self.config_name, default=self.default_config))
def load_config(self, config): 'Load a configuration.\n\n Uses ``config`` if not None. Otherwise the method will try to load\n the config from a Flask configuration variable (named using the\n ``config_name`` attribute). Last it will use the config provided in\n ``default_config``.\n\n :param config: A service configuration or None.\n ' return (config or load_or_import_from_config(self.config_name, default=self.default_config))<|docstring|>Load a configuration. Uses ``config`` if not None. Otherwise the method will try to load the config from a Flask configuration variable (named using the ``config_name`` attribute). Last it will use the config provided in ``default_config``. :param config: A service configuration or None.<|endoftext|>
3f3d7f4a59b4afc08658cda46c28c7f4f2a2277815c9b361e19b097ac0da7886
def f1_score(y_true, y_score, threshold='auto'): "Plot the F1 score.\n\n Parameters\n ----------\n y_true : numpy.ndarray\n The target vector.\n y_score : numpy.ndarray\n The score vector.\n threshold : 'auto' or float\n Increasing thresholds on the decision function used to compute precision and recall.\n\n Returns\n -------\n score : float\n " msg = 'mode must be "auto" or 0 to 1.' assert ((threshold == 'auto') or (0 <= threshold < 1)), msg if (threshold == 'auto'): (fpr, tpr, thresholds) = metrics.roc_curve(y_true, y_score) gmeans = np.sqrt((tpr * (1 - fpr))) threshold = thresholds[np.argmax(gmeans)] score = metrics.f1_score(y_true, (y_score > threshold)) return score
Compute the F1 score. Parameters ---------- y_true : numpy.ndarray The target vector. y_score : numpy.ndarray The score vector. threshold : 'auto' or float Increasing thresholds on the decision function used to compute precision and recall. Returns ------- score : float
pycalf/metrics.py
f1_score
konumaru/pycalf
2
python
def f1_score(y_true, y_score, threshold='auto'): "Plot the F1 score.\n\n Parameters\n ----------\n y_true : numpy.ndarray\n The target vector.\n y_score : numpy.ndarray\n The score vector.\n threshold : 'auto' or float\n Increasing thresholds on the decision function used to compute precision and recall.\n\n Returns\n -------\n score : float\n " msg = 'mode must be "auto" or 0 to 1.' assert ((threshold == 'auto') or (0 <= threshold < 1)), msg if (threshold == 'auto'): (fpr, tpr, thresholds) = metrics.roc_curve(y_true, y_score) gmeans = np.sqrt((tpr * (1 - fpr))) threshold = thresholds[np.argmax(gmeans)] score = metrics.f1_score(y_true, (y_score > threshold)) return score
def f1_score(y_true, y_score, threshold='auto'): "Plot the F1 score.\n\n Parameters\n ----------\n y_true : numpy.ndarray\n The target vector.\n y_score : numpy.ndarray\n The score vector.\n threshold : 'auto' or float\n Increasing thresholds on the decision function used to compute precision and recall.\n\n Returns\n -------\n score : float\n " msg = 'mode must be "auto" or 0 to 1.' assert ((threshold == 'auto') or (0 <= threshold < 1)), msg if (threshold == 'auto'): (fpr, tpr, thresholds) = metrics.roc_curve(y_true, y_score) gmeans = np.sqrt((tpr * (1 - fpr))) threshold = thresholds[np.argmax(gmeans)] score = metrics.f1_score(y_true, (y_score > threshold)) return score<|docstring|>Plot the F1 score. Parameters ---------- y_true : numpy.ndarray The target vector. y_score : numpy.ndarray The score vector. threshold : 'auto' or float Increasing thresholds on the decision function used to compute precision and recall. Returns ------- score : float<|endoftext|>
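The 'auto' branch picks the ROC threshold that maximises the geometric mean of TPR and (1 - FPR) before scoring. The same steps with scikit-learn alone, on toy data:

import numpy as np
from sklearn import metrics

y_true = np.array([0, 0, 0, 1, 1, 1, 0, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.65, 0.9, 0.2, 0.55])

fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
gmeans = np.sqrt(tpr * (1 - fpr))
threshold = thresholds[np.argmax(gmeans)]        # the "auto" threshold
score = metrics.f1_score(y_true, y_score > threshold)
print(threshold, score)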
d026273b26aa79734fcdd355dcf26fa98966f99dddaf7191d6ec0730b59205dc
def fit(self, X: pd.DataFrame, treatment: np.ndarray, weight: np.ndarray=None): 'Fit the model with X.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n None\n ' if (weight is None): weight = np.ones(X.shape[0]) treat_avg = np.average(X[treatment], weights=weight[treatment], axis=0) treat_var = np.average(np.square((X[treatment] - treat_avg)), weights=weight[treatment], axis=0) control_avg = np.average(X[(~ treatment)], weights=weight[(~ treatment)], axis=0) control_var = np.average(np.square((X[(~ treatment)] - control_avg)), weights=weight[(~ treatment)], axis=0) data_size = X.shape[0] treat_size = np.sum(treatment) control_size = np.sum((~ treatment)) sc = np.sqrt((((treat_size * treat_var) + (control_size * control_var)) / data_size)) d_value = (np.abs((treat_avg - control_avg)) / sc) self.effect_size = np.array(d_value) self.effect_name = X.columns.to_numpy()
Fit the model with X. Parameters ---------- X : pd.DataFrame Covariates for propensity score. treatment : pd.Series Flags with or without intervention. weight : np.array The weight of each sample. Returns ------- None
pycalf/metrics.py
fit
konumaru/pycalf
2
python
def fit(self, X: pd.DataFrame, treatment: np.ndarray, weight: np.ndarray=None): 'Fit the model with X.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n None\n ' if (weight is None): weight = np.ones(X.shape[0]) treat_avg = np.average(X[treatment], weights=weight[treatment], axis=0) treat_var = np.average(np.square((X[treatment] - treat_avg)), weights=weight[treatment], axis=0) control_avg = np.average(X[(~ treatment)], weights=weight[(~ treatment)], axis=0) control_var = np.average(np.square((X[(~ treatment)] - control_avg)), weights=weight[(~ treatment)], axis=0) data_size = X.shape[0] treat_size = np.sum(treatment) control_size = np.sum((~ treatment)) sc = np.sqrt((((treat_size * treat_var) + (control_size * control_var)) / data_size)) d_value = (np.abs((treat_avg - control_avg)) / sc) self.effect_size = np.array(d_value) self.effect_name = X.columns.to_numpy()
def fit(self, X: pd.DataFrame, treatment: np.ndarray, weight: np.ndarray=None): 'Fit the model with X.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n None\n ' if (weight is None): weight = np.ones(X.shape[0]) treat_avg = np.average(X[treatment], weights=weight[treatment], axis=0) treat_var = np.average(np.square((X[treatment] - treat_avg)), weights=weight[treatment], axis=0) control_avg = np.average(X[(~ treatment)], weights=weight[(~ treatment)], axis=0) control_var = np.average(np.square((X[(~ treatment)] - control_avg)), weights=weight[(~ treatment)], axis=0) data_size = X.shape[0] treat_size = np.sum(treatment) control_size = np.sum((~ treatment)) sc = np.sqrt((((treat_size * treat_var) + (control_size * control_var)) / data_size)) d_value = (np.abs((treat_avg - control_avg)) / sc) self.effect_size = np.array(d_value) self.effect_name = X.columns.to_numpy()<|docstring|>Fit the model with X. Parameters ---------- X : pd.DataFrame Covariates for propensity score. treatment : pd.Series Flags with or without intervention. weight : np.array The weight of each sample. Returns ------- None<|endoftext|>
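fit() computes, per covariate, a (optionally weighted) standardized mean difference between treated and control rows. The same computation on a tiny unweighted example:

import numpy as np
import pandas as pd

X = pd.DataFrame({"age": [30, 35, 40, 50, 55, 60], "visits": [1, 2, 1, 4, 5, 4]})
treatment = np.array([False, False, False, True, True, True])

treat_avg = X[treatment].mean(axis=0)
control_avg = X[~treatment].mean(axis=0)
treat_var = X[treatment].var(axis=0, ddof=0)      # population variance, as in the weighted average above
control_var = X[~treatment].var(axis=0, ddof=0)

n, n_t, n_c = len(X), treatment.sum(), (~treatment).sum()
pooled_sd = np.sqrt((n_t * treat_var + n_c * control_var) / n)
d = np.abs(treat_avg - control_avg) / pooled_sd
print(d)  # large values flag covariates that are badly balanced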
f2d8629ebe205d69eedb1109fe529e62300998557cc2b2a7580ff2d3fb5e8c80
def transform(self): 'Apply the calculating the effect size d.\n\n Returns\n -------\n (effect_name, effect_size) : tuple\n ' return (self.effect_name, self.effect_size)
Apply the calculating the effect size d. Returns ------- (effect_name, effect_size) : tuple
pycalf/metrics.py
transform
konumaru/pycalf
2
python
def transform(self): 'Apply the calculating the effect size d.\n\n Returns\n -------\n (effect_name, effect_size) : tuple\n ' return (self.effect_name, self.effect_size)
def transform(self): 'Apply the calculating the effect size d.\n\n Returns\n -------\n (effect_name, effect_size) : tuple\n ' return (self.effect_name, self.effect_size)<|docstring|>Apply the calculating the effect size d. Returns ------- (effect_name, effect_size) : tuple<|endoftext|>
c3617d0a7c6f71a75bd07b8d41a1b7a359e7486151589286c38c2da14bd4da75
def fit_transform(self, X: pd.DataFrame, treatment: np.ndarray, weight: np.ndarray=None): 'Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n pd.DataFrame\n ' self.fit(X, treatment, weight) return self.transform()
Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : pd.DataFrame Covariates for propensity score. treatment : pd.Series Flags with or without intervention. weight : np.array The weight of each sample. Returns ------- pd.DataFrame
pycalf/metrics.py
fit_transform
konumaru/pycalf
2
python
def fit_transform(self, X: pd.DataFrame, treatment: np.ndarray, weight: np.ndarray=None): 'Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n pd.DataFrame\n ' self.fit(X, treatment, weight) return self.transform()
def fit_transform(self, X: pd.DataFrame, treatment: np.ndarray, weight: np.ndarray=None): 'Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n pd.DataFrame\n ' self.fit(X, treatment, weight) return self.transform()<|docstring|>Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : pd.DataFrame Covariates for propensity score. treatment : pd.Series Flags with or without intervention. weight : np.array The weight of each sample. Returns ------- pd.DataFrame<|endoftext|>
4836ea76f0647d33e43c8c8b6a104f181227ba236ed773968b213b304b204561
def fit(self, X: pd.DataFrame, treatment: pd.Series, y: pd.Series, weight: np.array=None): 'Fit the model with X, y and weight.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n y : pd.Series\n Outcome variables.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n None\n ' is_treat = (treatment == 1) self.treat_result = sm.WLS(y[is_treat], X[is_treat], weights=weight[is_treat]).fit() self.control_result = sm.WLS(y[(~ is_treat)], X[(~ is_treat)], weights=weight[(~ is_treat)]).fit()
Fit the model with X, y and weight. Parameters ---------- X : pd.DataFrame Covariates for propensity score. treatment : pd.Series Flags with or without intervention. y : pd.Series Outcome variables. weight : np.array The weight of each sample. Returns ------- None
pycalf/metrics.py
fit
konumaru/pycalf
2
python
def fit(self, X: pd.DataFrame, treatment: pd.Series, y: pd.Series, weight: np.array=None): 'Fit the model with X, y and weight.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n y : pd.Series\n Outcome variables.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n None\n ' is_treat = (treatment == 1) self.treat_result = sm.WLS(y[is_treat], X[is_treat], weights=weight[is_treat]).fit() self.control_result = sm.WLS(y[(~ is_treat)], X[(~ is_treat)], weights=weight[(~ is_treat)]).fit()
def fit(self, X: pd.DataFrame, treatment: pd.Series, y: pd.Series, weight: np.array=None): 'Fit the model with X, y and weight.\n\n Parameters\n ----------\n X : pd.DataFrame\n Covariates for propensity score.\n treatment : pd.Series\n Flags with or without intervention.\n y : pd.Series\n Outcome variables.\n weight : np.array\n The weight of each sample.\n\n Returns\n -------\n None\n ' is_treat = (treatment == 1) self.treat_result = sm.WLS(y[is_treat], X[is_treat], weights=weight[is_treat]).fit() self.control_result = sm.WLS(y[(~ is_treat)], X[(~ is_treat)], weights=weight[(~ is_treat)]).fit()<|docstring|>Fit the model with X, y and weight. Parameters ---------- X : pd.DataFrame Covariates for propensity score. treatment : pd.Series Flags with or without intervention. y : pd.Series Outcome variables. weight : np.array The weight of each sample. Returns ------- None<|endoftext|>
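fit() estimates two weighted least squares models with statsmodels, one per treatment arm. A compact WLS sketch on synthetic data and weights:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 200
X = sm.add_constant(rng.normal(size=(n, 2)))           # intercept + two covariates
y = X @ np.array([1.0, 2.0, -0.5]) + rng.normal(size=n)
weight = rng.uniform(0.5, 1.5, size=n)                  # e.g. inverse-propensity weights

result = sm.WLS(y, X, weights=weight).fit()
print(result.params)    # per-covariate effects
print(result.tvalues)   # |t| >= 1.96 is roughly the 5% significance cut used above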
7e2cf7a96bfb4b45ccb8e1bc09aae401d10fe3f59d2ffdb2b22ddf22aaab9e4b
def transform(self): 'Apply the estimating the effect of the intervention by attribute.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n pd.DataFrame\n ' result = pd.DataFrame() models = [self.control_result, self.treat_result] for (i, model) in enumerate(models): result[f'Z{i}_effect'] = model.params.round(1) result[f'Z{i}_tvalue'] = model.tvalues.round(2).apply((lambda x: ((str(x) + '**') if (abs(x) >= 1.96) else str(x)))) result['Lift'] = (result['Z1_effect'] - result['Z0_effect']) result_df = result.sort_values(by='Lift') self.effect = result_df return result_df
Apply the estimating the effect of the intervention by attribute. Parameters ---------- None Returns ------- pd.DataFrame
pycalf/metrics.py
transform
konumaru/pycalf
2
python
def transform(self): 'Apply the estimating the effect of the intervention by attribute.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n pd.DataFrame\n ' result = pd.DataFrame() models = [self.control_result, self.treat_result] for (i, model) in enumerate(models): result[f'Z{i}_effect'] = model.params.round(1) result[f'Z{i}_tvalue'] = model.tvalues.round(2).apply((lambda x: ((str(x) + '**') if (abs(x) >= 1.96) else str(x)))) result['Lift'] = (result['Z1_effect'] - result['Z0_effect']) result_df = result.sort_values(by='Lift') self.effect = result_df return result_df
def transform(self): 'Apply the estimating the effect of the intervention by attribute.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n pd.DataFrame\n ' result = pd.DataFrame() models = [self.control_result, self.treat_result] for (i, model) in enumerate(models): result[f'Z{i}_effect'] = model.params.round(1) result[f'Z{i}_tvalue'] = model.tvalues.round(2).apply((lambda x: ((str(x) + '**') if (abs(x) >= 1.96) else str(x)))) result['Lift'] = (result['Z1_effect'] - result['Z0_effect']) result_df = result.sort_values(by='Lift') self.effect = result_df return result_df<|docstring|>Apply the estimating the effect of the intervention by attribute. Parameters ---------- None Returns ------- pd.DataFrame<|endoftext|>
b1f707ce01c02dcb78b648fe7c0f8433192d4923e21e309ea0b1046028292abc
def plot_lift_values(self, figsize: tuple=(12, 6)): 'Plot the effect.\n\n Parameters\n ----------\n figsize : tuple\n Figure dimension ``(width, height)`` in inches.\n\n Returns\n -------\n ' plt.figure(figsize=figsize) plt.title('Treatment Lift Values') plt.bar(self.effect.index, self.effect['Lift'].values) plt.ylabel('Lift Value') plt.xticks(rotation=90) plt.tight_layout() plt.show()
Plot the effect. Parameters ---------- figsize : tuple Figure dimension ``(width, height)`` in inches. Returns -------
pycalf/metrics.py
plot_lift_values
konumaru/pycalf
2
python
def plot_lift_values(self, figsize: tuple=(12, 6)): 'Plot the effect.\n\n Parameters\n ----------\n figsize : tuple\n Figure dimension ``(width, height)`` in inches.\n\n Returns\n -------\n ' plt.figure(figsize=figsize) plt.title('Treatment Lift Values') plt.bar(self.effect.index, self.effect['Lift'].values) plt.ylabel('Lift Value') plt.xticks(rotation=90) plt.tight_layout() plt.show()
def plot_lift_values(self, figsize: tuple=(12, 6)): 'Plot the effect.\n\n Parameters\n ----------\n figsize : tuple\n Figure dimension ``(width, height)`` in inches.\n\n Returns\n -------\n ' plt.figure(figsize=figsize) plt.title('Treatment Lift Values') plt.bar(self.effect.index, self.effect['Lift'].values) plt.ylabel('Lift Value') plt.xticks(rotation=90) plt.tight_layout() plt.show()<|docstring|>Plot the effect. Parameters ---------- figsize : tuple Figure dimension ``(width, height)`` in inches. Returns -------<|endoftext|>
f1ad6c0ecf1aefabb79a837a8009c2b7cf9fdb9abe099206475454a70a4fb3e5
def fit(self, data: pd.DataFrame): 'Fit the model with data.\n\n Parameters\n ----------\n data : pd.DataFrame\n\n Returns\n -------\n None\n ' vif = pd.DataFrame(index=data.columns.tolist(), columns=['VIF'], dtype='float64') for feature in data.columns.tolist(): X = data.drop([feature], axis=1) y = data[feature] model = linear_model.OLS(endog=y, exog=X) r2 = model.fit().rsquared vif.loc[(feature, 'VIF')] = np.round((1 / (1 - r2)), 2) self.result = vif
Fit the model with data. Parameters ---------- data : pd.DataFrame Returns ------- None
pycalf/metrics.py
fit
konumaru/pycalf
2
python
def fit(self, data: pd.DataFrame): 'Fit the model with data.\n\n Parameters\n ----------\n data : pd.DataFrame\n\n Returns\n -------\n None\n ' vif = pd.DataFrame(index=data.columns.tolist(), columns=['VIF'], dtype='float64') for feature in data.columns.tolist(): X = data.drop([feature], axis=1) y = data[feature] model = linear_model.OLS(endog=y, exog=X) r2 = model.fit().rsquared vif.loc[(feature, 'VIF')] = np.round((1 / (1 - r2)), 2) self.result = vif
def fit(self, data: pd.DataFrame): 'Fit the model with data.\n\n Parameters\n ----------\n data : pd.DataFrame\n\n Returns\n -------\n None\n ' vif = pd.DataFrame(index=data.columns.tolist(), columns=['VIF'], dtype='float64') for feature in data.columns.tolist(): X = data.drop([feature], axis=1) y = data[feature] model = linear_model.OLS(endog=y, exog=X) r2 = model.fit().rsquared vif.loc[(feature, 'VIF')] = np.round((1 / (1 - r2)), 2) self.result = vif<|docstring|>Fit the model with data. Parameters ---------- data : pd.DataFrame Returns ------- None<|endoftext|>
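A hedged, self-contained sketch of the variance-inflation-factor loop above: regress each column on the remaining columns and report 1 / (1 - R^2). The toy collinear data is an illustrative assumption, and adding an intercept with add_constant is my addition (the record regresses without one).

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(1)
x1 = rng.normal(size=300)
data = pd.DataFrame({
    'x1': x1,
    'x2': 0.9 * x1 + rng.normal(scale=0.3, size=300),  # nearly collinear with x1
    'x3': rng.normal(size=300),
})

vif = {}
for feature in data.columns:
    X = sm.add_constant(data.drop(columns=[feature]))
    r2 = sm.OLS(endog=data[feature], exog=X).fit().rsquared
    vif[feature] = round(1.0 / (1.0 - r2), 2)
print(vif)  # x1 and x2 show clearly inflated values; x3 stays near 1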
d8a739d091f4d101b1443660efe10bcd8f1184f875161ce687f3bd0fea5e0c50
def transform(self): 'Apply the calculating vif.\n\n Returns\n -------\n result : pd.DataFrame\n ' return self.result
Apply the calculating vif. Returns ------- result : pd.DataFrame
pycalf/metrics.py
transform
konumaru/pycalf
2
python
def transform(self): 'Apply the calculating vif.\n\n Returns\n -------\n result : pd.DataFrame\n ' return self.result
def transform(self): 'Apply the calculating vif.\n\n Returns\n -------\n result : pd.DataFrame\n ' return self.result<|docstring|>Apply the calculating vif. Returns ------- result : pd.DataFrame<|endoftext|>
d350c2134930a61bd93e34b8ae5762f8bf8fb491ab09ed557f0b5c9a62a67acd
def fit_transform(self, data: pd.DataFrame, **kwargs): 'Fit the model with data and apply the calculating vif.\n\n Parameters\n ----------\n data : pd.DataFrame\n\n Returns\n -------\n result : pd.DataFrame\n ' self.fit(data, **kwargs) return self.transform()
Fit the model with data and apply the calculating vif. Parameters ---------- data : pd.DataFrame Returns ------- result : pd.DataFrame
pycalf/metrics.py
fit_transform
konumaru/pycalf
2
python
def fit_transform(self, data: pd.DataFrame, **kwargs): 'Fit the model with data and apply the calculating vif.\n\n Parameters\n ----------\n data : pd.DataFrame\n\n Returns\n -------\n result : pd.DataFrame\n ' self.fit(data, **kwargs) return self.transform()
def fit_transform(self, data: pd.DataFrame, **kwargs): 'Fit the model with data and apply the calculating vif.\n\n Parameters\n ----------\n data : pd.DataFrame\n\n Returns\n -------\n result : pd.DataFrame\n ' self.fit(data, **kwargs) return self.transform()<|docstring|>Fit the model with data and apply the calculating vif. Parameters ---------- data : pd.DataFrame Returns ------- result : pd.DataFrame<|endoftext|>
c0b46c559ca4286ec9a40c6c678329e8b4e5eaad0887415d2d35e1d7535ea49f
def string_escape(text: str) -> str: 'Escape values special to javascript in strings.\n\n With this we should be able to use something like:\n elem.evaluateJavaScript("this.value=\'{}\'".format(string_escape(...)))\n And all values should work.\n ' replacements = (('\\', '\\\\'), ("'", "\\'"), ('"', '\\"'), ('\n', '\\n'), ('\r', '\\r'), ('\x00', '\\x00'), ('\ufeff', '\\ufeff'), ('\u2028', '\\u2028'), ('\u2029', '\\u2029')) for (orig, repl) in replacements: text = text.replace(orig, repl) return text
Escape values special to javascript in strings. With this we should be able to use something like: elem.evaluateJavaScript("this.value='{}'".format(string_escape(...))) And all values should work.
Lib/site-packages/qutebrowser/utils/javascript.py
string_escape
fochoao/cpython
0
python
def string_escape(text: str) -> str: 'Escape values special to javascript in strings.\n\n With this we should be able to use something like:\n elem.evaluateJavaScript("this.value=\'{}\'".format(string_escape(...)))\n And all values should work.\n ' replacements = (('\\', '\\\\'), ("'", "\\'"), ('"', '\\"'), ('\n', '\\n'), ('\r', '\\r'), ('\x00', '\\x00'), ('\ufeff', '\\ufeff'), ('\u2028', '\\u2028'), ('\u2029', '\\u2029')) for (orig, repl) in replacements: text = text.replace(orig, repl) return text
def string_escape(text: str) -> str: 'Escape values special to javascript in strings.\n\n With this we should be able to use something like:\n elem.evaluateJavaScript("this.value=\'{}\'".format(string_escape(...)))\n And all values should work.\n ' replacements = (('\\', '\\\\'), ("'", "\\'"), ('"', '\\"'), ('\n', '\\n'), ('\r', '\\r'), ('\x00', '\\x00'), ('\ufeff', '\\ufeff'), ('\u2028', '\\u2028'), ('\u2029', '\\u2029')) for (orig, repl) in replacements: text = text.replace(orig, repl) return text<|docstring|>Escape values special to javascript in strings. With this we should be able to use something like: elem.evaluateJavaScript("this.value='{}'".format(string_escape(...))) And all values should work.<|endoftext|>
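A small illustrative rerun of the escaping logic above (copied from the record, trimmed of the BOM and line-separator entries for brevity; not an official qutebrowser example):

replacements = (('\\', '\\\\'), ("'", "\\'"), ('"', '\\"'), ('\n', '\\n'), ('\r', '\\r'))

def string_escape(text):
    # Backslashes are replaced first, so later replacements are not double-escaped.
    for orig, repl in replacements:
        text = text.replace(orig, repl)
    return text

print(string_escape("it's a \"quote\"\n"))
# prints: it\'s a \"quote\"\n  -- now safe inside a JS string literal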
37f91f7e437b3aaead529dd95de62e754d08d54b9de4b45bcd3b5d77a1f9e4ea
def to_js(arg: _JsArgType) -> str: "Convert the given argument so it's the equivalent in JS." if (arg is None): return 'undefined' elif isinstance(arg, str): return '"{}"'.format(string_escape(arg)) elif isinstance(arg, bool): return str(arg).lower() elif isinstance(arg, (int, float)): return str(arg) elif isinstance(arg, list): return '[{}]'.format(', '.join((to_js(e) for e in arg))) else: raise TypeError("Don't know how to handle {!r} of type {}!".format(arg, type(arg).__name__))
Convert the given argument so it's the equivalent in JS.
Lib/site-packages/qutebrowser/utils/javascript.py
to_js
fochoao/cpython
0
python
def to_js(arg: _JsArgType) -> str: if (arg is None): return 'undefined' elif isinstance(arg, str): return '"{}"'.format(string_escape(arg)) elif isinstance(arg, bool): return str(arg).lower() elif isinstance(arg, (int, float)): return str(arg) elif isinstance(arg, list): return '[{}]'.format(', '.join((to_js(e) for e in arg))) else: raise TypeError("Don't know how to handle {!r} of type {}!".format(arg, type(arg).__name__))
def to_js(arg: _JsArgType) -> str: if (arg is None): return 'undefined' elif isinstance(arg, str): return '"{}"'.format(string_escape(arg)) elif isinstance(arg, bool): return str(arg).lower() elif isinstance(arg, (int, float)): return str(arg) elif isinstance(arg, list): return '[{}]'.format(', '.join((to_js(e) for e in arg))) else: raise TypeError("Don't know how to handle {!r} of type {}!".format(arg, type(arg).__name__))<|docstring|>Convert the given argument so it's the equivalent in JS.<|endoftext|>
229657e1eb38579a16b801b7891c0c43cb7eb7a4ebb706d07e16899e63dada41
def assemble(module: str, function: str, *args: _JsArgType) -> str: 'Assemble a javascript file and a function call.' js_args = ', '.join((to_js(arg) for arg in args)) if (module == 'window'): parts = ['window', function] else: parts = ['window', '_qutebrowser', module, function] code = '"use strict";\n{}({});'.format('.'.join(parts), js_args) return code
Assemble a javascript file and a function call.
Lib/site-packages/qutebrowser/utils/javascript.py
assemble
fochoao/cpython
0
python
def assemble(module: str, function: str, *args: _JsArgType) -> str: js_args = ', '.join((to_js(arg) for arg in args)) if (module == 'window'): parts = ['window', function] else: parts = ['window', '_qutebrowser', module, function] code = '"use strict";\n{}({});'.format('.'.join(parts), js_args) return code
def assemble(module: str, function: str, *args: _JsArgType) -> str: js_args = ', '.join((to_js(arg) for arg in args)) if (module == 'window'): parts = ['window', function] else: parts = ['window', '_qutebrowser', module, function] code = '"use strict";\n{}({});'.format('.'.join(parts), js_args) return code<|docstring|>Assemble a javascript file and a function call.<|endoftext|>
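A hedged usage sketch of the two helpers above; it assumes qutebrowser is installed so the module can be imported, and the 'scroll'/'toPerc' names are invented for illustration:

from qutebrowser.utils.javascript import assemble, to_js

print(to_js(None), to_js(True), to_js(3.5), to_js(['a', 'b"c']))
# undefined true 3.5 ["a", "b\"c"]
print(assemble('scroll', 'toPerc', 0, 30))
# "use strict";
# window._qutebrowser.scroll.toPerc(0, 30);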
1643b90311dc417f0c1d809898ad3acaf08aee0471592bf247f1a3c92402d4eb
def wrap_global(name: str, *sources: str) -> str: 'Wrap a script using window._qutebrowser.' from qutebrowser.utils import jinja template = jinja.js_environment.get_template('global_wrapper.js') return template.render(code='\n'.join(sources), name=name)
Wrap a script using window._qutebrowser.
Lib/site-packages/qutebrowser/utils/javascript.py
wrap_global
fochoao/cpython
0
python
def wrap_global(name: str, *sources: str) -> str: from qutebrowser.utils import jinja template = jinja.js_environment.get_template('global_wrapper.js') return template.render(code='\n'.join(sources), name=name)
def wrap_global(name: str, *sources: str) -> str: from qutebrowser.utils import jinja template = jinja.js_environment.get_template('global_wrapper.js') return template.render(code='\n'.join(sources), name=name)<|docstring|>Wrap a script using window._qutebrowser.<|endoftext|>
9f00d1d12e4932d2c6e7c3cb9d16147f3b01a5dc513552ed8ca14903ea434b95
def getLevelName(lvl): '\n Get (numerical) python logging level for (string) spec-defined log level name.\n ' lvl = _ocrdLevel2pythonLevel.get(lvl, lvl) return logging.getLevelName(lvl)
Get (numerical) python logging level for (string) spec-defined log level name.
ocrd/logging.py
getLevelName
saw-leipzig/pyocrd
0
python
def getLevelName(lvl): '\n \n ' lvl = _ocrdLevel2pythonLevel.get(lvl, lvl) return logging.getLevelName(lvl)
def getLevelName(lvl): '\n \n ' lvl = _ocrdLevel2pythonLevel.get(lvl, lvl) return logging.getLevelName(lvl)<|docstring|>Get (numerical) python logging level for (string) spec-defined log level name.<|endoftext|>
9f9d7aea4fd925998e57668af56bbe8b836a6d1e6fbe443f19fbd5cc456499df
def setOverrideLogLevel(lvl): '\n Override all logger filter levels to include lvl and above.\n\n\n - Set root logger level\n - iterates all existing loggers and sets their log level to ``NOTSET``.\n\n Args:\n lvl (string): Log level name.\n ' if (lvl is None): return logging.info('Overriding log level globally to %s', lvl) lvl = getLevelName(lvl) _overrideLogLevel = lvl logging.getLogger('').setLevel(lvl) for loggerName in logging.Logger.manager.loggerDict: logger = logging.Logger.manager.loggerDict[loggerName] if isinstance(logger, logging.PlaceHolder): continue logger.setLevel(logging.NOTSET)
Override all logger filter levels to include lvl and above. - Set root logger level - iterates all existing loggers and sets their log level to ``NOTSET``. Args: lvl (string): Log level name.
ocrd/logging.py
setOverrideLogLevel
saw-leipzig/pyocrd
0
python
def setOverrideLogLevel(lvl): '\n Override all logger filter levels to include lvl and above.\n\n\n - Set root logger level\n - iterates all existing loggers and sets their log level to ``NOTSET``.\n\n Args:\n lvl (string): Log level name.\n ' if (lvl is None): return logging.info('Overriding log level globally to %s', lvl) lvl = getLevelName(lvl) _overrideLogLevel = lvl logging.getLogger().setLevel(lvl) for loggerName in logging.Logger.manager.loggerDict: logger = logging.Logger.manager.loggerDict[loggerName] if isinstance(logger, logging.PlaceHolder): continue logger.setLevel(logging.NOTSET)
def setOverrideLogLevel(lvl): '\n Override all logger filter levels to include lvl and above.\n\n\n - Set root logger level\n - iterates all existing loggers and sets their log level to ``NOTSET``.\n\n Args:\n lvl (string): Log level name.\n ' if (lvl is None): return logging.info('Overriding log level globally to %s', lvl) lvl = getLevelName(lvl) _overrideLogLevel = lvl logging.getLogger().setLevel(lvl) for loggerName in logging.Logger.manager.loggerDict: logger = logging.Logger.manager.loggerDict[loggerName] if isinstance(logger, logging.PlaceHolder): continue logger.setLevel(logging.NOTSET)<|docstring|>Override all logger filter levels to include lvl and above. - Set root logger level - iterates all existing loggers and sets their log level to ``NOTSET``. Args: lvl (string): Log level name.<|endoftext|>
342507eb4bb0d9b7d0a4f9265afac4c54bfd946ecb26fecffe815ba1b64b1dbf
def initLogging(): '\n Sets logging defaults\n ' logging.basicConfig(level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') logging.getLogger('').setLevel(logging.INFO) logging.getLogger('PIL').setLevel(logging.INFO) CONFIG_PATHS = [os.path.curdir, os.path.join(os.path.expanduser('~')), '/etc'] for p in CONFIG_PATHS: config_file = os.path.join(p, 'ocrd_logging.py') if os.path.exists(config_file): logging.info("Loading logging configuration from '%s'", config_file) with open(config_file) as f: code = compile(f.read(), config_file, 'exec') exec(code, globals(), locals())
Sets logging defaults
ocrd/logging.py
initLogging
saw-leipzig/pyocrd
0
python
def initLogging(): '\n \n ' logging.basicConfig(level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') logging.getLogger().setLevel(logging.INFO) logging.getLogger('PIL').setLevel(logging.INFO) CONFIG_PATHS = [os.path.curdir, os.path.join(os.path.expanduser('~')), '/etc'] for p in CONFIG_PATHS: config_file = os.path.join(p, 'ocrd_logging.py') if os.path.exists(config_file): logging.info("Loading logging configuration from '%s'", config_file) with open(config_file) as f: code = compile(f.read(), config_file, 'exec') exec(code, globals(), locals())
def initLogging(): '\n \n ' logging.basicConfig(level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') logging.getLogger().setLevel(logging.INFO) logging.getLogger('PIL').setLevel(logging.INFO) CONFIG_PATHS = [os.path.curdir, os.path.join(os.path.expanduser('~')), '/etc'] for p in CONFIG_PATHS: config_file = os.path.join(p, 'ocrd_logging.py') if os.path.exists(config_file): logging.info("Loading logging configuration from '%s'", config_file) with open(config_file) as f: code = compile(f.read(), config_file, 'exec') exec(code, globals(), locals())<|docstring|>Sets logging defaults<|endoftext|>
a846ef7b29a7e3c7b51532096b8d7587ba88670ebcbd1c8528ad9560fdc09344
def fit_estimator(config: ModelConfig, dataset: DatasetConfig, num_learning_rate_decays: int=3, num_model_checkpoints: int=5, validate: bool=False, verbose: bool=True) -> FitResult: '\n Fits the given estimator using the provided training dataset.\n\n Args:\n config: The configuration of the estimator to be fitted.\n dataset: The configuration of the dataset to be used for fitting.\n num_learning_rate_decays: The number of times the learning rate should be decayed.\n validate: Whether to use a validation dataset.\n choose_best: Whether the best model according to the validation loss within Hyperband\n intervals should be used.\n verbose: Whether to create multiple predictors and log associated information.\n\n Returns:\n The result from fitting, contains most notably the list of predictors fitted during\n training. Contains a single entry if the model is not trainable or `verbose` is set to\n false.\n ' count_callback = ParameterCountCallback() callbacks: List[Callback] = [count_callback] milestones = [] if isinstance(config, TrainConfig): training_time = (config.training_fraction * dataset.max_training_time) else: training_time = 0 with tempfile.TemporaryDirectory() as tmp_dir: saver_callback: ModelSaverCallback if isinstance(config, TrainConfig): hyperband_milestones = [(training_time * ((1 / 3) ** i)) for i in reversed(range(num_model_checkpoints))] for i in range((len(hyperband_milestones) // 2)): milestones += [hyperband_milestones[i]] pivot = hyperband_milestones[(len(hyperband_milestones) // 2)] milestones += np.arange(pivot, (training_time + (pivot / 2)), pivot).tolist() saver_callback = ModelSaverCallback(Path(tmp_dir), milestones) callbacks += [saver_callback] if (num_learning_rate_decays > 0): learning_rate_callback = LearningRateScheduleCallback(milestones=[((training_time / (num_learning_rate_decays + 1)) * i) for i in range(1, (num_learning_rate_decays + 1))]) callbacks += [learning_rate_callback] meta = dataset.meta estimator = config.create_estimator(freq=meta.freq, prediction_length=cast(int, meta.prediction_length), time_features=dataset.has_time_features, training_time=training_time, validation_milestones=(milestones if validate else []), callbacks=callbacks) train_kwargs = {} if (isinstance(config, TrainConfig) and validate): train_kwargs['validation_data'] = dataset.data.val().gluonts() train_data = dataset.data.train(validate).gluonts() predictor = estimator.train(train_data, **train_kwargs) if (not isinstance(config, TrainConfig)): if verbose: log_metric('num_model_parameters', 0) log_metric('num_gradient_updates', 0) log_metric('training_time', 0) return FitResult(config, [predictor], [0.0], 0) predictors = [] model_paths = [] with tempfile.TemporaryDirectory() as model_dir: for (i, params) in enumerate(saver_callback.saved_parameters): saver_callback.network.load_parameters(params.absolute().as_posix()) predictor = cast(TrainConfig, config).create_predictor(estimator, cast(nn.HybridBlock, saver_callback.network)) path = (Path(model_dir) / f'model_{i}') path.mkdir() predictor.serialize(path) model_paths.append(path) copied_predictor = Predictor.deserialize(model_paths[i]) predictors.append(copied_predictor) log_metric('num_model_parameters', count_callback.num_parameters) log_metric('num_gradient_updates', saver_callback.num_gradient_updates[i]) log_metric('training_time', saver_callback.training_times[i]) return FitResult(config, predictors, saver_callback.training_times, count_callback.num_parameters)
Fits the given estimator using the provided training dataset. Args: config: The configuration of the estimator to be fitted. dataset: The configuration of the dataset to be used for fitting. num_learning_rate_decays: The number of times the learning rate should be decayed. validate: Whether to use a validation dataset. choose_best: Whether the best model according to the validation loss within Hyperband intervals should be used. verbose: Whether to create multiple predictors and log associated information. Returns: The result from fitting, contains most notably the list of predictors fitted during training. Contains a single entry if the model is not trainable or `verbose` is set to false.
src/gluonts/nursery/tsbench/src/tsbench/evaluations/training/fit.py
fit_estimator
RingoIngo/gluon-ts
1
python
def fit_estimator(config: ModelConfig, dataset: DatasetConfig, num_learning_rate_decays: int=3, num_model_checkpoints: int=5, validate: bool=False, verbose: bool=True) -> FitResult: '\n Fits the given estimator using the provided training dataset.\n\n Args:\n config: The configuration of the estimator to be fitted.\n dataset: The configuration of the dataset to be used for fitting.\n num_learning_rate_decays: The number of times the learning rate should be decayed.\n validate: Whether to use a validation dataset.\n choose_best: Whether the best model according to the validation loss within Hyperband\n intervals should be used.\n verbose: Whether to create multiple predictors and log associated information.\n\n Returns:\n The result from fitting, contains most notably the list of predictors fitted during\n training. Contains a single entry if the model is not trainable or `verbose` is set to\n false.\n ' count_callback = ParameterCountCallback() callbacks: List[Callback] = [count_callback] milestones = [] if isinstance(config, TrainConfig): training_time = (config.training_fraction * dataset.max_training_time) else: training_time = 0 with tempfile.TemporaryDirectory() as tmp_dir: saver_callback: ModelSaverCallback if isinstance(config, TrainConfig): hyperband_milestones = [(training_time * ((1 / 3) ** i)) for i in reversed(range(num_model_checkpoints))] for i in range((len(hyperband_milestones) // 2)): milestones += [hyperband_milestones[i]] pivot = hyperband_milestones[(len(hyperband_milestones) // 2)] milestones += np.arange(pivot, (training_time + (pivot / 2)), pivot).tolist() saver_callback = ModelSaverCallback(Path(tmp_dir), milestones) callbacks += [saver_callback] if (num_learning_rate_decays > 0): learning_rate_callback = LearningRateScheduleCallback(milestones=[((training_time / (num_learning_rate_decays + 1)) * i) for i in range(1, (num_learning_rate_decays + 1))]) callbacks += [learning_rate_callback] meta = dataset.meta estimator = config.create_estimator(freq=meta.freq, prediction_length=cast(int, meta.prediction_length), time_features=dataset.has_time_features, training_time=training_time, validation_milestones=(milestones if validate else []), callbacks=callbacks) train_kwargs = {} if (isinstance(config, TrainConfig) and validate): train_kwargs['validation_data'] = dataset.data.val().gluonts() train_data = dataset.data.train(validate).gluonts() predictor = estimator.train(train_data, **train_kwargs) if (not isinstance(config, TrainConfig)): if verbose: log_metric('num_model_parameters', 0) log_metric('num_gradient_updates', 0) log_metric('training_time', 0) return FitResult(config, [predictor], [0.0], 0) predictors = [] model_paths = [] with tempfile.TemporaryDirectory() as model_dir: for (i, params) in enumerate(saver_callback.saved_parameters): saver_callback.network.load_parameters(params.absolute().as_posix()) predictor = cast(TrainConfig, config).create_predictor(estimator, cast(nn.HybridBlock, saver_callback.network)) path = (Path(model_dir) / f'model_{i}') path.mkdir() predictor.serialize(path) model_paths.append(path) copied_predictor = Predictor.deserialize(model_paths[i]) predictors.append(copied_predictor) log_metric('num_model_parameters', count_callback.num_parameters) log_metric('num_gradient_updates', saver_callback.num_gradient_updates[i]) log_metric('training_time', saver_callback.training_times[i]) return FitResult(config, predictors, saver_callback.training_times, count_callback.num_parameters)
def fit_estimator(config: ModelConfig, dataset: DatasetConfig, num_learning_rate_decays: int=3, num_model_checkpoints: int=5, validate: bool=False, verbose: bool=True) -> FitResult: '\n Fits the given estimator using the provided training dataset.\n\n Args:\n config: The configuration of the estimator to be fitted.\n dataset: The configuration of the dataset to be used for fitting.\n num_learning_rate_decays: The number of times the learning rate should be decayed.\n validate: Whether to use a validation dataset.\n choose_best: Whether the best model according to the validation loss within Hyperband\n intervals should be used.\n verbose: Whether to create multiple predictors and log associated information.\n\n Returns:\n The result from fitting, contains most notably the list of predictors fitted during\n training. Contains a single entry if the model is not trainable or `verbose` is set to\n false.\n ' count_callback = ParameterCountCallback() callbacks: List[Callback] = [count_callback] milestones = [] if isinstance(config, TrainConfig): training_time = (config.training_fraction * dataset.max_training_time) else: training_time = 0 with tempfile.TemporaryDirectory() as tmp_dir: saver_callback: ModelSaverCallback if isinstance(config, TrainConfig): hyperband_milestones = [(training_time * ((1 / 3) ** i)) for i in reversed(range(num_model_checkpoints))] for i in range((len(hyperband_milestones) // 2)): milestones += [hyperband_milestones[i]] pivot = hyperband_milestones[(len(hyperband_milestones) // 2)] milestones += np.arange(pivot, (training_time + (pivot / 2)), pivot).tolist() saver_callback = ModelSaverCallback(Path(tmp_dir), milestones) callbacks += [saver_callback] if (num_learning_rate_decays > 0): learning_rate_callback = LearningRateScheduleCallback(milestones=[((training_time / (num_learning_rate_decays + 1)) * i) for i in range(1, (num_learning_rate_decays + 1))]) callbacks += [learning_rate_callback] meta = dataset.meta estimator = config.create_estimator(freq=meta.freq, prediction_length=cast(int, meta.prediction_length), time_features=dataset.has_time_features, training_time=training_time, validation_milestones=(milestones if validate else []), callbacks=callbacks) train_kwargs = {} if (isinstance(config, TrainConfig) and validate): train_kwargs['validation_data'] = dataset.data.val().gluonts() train_data = dataset.data.train(validate).gluonts() predictor = estimator.train(train_data, **train_kwargs) if (not isinstance(config, TrainConfig)): if verbose: log_metric('num_model_parameters', 0) log_metric('num_gradient_updates', 0) log_metric('training_time', 0) return FitResult(config, [predictor], [0.0], 0) predictors = [] model_paths = [] with tempfile.TemporaryDirectory() as model_dir: for (i, params) in enumerate(saver_callback.saved_parameters): saver_callback.network.load_parameters(params.absolute().as_posix()) predictor = cast(TrainConfig, config).create_predictor(estimator, cast(nn.HybridBlock, saver_callback.network)) path = (Path(model_dir) / f'model_{i}') path.mkdir() predictor.serialize(path) model_paths.append(path) copied_predictor = Predictor.deserialize(model_paths[i]) predictors.append(copied_predictor) log_metric('num_model_parameters', count_callback.num_parameters) log_metric('num_gradient_updates', saver_callback.num_gradient_updates[i]) log_metric('training_time', saver_callback.training_times[i]) return FitResult(config, predictors, saver_callback.training_times, count_callback.num_parameters)<|docstring|>Fits the given estimator using the provided 
training dataset. Args: config: The configuration of the estimator to be fitted. dataset: The configuration of the dataset to be used for fitting. num_learning_rate_decays: The number of times the learning rate should be decayed. validate: Whether to use a validation dataset. choose_best: Whether the best model according to the validation loss within Hyperband intervals should be used. verbose: Whether to create multiple predictors and log associated information. Returns: The result from fitting, contains most notably the list of predictors fitted during training. Contains a single entry if the model is not trainable or `verbose` is set to false.<|endoftext|>
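A worked numeric sketch (the budget and checkpoint count are my assumptions) of the milestone schedule fit_estimator builds: geometric Hyperband-style checkpoints, of which the earliest half are kept, followed by a linear fill up to the training budget.

import numpy as np

training_time = 81.0            # assumed training budget in seconds
num_model_checkpoints = 5
hyperband = [training_time * (1 / 3) ** i for i in reversed(range(num_model_checkpoints))]
# ≈ [1.0, 3.0, 9.0, 27.0, 81.0]
milestones = hyperband[: len(hyperband) // 2]   # keep the earliest half: [1.0, 3.0]
pivot = hyperband[len(hyperband) // 2]          # 9.0
milestones += np.arange(pivot, training_time + pivot / 2, pivot).tolist()
print(milestones)  # ≈ [1, 3, 9, 18, 27, 36, 45, 54, 63, 72, 81]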
c2397e31af57555a2797b72d14705e5300776e25ce56d1af40b27d3eeeeb5646
def odir_limiter(self, odir, max_odirs=(- 1)): 'Function to backup previously run output directory to maintain a\n history of a limited number of output directories. It deletes the output\n directory with the oldest timestamps, if the limit is reached. It returns\n a list of directories that remain after deletion.\n Arguments:\n odir: The output directory to backup\n max_odirs: Maximum output directories to maintain as history.\n\n Returns:\n dirs: Space-separated list of directories that remain after deletion.\n ' try: if os.path.exists(odir): ts = run_cmd(((((("date '+" + self.sim_cfg.ts_format) + '\' -d "') + "$(stat -c '%y' ") + odir) + ')"')) os.system(((((('mv ' + odir) + ' ') + odir) + '_') + ts)) except IOError: log.error('Failed to back up existing output directory %s', odir) dirs = '' try: pdir = os.path.realpath((odir + '/..')) if (pdir == '/'): log.fatal('Something went wrong while processing "%s": odir = "%s"', self.name, odir) sys.exit(1) if os.path.exists(pdir): find_cmd = (('find ' + pdir) + ' -mindepth 1 -maxdepth 1 -type d ') dirs = run_cmd(find_cmd) dirs = dirs.replace('\n', ' ') list_dirs = dirs.split() num_dirs = len(list_dirs) if (max_odirs == (- 1)): max_odirs = self.max_odirs num_rm_dirs = (num_dirs - max_odirs) if (num_rm_dirs > (- 1)): rm_dirs = run_cmd((((find_cmd + "-printf '%T+ %p\n' | sort | head -n ") + str((num_rm_dirs + 1))) + " | awk '{print $2}'")) rm_dirs = rm_dirs.replace('\n', ' ') dirs = dirs.replace(rm_dirs, '') os.system(('/bin/rm -rf ' + rm_dirs)) except IOError: log.error('Failed to delete old run directories!') return dirs
Function to backup previously run output directory to maintain a history of a limited number of output directories. It deletes the output directory with the oldest timestamps, if the limit is reached. It returns a list of directories that remain after deletion. Arguments: odir: The output directory to backup max_odirs: Maximum output directories to maintain as history. Returns: dirs: Space-separated list of directories that remain after deletion.
vendor/lowrisc_ip/dvsim/Deploy.py
odir_limiter
jacekmw8/ibex
1
python
def odir_limiter(self, odir, max_odirs=(- 1)): 'Function to backup previously run output directory to maintain a\n history of a limited number of output directories. It deletes the output\n directory with the oldest timestamps, if the limit is reached. It returns\n a list of directories that remain after deletion.\n Arguments:\n odir: The output directory to backup\n max_odirs: Maximum output directories to maintain as history.\n\n Returns:\n dirs: Space-separated list of directories that remain after deletion.\n ' try: if os.path.exists(odir): ts = run_cmd(((((("date '+" + self.sim_cfg.ts_format) + '\' -d "') + "$(stat -c '%y' ") + odir) + ')"')) os.system(((((('mv ' + odir) + ' ') + odir) + '_') + ts)) except IOError: log.error('Failed to back up existing output directory %s', odir) dirs = '' try: pdir = os.path.realpath((odir + '/..')) if (pdir == '/'): log.fatal('Something went wrong while processing "%s": odir = "%s"', self.name, odir) sys.exit(1) if os.path.exists(pdir): find_cmd = (('find ' + pdir) + ' -mindepth 1 -maxdepth 1 -type d ') dirs = run_cmd(find_cmd) dirs = dirs.replace('\n', ' ') list_dirs = dirs.split() num_dirs = len(list_dirs) if (max_odirs == (- 1)): max_odirs = self.max_odirs num_rm_dirs = (num_dirs - max_odirs) if (num_rm_dirs > (- 1)): rm_dirs = run_cmd((((find_cmd + "-printf '%T+ %p\n' | sort | head -n ") + str((num_rm_dirs + 1))) + " | awk '{print $2}'")) rm_dirs = rm_dirs.replace('\n', ' ') dirs = dirs.replace(rm_dirs, '') os.system(('/bin/rm -rf ' + rm_dirs)) except IOError: log.error('Failed to delete old run directories!') return dirs
def odir_limiter(self, odir, max_odirs=(- 1)): 'Function to backup previously run output directory to maintain a\n history of a limited number of output directories. It deletes the output\n directory with the oldest timestamps, if the limit is reached. It returns\n a list of directories that remain after deletion.\n Arguments:\n odir: The output directory to backup\n max_odirs: Maximum output directories to maintain as history.\n\n Returns:\n dirs: Space-separated list of directories that remain after deletion.\n ' try: if os.path.exists(odir): ts = run_cmd(((((("date '+" + self.sim_cfg.ts_format) + '\' -d "') + "$(stat -c '%y' ") + odir) + ')"')) os.system(((((('mv ' + odir) + ' ') + odir) + '_') + ts)) except IOError: log.error('Failed to back up existing output directory %s', odir) dirs = '' try: pdir = os.path.realpath((odir + '/..')) if (pdir == '/'): log.fatal('Something went wrong while processing "%s": odir = "%s"', self.name, odir) sys.exit(1) if os.path.exists(pdir): find_cmd = (('find ' + pdir) + ' -mindepth 1 -maxdepth 1 -type d ') dirs = run_cmd(find_cmd) dirs = dirs.replace('\n', ' ') list_dirs = dirs.split() num_dirs = len(list_dirs) if (max_odirs == (- 1)): max_odirs = self.max_odirs num_rm_dirs = (num_dirs - max_odirs) if (num_rm_dirs > (- 1)): rm_dirs = run_cmd((((find_cmd + "-printf '%T+ %p\n' | sort | head -n ") + str((num_rm_dirs + 1))) + " | awk '{print $2}'")) rm_dirs = rm_dirs.replace('\n', ' ') dirs = dirs.replace(rm_dirs, '') os.system(('/bin/rm -rf ' + rm_dirs)) except IOError: log.error('Failed to delete old run directories!') return dirs<|docstring|>Function to backup previously run output directory to maintain a history of a limited number of output directories. It deletes the output directory with the oldest timestamps, if the limit is reached. It returns a list of directories that remain after deletion. Arguments: odir: The output directory to backup max_odirs: Maximum output directories to maintain as history. Returns: dirs: Space-separated list of directories that remain after deletion.<|endoftext|>
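The retention policy above shells out to find/rm; as a plain-Python illustration of the same intent (keep only the newest N run directories, deleting the oldest first), here is a hedged sketch that is not dvsim's actual implementation — note the original also frees one extra slot for the directory about to be created.

import shutil
from pathlib import Path

def limit_run_dirs(parent: Path, max_odirs: int) -> list:
    # Sort existing run directories oldest-first by modification time.
    run_dirs = sorted((d for d in parent.iterdir() if d.is_dir()),
                      key=lambda d: d.stat().st_mtime)
    # Delete the oldest ones so that at most `max_odirs` remain.
    for old in run_dirs[: max(0, len(run_dirs) - max_odirs)]:
        shutil.rmtree(old, ignore_errors=True)
    return [d for d in run_dirs if d.exists()]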
6556def277490b918dbc6f2b6a834071261df5ca18dbf5a9287e5242c6705c2b
def get_status(self): 'Override base class get_status implementation for additional post-status\n actions.' super().get_status() if (self.status not in ['D', 'P']): if os.path.exists(self.cov_db_test_dir): log.log(VERBOSE, 'Deleting coverage data of failing test:\n%s', self.cov_db_test_dir) os.system(('/bin/rm -rf ' + self.cov_db_test_dir))
Override base class get_status implementation for additional post-status actions.
vendor/lowrisc_ip/dvsim/Deploy.py
get_status
jacekmw8/ibex
1
python
def get_status(self): 'Override base class get_status implementation for additional post-status\n actions.' super().get_status() if (self.status not in ['D', 'P']): if os.path.exists(self.cov_db_test_dir): log.log(VERBOSE, 'Deleting coverage data of failing test:\n%s', self.cov_db_test_dir) os.system(('/bin/rm -rf ' + self.cov_db_test_dir))
def get_status(self): 'Override base class get_status implementation for additional post-status\n actions.' super().get_status() if (self.status not in ['D', 'P']): if os.path.exists(self.cov_db_test_dir): log.log(VERBOSE, 'Deleting coverage data of failing test:\n%s', self.cov_db_test_dir) os.system(('/bin/rm -rf ' + self.cov_db_test_dir))<|docstring|>Override base class get_status implementation for additional post-status actions.<|endoftext|>
51ecc0279a0b20c880c204d9b0b31570bf2cffd8df9ceb6050c9a91c762e14a1
def create_parser(): '\n Creates the argparse parser with all the arguments.\n ' parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) subparsers = parser.add_subparsers(title='subcommands', dest='cmd') parser_allocate = subparsers.add_parser('allocate', help='Allocate IP load test') parser_release = subparsers.add_parser('release', help='Release IP load test') for cmd in [parser_allocate, parser_release]: cmd.add_argument('--num', default=2000, help='Number of requests') cmd.add_argument('--import_path', help='Protobuf dir import path') parser_allocate.set_defaults(func=parser_allocate) parser_release.set_defaults(func=parser_release) return parser
Creates the argparse parser with all the arguments.
lte/gateway/python/load_tests/loadtest_mobilityd.py
create_parser
lucasgonze/magma
849
python
def create_parser(): '\n \n ' parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) subparsers = parser.add_subparsers(title='subcommands', dest='cmd') parser_allocate = subparsers.add_parser('allocate', help='Allocate IP load test') parser_release = subparsers.add_parser('release', help='Release IP load test') for cmd in [parser_allocate, parser_release]: cmd.add_argument('--num', default=2000, help='Number of requests') cmd.add_argument('--import_path', help='Protobuf dir import path') parser_allocate.set_defaults(func=parser_allocate) parser_release.set_defaults(func=parser_release) return parser
def create_parser(): '\n \n ' parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) subparsers = parser.add_subparsers(title='subcommands', dest='cmd') parser_allocate = subparsers.add_parser('allocate', help='Allocate IP load test') parser_release = subparsers.add_parser('release', help='Release IP load test') for cmd in [parser_allocate, parser_release]: cmd.add_argument('--num', default=2000, help='Number of requests') cmd.add_argument('--import_path', help='Protobuf dir import path') parser_allocate.set_defaults(func=parser_allocate) parser_release.set_defaults(func=parser_release) return parser<|docstring|>Creates the argparse parser with all the arguments.<|endoftext|>
07ccbcb352b642e3464786542164057fd669f9f9b849a09487ee5c6eb14debf4
def gauss_hermite(n, mode='numpy'): '\n Gauss-Hermite quadrature for integrals of the form\n\n int_{-inf}^{+inf} exp(-x^2) f(x) dx.\n ' if (mode == 'numpy'): (points, weights) = numpy.polynomial.hermite.hermgauss(n) else: (_, _, alpha, beta) = orthopy.e1r2.recurrence_coefficients(n, 'monic', symbolic=True) beta[1:] /= 2 (points, weights) = scheme_from_rc(alpha, beta, mode=mode) return E1r2Scheme(f'Gauss-Hermite ({n})', weights, points, ((2 * n) - 1))
Gauss-Hermite quadrature for integrals of the form int_{-inf}^{+inf} exp(-x^2) f(x) dx.
quadpy/e1r2/_gauss_hermite.py
gauss_hermite
whzup/quadpy
0
python
def gauss_hermite(n, mode='numpy'): '\n Gauss-Hermite quadrature for integrals of the form\n\n int_{-inf}^{+inf} exp(-x^2) f(x) dx.\n ' if (mode == 'numpy'): (points, weights) = numpy.polynomial.hermite.hermgauss(n) else: (_, _, alpha, beta) = orthopy.e1r2.recurrence_coefficients(n, 'monic', symbolic=True) beta[1:] /= 2 (points, weights) = scheme_from_rc(alpha, beta, mode=mode) return E1r2Scheme(f'Gauss-Hermite ({n})', weights, points, ((2 * n) - 1))
def gauss_hermite(n, mode='numpy'): '\n Gauss-Hermite quadrature for integrals of the form\n\n int_{-inf}^{+inf} exp(-x^2) f(x) dx.\n ' if (mode == 'numpy'): (points, weights) = numpy.polynomial.hermite.hermgauss(n) else: (_, _, alpha, beta) = orthopy.e1r2.recurrence_coefficients(n, 'monic', symbolic=True) beta[1:] /= 2 (points, weights) = scheme_from_rc(alpha, beta, mode=mode) return E1r2Scheme(f'Gauss-Hermite ({n})', weights, points, ((2 * n) - 1))<|docstring|>Gauss-Hermite quadrature for integrals of the form int_{-inf}^{+inf} exp(-x^2) f(x) dx.<|endoftext|>
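A quick numerical check of the quadrature the docstring describes (the integrand and node count are my example): with 5 Gauss-Hermite nodes, the weighted sum reproduces the integral over R of exp(-x^2) * x^2 dx = sqrt(pi) / 2 to floating-point accuracy, since x^2 is a low-degree polynomial.

import numpy as np

points, weights = np.polynomial.hermite.hermgauss(5)
approx = np.sum(weights * points**2)
print(approx, np.sqrt(np.pi) / 2)   # both ≈ 0.8862269254527580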
86d3cde675a699af9c6ea81821cd05795d1ddfd34579324a7ec61543b710ba11
def load_data(name): 'Load splitted datasets for one experiment' train = pd.read_parquet(f'data/processed/X_{name}_train.parquet') valid = pd.read_parquet(f'data/processed/X_{name}_valid.parquet') test = pd.read_parquet(f'data/processed/X_{name}_test.parquet') exp_dict = {'name': name, 'train': train, 'valid': valid, 'test': test} return exp_dict
Load splitted datasets for one experiment
scripts/07_compare_features.py
load_data
ThorbenJensen/feature-engineering
15
python
def load_data(name): train = pd.read_parquet(f'data/processed/X_{name}_train.parquet') valid = pd.read_parquet(f'data/processed/X_{name}_valid.parquet') test = pd.read_parquet(f'data/processed/X_{name}_test.parquet') exp_dict = {'name': name, 'train': train, 'valid': valid, 'test': test} return exp_dict
def load_data(name): train = pd.read_parquet(f'data/processed/X_{name}_train.parquet') valid = pd.read_parquet(f'data/processed/X_{name}_valid.parquet') test = pd.read_parquet(f'data/processed/X_{name}_test.parquet') exp_dict = {'name': name, 'train': train, 'valid': valid, 'test': test} return exp_dict<|docstring|>Load splitted datasets for one experiment<|endoftext|>
5cf5c1c5cbec78cc16e992c60a01d45b63268e245e86abe99084a576b6f18076
def load_target(): 'Load target values independently from generated feature sets' y = {} for split in ['train', 'valid', 'test']: y[split] = pd.read_parquet(f'data/processed/Y_target_{split}.parquet').loc[:, CNF_TARGET_COL].squeeze().copy() return y
Load target values independently from generated feature sets
scripts/07_compare_features.py
load_target
ThorbenJensen/feature-engineering
15
python
def load_target(): y = {} for split in ['train', 'valid', 'test']: y[split] = pd.read_parquet(f'data/processed/Y_target_{split}.parquet').loc[:, CNF_TARGET_COL].squeeze().copy() return y
def load_target(): y = {} for split in ['train', 'valid', 'test']: y[split] = pd.read_parquet(f'data/processed/Y_target_{split}.parquet').loc[:, CNF_TARGET_COL].squeeze().copy() return y<|docstring|>Load target values independently from generated feature sets<|endoftext|>
8686d94da654d87f5f1edded2e4c0c3384fb9f13a9b03f4d39a1ef08184c45e1
def evaluate_features(experiment_seed_tuple): '\n Evaluate Feature Matrix for each algorithm by using an independent\n XG-Boost Regressor.\n :param experiment: Dict containing Name, Features(train, vaild, test)\n ' experiment = experiment_seed_tuple[0] seed = experiment_seed_tuple[1] X_train = experiment['train'] X_valid = experiment['valid'] X_test = experiment['test'] Y_train = Y_target['train'] Y_valid = Y_target['valid'] Y_test = Y_target['test'] def df_to_dmatrix(features, target): x = features.drop(columns=['Date'], errors='ignore') y = target dmatrix = xgb.DMatrix(x, label=y) return dmatrix dm_train = df_to_dmatrix(X_train, Y_train) dm_valid = df_to_dmatrix(X_valid, Y_valid) dm_test = df_to_dmatrix(X_test, Y_test) CMP_XGB_PARAMS['seed'] = seed evals = [(dm_train, 'train'), (dm_valid, 'valid')] model_bst = xgb.train(params=CMP_XGB_PARAMS, dtrain=dm_train, evals=evals, num_boost_round=CMP_NUM_BOOST_ROUND, early_stopping_rounds=CMP_EARLY_STOPPING_ROUNDS) best_ntree_limit = model_bst.best_ntree_limit evals2 = [(dm_train, 'train'), (dm_test, 'test')] model_final = xgb.train(params=CMP_XGB_PARAMS, dtrain=dm_train, evals=evals2, num_boost_round=best_ntree_limit) feature_information_gain = model_final.get_score(importance_type='gain') feature_importance = pd.DataFrame(list(feature_information_gain.items()), columns=['feature', 'information_gain']) feature_importance['algorithm'] = experiment['name'] feature_importance['seed'] = seed feature_importance = feature_importance[FEATURE_IMPORTANCE_COLUMNS] y_pred = model_final.predict(dm_test) y_true = dm_test.get_label() preds = pd.DataFrame() preds['y_true'] = y_true preds['y_pred'] = y_pred preds['algorithm'] = experiment['name'] preds['seed'] = seed preds = preds[PREDS_COLUMNS] r2 = r2_score(y_true=y_true, y_pred=y_pred) mse = mean_squared_error(y_true=y_true, y_pred=y_pred) rmse = sqrt(mse) metrics = pd.DataFrame([[experiment['name'], seed, mse, rmse, r2]], columns=EVAL_COLUMNS) eval_dict = {'name': experiment['name'], 'metrics': metrics, 'preds': preds, 'feature_importance': feature_importance} return eval_dict
Evaluate Feature Matrix for each algorithm by using an independent XG-Boost Regressor. :param experiment: Dict containing Name, Features(train, vaild, test)
scripts/07_compare_features.py
evaluate_features
ThorbenJensen/feature-engineering
15
python
def evaluate_features(experiment_seed_tuple): '\n Evaluate Feature Matrix for each algorithm by using an independent\n XG-Boost Regressor.\n :param experiment: Dict containing Name, Features(train, vaild, test)\n ' experiment = experiment_seed_tuple[0] seed = experiment_seed_tuple[1] X_train = experiment['train'] X_valid = experiment['valid'] X_test = experiment['test'] Y_train = Y_target['train'] Y_valid = Y_target['valid'] Y_test = Y_target['test'] def df_to_dmatrix(features, target): x = features.drop(columns=['Date'], errors='ignore') y = target dmatrix = xgb.DMatrix(x, label=y) return dmatrix dm_train = df_to_dmatrix(X_train, Y_train) dm_valid = df_to_dmatrix(X_valid, Y_valid) dm_test = df_to_dmatrix(X_test, Y_test) CMP_XGB_PARAMS['seed'] = seed evals = [(dm_train, 'train'), (dm_valid, 'valid')] model_bst = xgb.train(params=CMP_XGB_PARAMS, dtrain=dm_train, evals=evals, num_boost_round=CMP_NUM_BOOST_ROUND, early_stopping_rounds=CMP_EARLY_STOPPING_ROUNDS) best_ntree_limit = model_bst.best_ntree_limit evals2 = [(dm_train, 'train'), (dm_test, 'test')] model_final = xgb.train(params=CMP_XGB_PARAMS, dtrain=dm_train, evals=evals2, num_boost_round=best_ntree_limit) feature_information_gain = model_final.get_score(importance_type='gain') feature_importance = pd.DataFrame(list(feature_information_gain.items()), columns=['feature', 'information_gain']) feature_importance['algorithm'] = experiment['name'] feature_importance['seed'] = seed feature_importance = feature_importance[FEATURE_IMPORTANCE_COLUMNS] y_pred = model_final.predict(dm_test) y_true = dm_test.get_label() preds = pd.DataFrame() preds['y_true'] = y_true preds['y_pred'] = y_pred preds['algorithm'] = experiment['name'] preds['seed'] = seed preds = preds[PREDS_COLUMNS] r2 = r2_score(y_true=y_true, y_pred=y_pred) mse = mean_squared_error(y_true=y_true, y_pred=y_pred) rmse = sqrt(mse) metrics = pd.DataFrame([[experiment['name'], seed, mse, rmse, r2]], columns=EVAL_COLUMNS) eval_dict = {'name': experiment['name'], 'metrics': metrics, 'preds': preds, 'feature_importance': feature_importance} return eval_dict
def evaluate_features(experiment_seed_tuple): '\n Evaluate Feature Matrix for each algorithm by using an independent\n XG-Boost Regressor.\n :param experiment: Dict containing Name, Features(train, vaild, test)\n ' experiment = experiment_seed_tuple[0] seed = experiment_seed_tuple[1] X_train = experiment['train'] X_valid = experiment['valid'] X_test = experiment['test'] Y_train = Y_target['train'] Y_valid = Y_target['valid'] Y_test = Y_target['test'] def df_to_dmatrix(features, target): x = features.drop(columns=['Date'], errors='ignore') y = target dmatrix = xgb.DMatrix(x, label=y) return dmatrix dm_train = df_to_dmatrix(X_train, Y_train) dm_valid = df_to_dmatrix(X_valid, Y_valid) dm_test = df_to_dmatrix(X_test, Y_test) CMP_XGB_PARAMS['seed'] = seed evals = [(dm_train, 'train'), (dm_valid, 'valid')] model_bst = xgb.train(params=CMP_XGB_PARAMS, dtrain=dm_train, evals=evals, num_boost_round=CMP_NUM_BOOST_ROUND, early_stopping_rounds=CMP_EARLY_STOPPING_ROUNDS) best_ntree_limit = model_bst.best_ntree_limit evals2 = [(dm_train, 'train'), (dm_test, 'test')] model_final = xgb.train(params=CMP_XGB_PARAMS, dtrain=dm_train, evals=evals2, num_boost_round=best_ntree_limit) feature_information_gain = model_final.get_score(importance_type='gain') feature_importance = pd.DataFrame(list(feature_information_gain.items()), columns=['feature', 'information_gain']) feature_importance['algorithm'] = experiment['name'] feature_importance['seed'] = seed feature_importance = feature_importance[FEATURE_IMPORTANCE_COLUMNS] y_pred = model_final.predict(dm_test) y_true = dm_test.get_label() preds = pd.DataFrame() preds['y_true'] = y_true preds['y_pred'] = y_pred preds['algorithm'] = experiment['name'] preds['seed'] = seed preds = preds[PREDS_COLUMNS] r2 = r2_score(y_true=y_true, y_pred=y_pred) mse = mean_squared_error(y_true=y_true, y_pred=y_pred) rmse = sqrt(mse) metrics = pd.DataFrame([[experiment['name'], seed, mse, rmse, r2]], columns=EVAL_COLUMNS) eval_dict = {'name': experiment['name'], 'metrics': metrics, 'preds': preds, 'feature_importance': feature_importance} return eval_dict<|docstring|>Evaluate Feature Matrix for each algorithm by using an independent XG-Boost Regressor. :param experiment: Dict containing Name, Features(train, vaild, test)<|endoftext|>
e4a12773466ed80dfa64a9a4e225efb900a919f851a27e2a5c3c421226a8bb6e
def read_csv_molecules(filename): 'Read molecules from the specified path\n\n Parameters\n ----------\n filename : str\n File from which molecules are to be read\n\n Returns\n -------\n molecules : list of openeye.oechem.OEMol\n The read molecules\n ' from openeye import oechem mol = oechem.OEMol() molecules = list() with oechem.oemolistream(filename) as ifs: while oechem.OEReadCSVFile(ifs, mol): molecules.append(oechem.OEMol(mol)) return molecules
Read molecules from the specified path Parameters ---------- filename : str File from which molecules are to be read Returns ------- molecules : list of openeye.oechem.OEMol The read molecules
scripts/attic/02-dock-ligands-to-corresponding-receptors-multiprocessing.py
read_csv_molecules
JenkeScheen/covid-moonshot
60
python
def read_csv_molecules(filename): 'Read molecules from the specified path\n\n Parameters\n ----------\n filename : str\n File from which molecules are to be read\n\n Returns\n -------\n molecules : list of openeye.oechem.OEMol\n The read molecules\n ' from openeye import oechem mol = oechem.OEMol() molecules = list() with oechem.oemolistream(filename) as ifs: while oechem.OEReadCSVFile(ifs, mol): molecules.append(oechem.OEMol(mol)) return molecules
def read_csv_molecules(filename): 'Read molecules from the specified path\n\n Parameters\n ----------\n filename : str\n File from which molecules are to be read\n\n Returns\n -------\n molecules : list of openeye.oechem.OEMol\n The read molecules\n ' from openeye import oechem mol = oechem.OEMol() molecules = list() with oechem.oemolistream(filename) as ifs: while oechem.OEReadCSVFile(ifs, mol): molecules.append(oechem.OEMol(mol)) return molecules<|docstring|>Read molecules from the specified path Parameters ---------- filename : str File from which molecules are to be read Returns ------- molecules : list of openeye.oechem.OEMol The read molecules<|endoftext|>
5f426f0c85e7b9c00c8f8cec64aa4e820ffb55ed3f3eef9cea069d3af9ded435
def dock_molecule(molecule, default_receptor='x0387'): "\n Dock the specified molecules, writing out to specified file\n\n Parameters\n ----------\n molecule : OEMol\n The molecule to dock\n default_receptor : str, optional, default='0387'\n The default receptor to dock to\n\n Returns\n -------\n all_docked_molecules : list of OEMol\n All docked molecules\n " import os import oechem molecule = oechem.OEMol(molecule) fragments = list() fragments = oechem.OEGetSDData(molecule, 'fragments').split(',') fragments = [fragment for fragment in fragments if os.path.exists(f'../receptors/Mpro-{fragment}-receptor.oeb.gz')] if (len(fragments) == 0): fragments = [default_receptor] all_docked_molecules = list() for fragment in fragments: molecule_to_dock = oechem.OEMol(molecule) import os receptor_filename = os.path.join(f'../receptors/Mpro-{fragment}-receptor.oeb.gz') oechem.OESetSDData(molecule_to_dock, 'fragments', fragment) from openeye import oequacpac protomer = oechem.OEMol() protomers = [oechem.OEMol(protomer) for protomer in oequacpac.OEGetReasonableProtomers(molecule_to_dock)] docked_molecules = dock_molecules_to_receptor(receptor_filename, protomers) all_docked_molecules += docked_molecules return all_docked_molecules
Dock the specified molecules, writing out to specified file Parameters ---------- molecule : OEMol The molecule to dock default_receptor : str, optional, default='0387' The default receptor to dock to Returns ------- all_docked_molecules : list of OEMol All docked molecules
scripts/attic/02-dock-ligands-to-corresponding-receptors-multiprocessing.py
dock_molecule
JenkeScheen/covid-moonshot
60
python
def dock_molecule(molecule, default_receptor='x0387'): "\n Dock the specified molecules, writing out to specified file\n\n Parameters\n ----------\n molecule : OEMol\n The molecule to dock\n default_receptor : str, optional, default='0387'\n The default receptor to dock to\n\n Returns\n -------\n all_docked_molecules : list of OEMol\n All docked molecules\n " import os import oechem molecule = oechem.OEMol(molecule) fragments = list() fragments = oechem.OEGetSDData(molecule, 'fragments').split(',') fragments = [fragment for fragment in fragments if os.path.exists(f'../receptors/Mpro-{fragment}-receptor.oeb.gz')] if (len(fragments) == 0): fragments = [default_receptor] all_docked_molecules = list() for fragment in fragments: molecule_to_dock = oechem.OEMol(molecule) import os receptor_filename = os.path.join(f'../receptors/Mpro-{fragment}-receptor.oeb.gz') oechem.OESetSDData(molecule_to_dock, 'fragments', fragment) from openeye import oequacpac protomer = oechem.OEMol() protomers = [oechem.OEMol(protomer) for protomer in oequacpac.OEGetReasonableProtomers(molecule_to_dock)] docked_molecules = dock_molecules_to_receptor(receptor_filename, protomers) all_docked_molecules += docked_molecules return all_docked_molecules
def dock_molecule(molecule, default_receptor='x0387'): "\n Dock the specified molecules, writing out to specified file\n\n Parameters\n ----------\n molecule : OEMol\n The molecule to dock\n default_receptor : str, optional, default='0387'\n The default receptor to dock to\n\n Returns\n -------\n all_docked_molecules : list of OEMol\n All docked molecules\n " import os import oechem molecule = oechem.OEMol(molecule) fragments = list() fragments = oechem.OEGetSDData(molecule, 'fragments').split(',') fragments = [fragment for fragment in fragments if os.path.exists(f'../receptors/Mpro-{fragment}-receptor.oeb.gz')] if (len(fragments) == 0): fragments = [default_receptor] all_docked_molecules = list() for fragment in fragments: molecule_to_dock = oechem.OEMol(molecule) import os receptor_filename = os.path.join(f'../receptors/Mpro-{fragment}-receptor.oeb.gz') oechem.OESetSDData(molecule_to_dock, 'fragments', fragment) from openeye import oequacpac protomer = oechem.OEMol() protomers = [oechem.OEMol(protomer) for protomer in oequacpac.OEGetReasonableProtomers(molecule_to_dock)] docked_molecules = dock_molecules_to_receptor(receptor_filename, protomers) all_docked_molecules += docked_molecules return all_docked_molecules<|docstring|>Dock the specified molecules, writing out to specified file Parameters ---------- molecule : OEMol The molecule to dock default_receptor : str, optional, default='0387' The default receptor to dock to Returns ------- all_docked_molecules : list of OEMol All docked molecules<|endoftext|>
65c2429ac185cf7074e64ab9abf019144893b38376f0d15ad62e1ff133947cdd
def dock_molecules_to_receptor(receptor_filename, molecules): '\n Dock the specified molecules, writing out to specified file\n\n Parameters\n ----------\n receptor_filename : str\n Receptor .oeb.gz filename\n molecules : list of openeye.oechem.OEMol\n The read molecules to dock\n\n Returns\n -------\n docked_molecules : list of OEMol\n All docked molecules\n\n ' from openeye import oechem, oedocking receptor = oechem.OEGraphMol() if (not oedocking.OEReadReceptorFile(receptor, receptor_filename)): oechem.OEThrow.Fatal('Unable to read receptor') if (not oedocking.OEReceptorHasBoundLigand(receptor)): raise Exception('Receptor does not have bound ligand') dockMethod = oedocking.OEDockMethod_Hybrid2 dockResolution = oedocking.OESearchResolution_High dock = oedocking.OEDock(dockMethod, dockResolution) success = dock.Initialize(receptor) from openeye import oeomega omegaOpts = oeomega.OEOmegaOptions() omega = oeomega.OEOmega(omegaOpts) omega.SetStrictStereo(False) docked_molecules = list() for mol in molecules: dockedMol = oechem.OEGraphMol() omega.Build(mol) retCode = dock.DockMultiConformerMolecule(dockedMol, mol) if (retCode != oedocking.OEDockingReturnCode_Success): print(('Docking Failed with error code ' + oedocking.OEDockingReturnCodeGetName(retCode))) continue sdtag = oedocking.OEDockMethodGetName(dockMethod) oedocking.OESetSDScore(dockedMol, dock, sdtag) dock.AnnotatePose(dockedMol) docked_molecules.append(oechem.OEMol(dockedMol)) return docked_molecules
Dock the specified molecules, writing out to specified file Parameters ---------- receptor_filename : str Receptor .oeb.gz filename molecules : list of openeye.oechem.OEMol The read molecules to dock Returns ------- docked_molecules : list of OEMol All docked molecules
scripts/attic/02-dock-ligands-to-corresponding-receptors-multiprocessing.py
dock_molecules_to_receptor
JenkeScheen/covid-moonshot
60
python
def dock_molecules_to_receptor(receptor_filename, molecules): '\n Dock the specified molecules, writing out to specified file\n\n Parameters\n ----------\n receptor_filename : str\n Receptor .oeb.gz filename\n molecules : list of openeye.oechem.OEMol\n The read molecules to dock\n\n Returns\n -------\n docked_molecules : list of OEMol\n All docked molecules\n\n ' from openeye import oechem, oedocking receptor = oechem.OEGraphMol() if (not oedocking.OEReadReceptorFile(receptor, receptor_filename)): oechem.OEThrow.Fatal('Unable to read receptor') if (not oedocking.OEReceptorHasBoundLigand(receptor)): raise Exception('Receptor does not have bound ligand') dockMethod = oedocking.OEDockMethod_Hybrid2 dockResolution = oedocking.OESearchResolution_High dock = oedocking.OEDock(dockMethod, dockResolution) success = dock.Initialize(receptor) from openeye import oeomega omegaOpts = oeomega.OEOmegaOptions() omega = oeomega.OEOmega(omegaOpts) omega.SetStrictStereo(False) docked_molecules = list() for mol in molecules: dockedMol = oechem.OEGraphMol() omega.Build(mol) retCode = dock.DockMultiConformerMolecule(dockedMol, mol) if (retCode != oedocking.OEDockingReturnCode_Success): print(('Docking Failed with error code ' + oedocking.OEDockingReturnCodeGetName(retCode))) continue sdtag = oedocking.OEDockMethodGetName(dockMethod) oedocking.OESetSDScore(dockedMol, dock, sdtag) dock.AnnotatePose(dockedMol) docked_molecules.append(oechem.OEMol(dockedMol)) return docked_molecules
def dock_molecules_to_receptor(receptor_filename, molecules): '\n Dock the specified molecules, writing out to specified file\n\n Parameters\n ----------\n receptor_filename : str\n Receptor .oeb.gz filename\n molecules : list of openeye.oechem.OEMol\n The read molecules to dock\n\n Returns\n -------\n docked_molecules : list of OEMol\n All docked molecules\n\n ' from openeye import oechem, oedocking receptor = oechem.OEGraphMol() if (not oedocking.OEReadReceptorFile(receptor, receptor_filename)): oechem.OEThrow.Fatal('Unable to read receptor') if (not oedocking.OEReceptorHasBoundLigand(receptor)): raise Exception('Receptor does not have bound ligand') dockMethod = oedocking.OEDockMethod_Hybrid2 dockResolution = oedocking.OESearchResolution_High dock = oedocking.OEDock(dockMethod, dockResolution) success = dock.Initialize(receptor) from openeye import oeomega omegaOpts = oeomega.OEOmegaOptions() omega = oeomega.OEOmega(omegaOpts) omega.SetStrictStereo(False) docked_molecules = list() for mol in molecules: dockedMol = oechem.OEGraphMol() omega.Build(mol) retCode = dock.DockMultiConformerMolecule(dockedMol, mol) if (retCode != oedocking.OEDockingReturnCode_Success): print(('Docking Failed with error code ' + oedocking.OEDockingReturnCodeGetName(retCode))) continue sdtag = oedocking.OEDockMethodGetName(dockMethod) oedocking.OESetSDScore(dockedMol, dock, sdtag) dock.AnnotatePose(dockedMol) docked_molecules.append(oechem.OEMol(dockedMol)) return docked_molecules<|docstring|>Dock the specified molecules, writing out to specified file Parameters ---------- receptor_filename : str Receptor .oeb.gz filename molecules : list of openeye.oechem.OEMol The read molecules to dock Returns ------- docked_molecules : list of OEMol All docked molecules<|endoftext|>
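dock_molecules_to_receptor can also be driven directly with molecules built from SMILES; a hedged sketch follows, where the receptor path and the two ligand SMILES are assumptions, and the Hybrid method requires the receptor to contain a bound ligand (the function raises otherwise).

from openeye import oechem, oedocking

# Build a couple of test ligands from SMILES (illustrative molecules only).
ligands = []
for smiles in ['c1ccccc1O', 'CC(=O)Nc1ccc(O)cc1']:
    mol = oechem.OEMol()
    if oechem.OESmilesToMol(mol, smiles):
        ligands.append(mol)

docked = dock_molecules_to_receptor('../receptors/Mpro-x0387-receptor.oeb.gz', ligands)

# The docking score is stored under the SD tag named after the dock method.
sdtag = oedocking.OEDockMethodGetName(oedocking.OEDockMethod_Hybrid2)
for mol in docked:
    print(mol.GetTitle(), oechem.OEGetSDData(mol, sdtag))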
541210db82bc5f24b62ce56e5d4ec5cd2d1cf3777680509ff36de930c8a3b870
def _load(self): ' return False if a new session is created\n ' def __valid_session_id(session_id): rx = kind.re_compile('^[0-9a-fA-F]+$') return rx.match(session_id) if (self._session_id and (not __valid_session_id(self._session_id))): self._session_id = None _sessionDao.cleanup(self._sessionConfig.timeout) if self._session_id: session = _sessionDao.get_session(self._session_id) if (session is not None): if ((not self._sessionConfig.ignore_change_ip) and (self._ip != session.ip)): raise SessionInvalidError() self._data.update(**_sessionDao.decode(session.data)) _sessionDao.access_session(self._session_id) else: self._session_id = None if (self._session_id is None): self._session_id = self._generate_session_id() _sessionDao.create_session(self._session_id, self._data.copy(), self._ip)
return False if a new session is created
ssguan/ignitor/web/session.py
_load
samuelbaizg/ssguan
1
python
def _load(self): ' \n ' def __valid_session_id(session_id): rx = kind.re_compile('^[0-9a-fA-F]+$') return rx.match(session_id) if (self._session_id and (not __valid_session_id(self._session_id))): self._session_id = None _sessionDao.cleanup(self._sessionConfig.timeout) if self._session_id: session = _sessionDao.get_session(self._session_id) if (session is not None): if ((not self._sessionConfig.ignore_change_ip) and (self._ip != session.ip)): raise SessionInvalidError() self._data.update(**_sessionDao.decode(session.data)) _sessionDao.access_session(self._session_id) else: self._session_id = None if (self._session_id is None): self._session_id = self._generate_session_id() _sessionDao.create_session(self._session_id, self._data.copy(), self._ip)
def _load(self): ' \n ' def __valid_session_id(session_id): rx = kind.re_compile('^[0-9a-fA-F]+$') return rx.match(session_id) if (self._session_id and (not __valid_session_id(self._session_id))): self._session_id = None _sessionDao.cleanup(self._sessionConfig.timeout) if self._session_id: session = _sessionDao.get_session(self._session_id) if (session is not None): if ((not self._sessionConfig.ignore_change_ip) and (self._ip != session.ip)): raise SessionInvalidError() self._data.update(**_sessionDao.decode(session.data)) _sessionDao.access_session(self._session_id) else: self._session_id = None if (self._session_id is None): self._session_id = self._generate_session_id() _sessionDao.create_session(self._session_id, self._data.copy(), self._ip)<|docstring|>return False if a new session is created<|endoftext|>
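The only self-contained piece of _load is the session-id check; the sketch below reproduces it with the standard re module on the assumption that kind.re_compile is a thin wrapper around re.compile, showing which ids survive validation before the session DAO is consulted.

import re

hex_id = re.compile('^[0-9a-fA-F]+$')    # same pattern as __valid_session_id

print(bool(hex_id.match('5f3A9c0d2b')))  # True: upper- and lower-case hex digits pass
print(bool(hex_id.match('5f3a-9c0d')))   # False: any non-hex character invalidates the id,
                                         # so _load discards it and creates a new session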
5d252c4e001634fc8a5b96c868923852f94563315ed20f73a5ce517d5fbfad35
def __init__(self, config, installer): "Initialize Client Authenticator.\n\n :param config: Configuration.\n :type config: :class:`letsencrypt.interfaces.IConfig`\n\n :param installer: Let's Encrypt Installer.\n :type installer: :class:`letsencrypt.interfaces.IInstaller`\n\n " self.proof_of_pos = proof_of_possession.ProofOfPossession(installer)
Initialize Client Authenticator. :param config: Configuration. :type config: :class:`letsencrypt.interfaces.IConfig` :param installer: Let's Encrypt Installer. :type installer: :class:`letsencrypt.interfaces.IInstaller`
letsencrypt/continuity_auth.py
__init__
mithrandi/letsencrypt
0
python
def __init__(self, config, installer): "Initialize Client Authenticator.\n\n :param config: Configuration.\n :type config: :class:`letsencrypt.interfaces.IConfig`\n\n :param installer: Let's Encrypt Installer.\n :type installer: :class:`letsencrypt.interfaces.IInstaller`\n\n " self.proof_of_pos = proof_of_possession.ProofOfPossession(installer)
def __init__(self, config, installer): "Initialize Client Authenticator.\n\n :param config: Configuration.\n :type config: :class:`letsencrypt.interfaces.IConfig`\n\n :param installer: Let's Encrypt Installer.\n :type installer: :class:`letsencrypt.interfaces.IInstaller`\n\n " self.proof_of_pos = proof_of_possession.ProofOfPossession(installer)<|docstring|>Initialize Client Authenticator. :param config: Configuration. :type config: :class:`letsencrypt.interfaces.IConfig` :param installer: Let's Encrypt Installer. :type installer: :class:`letsencrypt.interfaces.IInstaller`<|endoftext|>
c4d829aeb5070ccc7916129aa7e06101efbf2f50cc22bd954669deb1b4b82a77
def get_chall_pref(self, unused_domain): 'Return list of challenge preferences.' return [challenges.ProofOfPossession]
Return list of challenge preferences.
letsencrypt/continuity_auth.py
get_chall_pref
mithrandi/letsencrypt
0
python
def get_chall_pref(self, unused_domain): return [challenges.ProofOfPossession]
def get_chall_pref(self, unused_domain): return [challenges.ProofOfPossession]<|docstring|>Return list of challenge preferences.<|endoftext|>
40310bfe168904524074c0ab9d4b0a541b784d9a7932f5ba57b70dab038e63f3
def perform(self, achalls): 'Perform client specific challenges for IAuthenticator' responses = [] for achall in achalls: if isinstance(achall, achallenges.ProofOfPossession): responses.append(self.proof_of_pos.perform(achall)) else: raise errors.ContAuthError('Unexpected Challenge') return responses
Perform client specific challenges for IAuthenticator
letsencrypt/continuity_auth.py
perform
mithrandi/letsencrypt
0
python
def perform(self, achalls): responses = [] for achall in achalls: if isinstance(achall, achallenges.ProofOfPossession): responses.append(self.proof_of_pos.perform(achall)) else: raise errors.ContAuthError('Unexpected Challenge') return responses
def perform(self, achalls): responses = [] for achall in achalls: if isinstance(achall, achallenges.ProofOfPossession): responses.append(self.proof_of_pos.perform(achall)) else: raise errors.ContAuthError('Unexpected Challenge') return responses<|docstring|>Perform client specific challenges for IAuthenticator<|endoftext|>
9796f95a83b5dca9b89a0771a769157637c65e1aceefed3a8b6b7a94d0817f8f
def cleanup(self, achalls): 'Cleanup call for IAuthenticator.' for achall in achalls: if (not isinstance(achall, achallenges.ProofOfPossession)): raise errors.ContAuthError('Unexpected Challenge')
Cleanup call for IAuthenticator.
letsencrypt/continuity_auth.py
cleanup
mithrandi/letsencrypt
0
python
def cleanup(self, achalls): for achall in achalls: if (not isinstance(achall, achallenges.ProofOfPossession)): raise errors.ContAuthError('Unexpected Challenge')
def cleanup(self, achalls): for achall in achalls: if (not isinstance(achall, achallenges.ProofOfPossession)): raise errors.ContAuthError('Unexpected Challenge')<|docstring|>Cleanup call for IAuthenticator.<|endoftext|>
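Taken together, get_chall_pref, perform, and cleanup implement a type-dispatch contract: anything that is not a proof-of-possession challenge is rejected. The toy sketch below uses stand-in classes instead of the real letsencrypt achallenges/errors objects purely to show that contract; it is not how the authenticator is constructed in practice.

class ProofOfPossession:             # stand-in for achallenges.ProofOfPossession
    pass

class ContAuthError(Exception):      # stand-in for errors.ContAuthError
    pass

def perform(achalls):
    responses = []
    for achall in achalls:
        if not isinstance(achall, ProofOfPossession):
            raise ContAuthError('Unexpected Challenge')
        responses.append('pop-response')   # the real code delegates to ProofOfPossession.perform
    return responses

print(perform([ProofOfPossession()]))      # ['pop-response']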
e3acd370fe9b26ca88d3822b1388817e708837eec732ed4f8ee11b078e233eb2
def compute_lid(x, x_train, k, exclude_self=False): '\n Calculate LID using the estimation from [1]\n\n [1] Ma et al., "Characterizing Adversarial Subspaces Using\n Local Intrinsic Dimensionality," ICLR 2018.\n ' with torch.no_grad(): x = x.view((x.size(0), (- 1))) x_train = x_train.view((x_train.size(0), (- 1))) lid = torch.zeros((x.size(0),)) for (i, x_cur) in enumerate(x): dist = (x_cur.view(1, (- 1)) - x_train).norm(2, 1) if exclude_self: topk_dist = dist.topk((k + 1), largest=False)[0][1:] else: topk_dist = dist.topk(k, largest=False)[0] mean_log = torch.log((topk_dist / topk_dist[(- 1)])).mean() lid[i] = ((- 1) / mean_log) return lid
Calculate LID using the estimation from [1] [1] Ma et al., "Characterizing Adversarial Subspaces Using Local Intrinsic Dimensionality," ICLR 2018.
lib/utils.py
compute_lid
chawins/entangle-rep
15
python
def compute_lid(x, x_train, k, exclude_self=False): '\n Calculate LID using the estimation from [1]\n\n [1] Ma et al., "Characterizing Adversarial Subspaces Using\n Local Intrinsic Dimensionality," ICLR 2018.\n ' with torch.no_grad(): x = x.view((x.size(0), (- 1))) x_train = x_train.view((x_train.size(0), (- 1))) lid = torch.zeros((x.size(0),)) for (i, x_cur) in enumerate(x): dist = (x_cur.view(1, (- 1)) - x_train).norm(2, 1) if exclude_self: topk_dist = dist.topk((k + 1), largest=False)[0][1:] else: topk_dist = dist.topk(k, largest=False)[0] mean_log = torch.log((topk_dist / topk_dist[(- 1)])).mean() lid[i] = ((- 1) / mean_log) return lid
def compute_lid(x, x_train, k, exclude_self=False): '\n Calculate LID using the estimation from [1]\n\n [1] Ma et al., "Characterizing Adversarial Subspaces Using\n Local Intrinsic Dimensionality," ICLR 2018.\n ' with torch.no_grad(): x = x.view((x.size(0), (- 1))) x_train = x_train.view((x_train.size(0), (- 1))) lid = torch.zeros((x.size(0),)) for (i, x_cur) in enumerate(x): dist = (x_cur.view(1, (- 1)) - x_train).norm(2, 1) if exclude_self: topk_dist = dist.topk((k + 1), largest=False)[0][1:] else: topk_dist = dist.topk(k, largest=False)[0] mean_log = torch.log((topk_dist / topk_dist[(- 1)])).mean() lid[i] = ((- 1) / mean_log) return lid<|docstring|>Calculate LID using the estimation from [1] [1] Ma et al., "Characterizing Adversarial Subspaces Using Local Intrinsic Dimensionality," ICLR 2018.<|endoftext|>
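A quick self-contained check of compute_lid, assuming PyTorch is available: for points sampled uniformly from a d-dimensional cube, the maximum-likelihood estimator above (-1 / mean log(d_i / d_k) over the k nearest neighbours) should come out in the vicinity of d, with the exact value depending on k and the sample.

import torch

torch.manual_seed(0)
d = 8
x_train = torch.rand(2000, d)            # reference set
x = torch.rand(16, d)                    # query points
lids = compute_lid(x, x_train, k=20)
print(lids.mean().item())                # roughly of the order of d for this synthetic data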
fe0f380d4ada5bb298b4d69cba0bf43cbb42c0cd56703e8fb5e9233cf4d53d36
def compute_spnorm_batch(inputs, output): '\n :param inputs: (batch_size, input_size)\n :param output: (batch_size, output_size)\n :return: jacobian: (batch_size, output_size, input_size)\n ' (batch_size, input_dim) = inputs.view(inputs.size(0), (- 1)).size() output = output.view(batch_size, (- 1)) jacobian = torch.zeros((batch_size, output.size(1), input_dim)) for i in range(output.size(1)): grad = torch.autograd.grad(output[:, i].sum(), inputs, retain_graph=True)[0] jacobian[:, i, :] = grad.view(batch_size, input_dim) norm = np.zeros((batch_size,)) for i in range(batch_size): norm[i] = np.linalg.norm(jacobian[i].detach().cpu().numpy(), 2) return norm
:param inputs: (batch_size, input_size) :param output: (batch_size, output_size) :return: jacobian: (batch_size, output_size, input_size)
lib/utils.py
compute_spnorm_batch
chawins/entangle-rep
15
python
def compute_spnorm_batch(inputs, output): '\n :param inputs: (batch_size, input_size)\n :param output: (batch_size, output_size)\n :return: jacobian: (batch_size, output_size, input_size)\n ' (batch_size, input_dim) = inputs.view(inputs.size(0), (- 1)).size() output = output.view(batch_size, (- 1)) jacobian = torch.zeros((batch_size, output.size(1), input_dim)) for i in range(output.size(1)): grad = torch.autograd.grad(output[:, i].sum(), inputs, retain_graph=True)[0] jacobian[:, i, :] = grad.view(batch_size, input_dim) norm = np.zeros((batch_size,)) for i in range(batch_size): norm[i] = np.linalg.norm(jacobian[i].detach().cpu().numpy(), 2) return norm
def compute_spnorm_batch(inputs, output): '\n :param inputs: (batch_size, input_size)\n :param output: (batch_size, output_size)\n :return: jacobian: (batch_size, output_size, input_size)\n ' (batch_size, input_dim) = inputs.view(inputs.size(0), (- 1)).size() output = output.view(batch_size, (- 1)) jacobian = torch.zeros((batch_size, output.size(1), input_dim)) for i in range(output.size(1)): grad = torch.autograd.grad(output[:, i].sum(), inputs, retain_graph=True)[0] jacobian[:, i, :] = grad.view(batch_size, input_dim) norm = np.zeros((batch_size,)) for i in range(batch_size): norm[i] = np.linalg.norm(jacobian[i].detach().cpu().numpy(), 2) return norm<|docstring|>:param inputs: (batch_size, input_size) :param output: (batch_size, output_size) :return: jacobian: (batch_size, output_size, input_size)<|endoftext|>
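A hedged usage sketch for compute_spnorm_batch; note that, despite the ':return:' line in its docstring, the function returns a NumPy array of per-sample spectral norms rather than the Jacobian itself, and the inputs tensor must carry requires_grad so torch.autograd.grad can differentiate through the model. For a purely linear map every entry should match the weight matrix's largest singular value.

import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.Linear(10, 3)
inputs = torch.randn(4, 10, requires_grad=True)
output = model(inputs)

norms = compute_spnorm_batch(inputs, output)          # assumes the function above is in scope with its np/torch imports
print(norms)                                          # four (near-)identical values for a linear model
print(torch.linalg.svdvals(model.weight)[0].item())   # largest singular value of the weight matrix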
bd8f20a180da24d1bfdfd47d89cb63104d29984688ce07b805d879350d028cc7
def add(self, group_creation_information): 'Creates a Group resource' group = Group(self.context) self.add_child(group) qry = CreateEntityQuery(self, group_creation_information) self.context.add_query(qry) return group
Creates a Group resource
office365/sharepoint/group_collection.py
add
stardust85/Office365-REST-Python-Client
0
python
def add(self, group_creation_information): group = Group(self.context) self.add_child(group) qry = CreateEntityQuery(self, group_creation_information) self.context.add_query(qry) return group
def add(self, group_creation_information): group = Group(self.context) self.add_child(group) qry = CreateEntityQuery(self, group_creation_information) self.context.add_query(qry) return group<|docstring|>Creates a Group resource<|endoftext|>
061cdedcd1415601964d9c6cc227088ad3ae356955e9057e4a49275125e520f7
def get_by_id(self, group_id): 'Returns the list item with the specified list item identifier.' group = Group(self.context, ResourcePathServiceOperation('getbyid', [group_id], self.resource_path)) return group
Returns the list item with the specified list item identifier.
office365/sharepoint/group_collection.py
get_by_id
stardust85/Office365-REST-Python-Client
0
python
def get_by_id(self, group_id): group = Group(self.context, ResourcePathServiceOperation('getbyid', [group_id], self.resource_path)) return group
def get_by_id(self, group_id): group = Group(self.context, ResourcePathServiceOperation('getbyid', [group_id], self.resource_path)) return group<|docstring|>Returns the list item with the specified list item identifier.<|endoftext|>
8197e204bd5234f4d91659445cd60de1d6447eef0b933d3aba6afd8d13872ab5
def get_by_name(self, group_name): 'Returns a cross-site group from the collection based on the name of the group.' return Group(self.context, ResourcePathServiceOperation('getbyname', [group_name], self.resource_path))
Returns a cross-site group from the collection based on the name of the group.
office365/sharepoint/group_collection.py
get_by_name
stardust85/Office365-REST-Python-Client
0
python
def get_by_name(self, group_name): return Group(self.context, ResourcePathServiceOperation('getbyname', [group_name], self.resource_path))
def get_by_name(self, group_name): return Group(self.context, ResourcePathServiceOperation('getbyname', [group_name], self.resource_path))<|docstring|>Returns a cross-site group from the collection based on the name of the group.<|endoftext|>
2f492f768bae5068f94cbc8610063949d7f97e66888278b10d7ccea07a3e9926
def remove_by_id(self, group_id): 'Removes the group with the specified member ID from the collection.' qry = ServiceOperationQuery(self, 'removebyid', [group_id]) self.context.add_query(qry)
Removes the group with the specified member ID from the collection.
office365/sharepoint/group_collection.py
remove_by_id
stardust85/Office365-REST-Python-Client
0
python
def remove_by_id(self, group_id): qry = ServiceOperationQuery(self, 'removebyid', [group_id]) self.context.add_query(qry)
def remove_by_id(self, group_id): qry = ServiceOperationQuery(self, 'removebyid', [group_id]) self.context.add_query(qry)<|docstring|>Removes the group with the specified member ID from the collection.<|endoftext|>
66ca0eb9d45a8d57cb674e44e0754a385fc8621690837bdf4268959d117da10f
def remove_by_login_name(self, group_name): 'Removes the cross-site group with the specified name from the collection.' qry = ServiceOperationQuery(self, 'removebyloginname', [group_name]) self.context.add_query(qry)
Removes the cross-site group with the specified name from the collection.
office365/sharepoint/group_collection.py
remove_by_login_name
stardust85/Office365-REST-Python-Client
0
python
def remove_by_login_name(self, group_name): qry = ServiceOperationQuery(self, 'removebyloginname', [group_name]) self.context.add_query(qry)
def remove_by_login_name(self, group_name): qry = ServiceOperationQuery(self, 'removebyloginname', [group_name]) self.context.add_query(qry)<|docstring|>Removes the cross-site group with the specified name from the collection.<|endoftext|>
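A hedged sketch of how these GroupCollection helpers are typically driven from a ClientContext in Office365-REST-Python-Client. How the context is authenticated depends on the library version, so that step is left to a hypothetical helper; the site_groups property name, the group names, and the site URL are likewise assumptions. Nothing hits SharePoint until execute_query() runs the queued queries.

from office365.sharepoint.client_context import ClientContext

ctx = get_authenticated_context('https://contoso.sharepoint.com/sites/dev')  # hypothetical helper

groups = ctx.web.site_groups                 # assumed property exposing the GroupCollection
group = groups.get_by_name('Dev Owners')     # builds a getbyname resource path
ctx.load(group)
ctx.execute_query()                          # queued queries execute here
print(group.properties.get('Id'))

groups.remove_by_login_name('Dev Visitors')  # queues a removebyloginname service operation
ctx.execute_query()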
04ffd4cd8fd433061d0eb372c36a6d62173f7a5570315f7579f51bba197c0bb8
def get_global_option(option): ' Returns the value of the option\n :returns: str or None\n ' try: return CONFIGURATION['global'][option] except KeyError: return None
Returns the value of the option :returns: str or None
dynamic_ec2reservation/config_handler.py
get_global_option
highlyunavailable/dynamic-ec2reservation
1
python
def get_global_option(option): ' Returns the value of the option\n :returns: str or None\n ' try: return CONFIGURATION['global'][option] except KeyError: return None
def get_global_option(option): ' Returns the value of the option\n :returns: str or None\n ' try: return CONFIGURATION['global'][option] except KeyError: return None<|docstring|>Returns the value of the option :returns: str or None<|endoftext|>
796d2ecddc8ab40b038e4ad763b6a7a2220a05b84828b9222f1d1172448e3979
def get_logging_option(option): ' Returns the value of the option\n :returns: str or None\n ' try: return CONFIGURATION['logging'][option] except KeyError: return None
Returns the value of the option :returns: str or None
dynamic_ec2reservation/config_handler.py
get_logging_option
highlyunavailable/dynamic-ec2reservation
1
python
def get_logging_option(option): ' Returns the value of the option\n :returns: str or None\n ' try: return CONFIGURATION['logging'][option] except KeyError: return None
def get_logging_option(option): ' Returns the value of the option\n :returns: str or None\n ' try: return CONFIGURATION['logging'][option] except KeyError: return None<|docstring|>Returns the value of the option :returns: str or None<|endoftext|>
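A small behavioural sketch for the two config accessors, assuming they are defined alongside a module-level CONFIGURATION dict with 'global' and 'logging' sections as the lookups imply; the option names shown are illustrative, and missing options come back as None instead of raising.

CONFIGURATION = {
    'global': {'region': 'us-east-1', 'check-interval': 300},   # illustrative options
    'logging': {'log-level': 'INFO'},
}

print(get_global_option('region'))       # 'us-east-1'
print(get_global_option('not-set'))      # None (the KeyError is swallowed)
print(get_logging_option('log-level'))   # 'INFO'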