body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k)
---|---|---|---|---|---|---|---|---|---|
a88696432c3aff222de448a2865f8ba5abbe6fbffc33e3fbd592e1dcf7caed9b | def update_quality(self):
'Update quality score for all items'
for value in self.organised_items.values():
for item in value:
'Constants'
QUALITY_MAX = 50
QUALITY_MIN = 0
'Update Quality: General Items and Conjured Items'
if (('Aged Brie' not in item.name) and ('Sulfuras' not in item.name) and ('Backstage' not in item.name)):
if (('Conjured' in item.name) or (item.sell_in < 0)):
item.quality -= 2
elif (item.sell_in >= 0):
item.quality -= 1
if (item.quality < QUALITY_MIN):
item.quality = QUALITY_MIN
'Update Quality: Aged Brie'
if ('Aged Brie' in item.name):
if (item.quality == QUALITY_MAX):
item.quality = item.quality
elif (item.quality < QUALITY_MAX):
item.quality += 1
'Update Quality: Sulfuras'
if (('Sulfuras' in item.name) and (QUALITY_MIN < item.quality > QUALITY_MAX)):
item.quality = item.quality
'Update quality: Backstage passes'
if (('Backstage' in item.name) and (item.quality < QUALITY_MAX)):
if (item.sell_in >= 15):
item.quality += 1
if (10 <= item.sell_in < 15):
item.quality += 2
if (5 <= item.sell_in < 10):
item.quality += 3
if (item.sell_in <= 0):
item.quality = QUALITY_MIN
elif (('Backstage' in item.name) and (item.quality >= QUALITY_MAX)):
item.quality = QUALITY_MAX
return self.organised_items | Update quality score for all items | python/gilded_rose.py | update_quality | Mayo-Theodore/GildedRose-Refactoring-Kata | 0 | python | def update_quality(self):
for value in self.organised_items.values():
for item in value:
'Constants'
QUALITY_MAX = 50
QUALITY_MIN = 0
'Update Quality: General Items and Conjured Items'
if (('Aged Brie' not in item.name) and ('Sulfuras' not in item.name) and ('Backstage' not in item.name)):
if (('Conjured' in item.name) or (item.sell_in < 0)):
item.quality -= 2
elif (item.sell_in >= 0):
item.quality -= 1
if (item.quality < QUALITY_MIN):
item.quality = QUALITY_MIN
'Update Quality: Aged Brie'
if ('Aged Brie' in item.name):
if (item.quality == QUALITY_MAX):
item.quality = item.quality
elif (item.quality < QUALITY_MAX):
item.quality += 1
'Update Quality: Sulfuras'
if (('Sulfuras' in item.name) and (QUALITY_MIN < item.quality > QUALITY_MAX)):
item.quality = item.quality
'Update quality: Backstage passes'
if (('Backstage' in item.name) and (item.quality < QUALITY_MAX)):
if (item.sell_in >= 15):
item.quality += 1
if (10 <= item.sell_in < 15):
item.quality += 2
if (5 <= item.sell_in < 10):
item.quality += 3
if (item.sell_in <= 0):
item.quality = QUALITY_MIN
elif (('Backstage' in item.name) and (item.quality >= QUALITY_MAX)):
item.quality = QUALITY_MAX
return self.organised_items | def update_quality(self):
for value in self.organised_items.values():
for item in value:
'Constants'
QUALITY_MAX = 50
QUALITY_MIN = 0
'Update Quality: General Items and Conjured Items'
if (('Aged Brie' not in item.name) and ('Sulfuras' not in item.name) and ('Backstage' not in item.name)):
if (('Conjured' in item.name) or (item.sell_in < 0)):
item.quality -= 2
elif (item.sell_in >= 0):
item.quality -= 1
if (item.quality < QUALITY_MIN):
item.quality = QUALITY_MIN
'Update Quality: Aged Brie'
if ('Aged Brie' in item.name):
if (item.quality == QUALITY_MAX):
item.quality = item.quality
elif (item.quality < QUALITY_MAX):
item.quality += 1
'Update Quality: Sulfuras'
if (('Sulfuras' in item.name) and (QUALITY_MIN < item.quality > QUALITY_MAX)):
item.quality = item.quality
'Update quality: Backstage passes'
if (('Backstage' in item.name) and (item.quality < QUALITY_MAX)):
if (item.sell_in >= 15):
item.quality += 1
if (10 <= item.sell_in < 15):
item.quality += 2
if (5 <= item.sell_in < 10):
item.quality += 3
if (item.sell_in <= 0):
item.quality = QUALITY_MIN
elif (('Backstage' in item.name) and (item.quality >= QUALITY_MAX)):
item.quality = QUALITY_MAX
return self.organised_items<|docstring|>Update quality score for all items<|endoftext|> |
9978f3bce744996e4fe2829dde7005150fc38f7bb428fc8e64c3cd0471198082 | def parse_numprocesses(s):
'\n A little bit of processing to get number of parallel processes to use (since "auto" can be used to represent\n # of cores on machine)\n :param s: text to process\n :return: number of parallel worker processes to use\n '
try:
if s.startswith('auto'):
if ('*' in s):
multiplication_factor = int(s.rsplit('*', 1)[(- 1)])
elif (s == 'auto'):
multiplication_factor = 1
else:
raise Exception('Error: --cores argument must be an integer value or auto or auto*<int factor>')
return (cpu_count() * multiplication_factor)
else:
return int(s)
except ValueError:
raise Exception('Error: --cores argument must be an integer value or "auto" or "auto*<int factor>"') | A little bit of processing to get number of parallel processes to use (since "auto" can be used to represent
# of cores on machine)
:param s: text to process
:return: number of parallel worker processes to use | src/pytest_mproc/plugin.py | parse_numprocesses | nak/pytest_mproc | 6 | python | def parse_numprocesses(s):
'\n A little bit of processing to get number of parallel processes to use (since "auto" can be used to represent\n # of cores on machine)\n :param s: text to process\n :return: number of parallel worker processes to use\n '
try:
if s.startswith('auto'):
if ('*' in s):
multiplication_factor = int(s.rsplit('*', 1)[(- 1)])
elif (s == 'auto'):
multiplication_factor = 1
else:
raise Exception('Error: --cores argument must be an integer value or auto or auto*<int factor>')
return (cpu_count() * multiplication_factor)
else:
return int(s)
except ValueError:
raise Exception('Error: --cores argument must be an integer value or "auto" or "auto*<int factor>"') | def parse_numprocesses(s):
'\n A little bit of processing to get number of parallel processes to use (since "auto" can be used to represent\n # of cores on machine)\n :param s: text to process\n :return: number of parallel worker processes to use\n '
try:
if s.startswith('auto'):
if ('*' in s):
multiplication_factor = int(s.rsplit('*', 1)[(- 1)])
elif (s == 'auto'):
multiplication_factor = 1
else:
raise Exception('Error: --cores argument must be an integer value or auto or auto*<int factor>')
return (cpu_count() * multiplication_factor)
else:
return int(s)
except ValueError:
raise Exception('Error: --cores argument must be an integer value or "auto" or "auto*<int factor>"')<|docstring|>A little bit of processing to get number of parallel processes to use (since "auto" can be used to represent
# of cores on machine)
:param s: text to process
:return: number of parallel worker processes to use<|endoftext|> |
e041bf2453fc205377c632fc4e4b5d4a8da4f3e016a6aa2f8b7ed4d491b16b83 | @pytest.mark.tryfirst
def pytest_addoption(parser):
'\n add options to given parser for this plugin\n '
group = parser.getgroup('pytest_mproc', 'better distributed testing through multiprocessing')
group._addoption('--cores', dest='mproc_numcores', metavar='mproc_numcores', action='store', type=parse_numprocesses, help="you can use 'auto' here to set to the number of CPU cores on host system")
group._addoption('--disable-mproc', dest='mproc_disabled', metavar='mproc_disabled', action='store', type=bool, help='disable any parallel mproc testing, overriding all other mproc arguments')
group._addoption('--as-server', dest='mproc_server_port', metavar='mproc_server_port', action='store', type=int, help='port on which you wish to run server (for multi-host runs only)')
group._addoption('--as-client', dest='mproc_client_connect', metavar='mproc_client_connect', action='store', type=str, help='host:port specification of master node to connect to as client')
group._addoption('--max-simultaneous-connections', dest='mproc_max_simultaneous_connections', metavar='mproc_max_simultaneous_connections', action='store', type=int, help='max # of connections allowed at one time to main process, to prevent deadlock from overload')
group._addoption('--connection-timeout', dest='mproc_connection_timeout', metavar='mproc_connection_timeout', action='store', type=int, help='wait this many seconds on connection of client before timing out') | add options to given parser for this plugin | src/pytest_mproc/plugin.py | pytest_addoption | nak/pytest_mproc | 6 | python | @pytest.mark.tryfirst
def pytest_addoption(parser):
'\n \n '
group = parser.getgroup('pytest_mproc', 'better distributed testing through multiprocessing')
group._addoption('--cores', dest='mproc_numcores', metavar='mproc_numcores', action='store', type=parse_numprocesses, help="you can use 'auto' here to set to the number of CPU cores on host system")
group._addoption('--disable-mproc', dest='mproc_disabled', metavar='mproc_disabled', action='store', type=bool, help='disable any parallel mproc testing, overriding all other mproc arguments')
group._addoption('--as-server', dest='mproc_server_port', metavar='mproc_server_port', action='store', type=int, help='port on which you wish to run server (for multi-host runs only)')
group._addoption('--as-client', dest='mproc_client_connect', metavar='mproc_client_connect', action='store', type=str, help='host:port specification of master node to connect to as client')
group._addoption('--max-simultaneous-connections', dest='mproc_max_simultaneous_connections', metavar='mproc_max_simultaneous_connections', action='store', type=int, help='max # of connections allowed at one time to main process, to prevent deadlock from overload')
group._addoption('--connection-timeout', dest='mproc_connection_timeout', metavar='mproc_connection_timeout', action='store', type=int, help='wait this many seconds on connection of client before timing out') | @pytest.mark.tryfirst
def pytest_addoption(parser):
'\n \n '
group = parser.getgroup('pytest_mproc', 'better distributed testing through multiprocessing')
group._addoption('--cores', dest='mproc_numcores', metavar='mproc_numcores', action='store', type=parse_numprocesses, help="you can use 'auto' here to set to the number of CPU cores on host system")
group._addoption('--disable-mproc', dest='mproc_disabled', metavar='mproc_disabled', action='store', type=bool, help='disable any parallel mproc testing, overriding all other mproc arguments')
group._addoption('--as-server', dest='mproc_server_port', metavar='mproc_server_port', action='store', type=int, help='port on which you wish to run server (for multi-host runs only)')
group._addoption('--as-client', dest='mproc_client_connect', metavar='mproc_client_connect', action='store', type=str, help='host:port specification of master node to connect to as client')
group._addoption('--max-simultaneous-connections', dest='mproc_max_simultaneous_connections', metavar='mproc_max_simultaneous_connections', action='store', type=int, help='max # of connections allowed at one time to main process, to prevent deadlock from overload')
group._addoption('--connection-timeout', dest='mproc_connection_timeout', metavar='mproc_connection_timeout', action='store', type=int, help='wait this many seconds on connection of client before timing out')<|docstring|>add options to given parser for this plugin<|endoftext|> |
6393360af69c4829b8772832b9e4d826816bbc2a92ea4b1e1fb5a6ed7f2cd532 | @pytest.mark.tryfirst
def pytest_cmdline_main(config):
'\n Called before "true" main routine. This is to set up config values well ahead of time\n for things like pytest-cov that needs to know we are running distributed\n\n Mostly taken from other implementations (such as xdist)\n '
if config.option.collectonly:
return
reporter = BasicReporter()
worker = getattr(config.option, 'mproc_worker', None)
mproc_server_port = getattr(config.option, 'mproc_server_port', None)
config.option.mproc_is_serving_remotes = (mproc_server_port is not None)
mproc_server_host = (_get_ip_addr() if (mproc_server_port is not None) else '127.0.0.1')
mproc_client_connect = getattr(config.option, 'mproc_client_connect', None)
if (mproc_client_connect and mproc_server_port):
raise pytest.UsageError('Cannot specify both -as-master and --as-client at same time')
config.option.mproc_max_simultaneous_connections = (24 if (config.option.mproc_max_simultaneous_connections is None) else config.option.mproc_max_simultaneous_connections)
config.option.numprocesses = config.option.mproc_numcores
if (config.option.numprocesses and (config.option.numprocesses < 0)):
raise pytest.UsageError('Number of cores must be greater than or equal to zero when running as a master')
if (config.option.mproc_max_simultaneous_connections <= 0):
raise pytest.UsageError('max simultaneous connections must be greater than 0; preferably greater than 9')
if (config.option.mproc_connection_timeout is not None):
fixtures.CONNECTION_TIMEOU = config.option.mproc_connection_timeout
if ((getattr(config.option, 'mproc_numcores', None) is None) or is_degraded() or getattr(config.option, 'mproc_disabled')):
reporter.write('>>>>> no number of cores provided or running in environment unsupportive of parallelized testing, not running multiprocessing <<<<<\n', yellow=True)
return
if worker:
return
config.option.mproc_is_remote_client = (mproc_client_connect is not None)
if mproc_client_connect:
if (config.option.numprocesses < 1):
raise pytest.UsageError('Number of cores must be 1 or more when running as client')
try:
(host, port) = mproc_client_connect.rsplit(':', maxsplit=1)
except Exception:
raise pytest.UsageError("--as-client must be specified in form '<host>:<port>' of the master node")
try:
port = int(port)
except ValueError:
raise pytest.UsageError('When specifying connection as client, port must be an integer value')
else:
if (mproc_server_port is not None):
(host, port) = (mproc_server_host, mproc_server_port)
else:
if (config.option.numprocesses < 1):
raise pytest.UsageError('Number of cores must be 1 or more when running on single host')
(host, port) = ('127.0.0.1', find_free_port())
config.option.mproc_main = Orchestrator(host=host, port=(port or find_free_port()), is_serving_remotes=config.option.mproc_is_serving_remotes)
BasicReporter().write(f'Started on port {port}')
reporter.write(f'''Running as main @ {host}:{port}
''', green=True)
if (config.option.numprocesses > 0):
factory = CoordinatorFactory(config.option.numprocesses, host=host, port=port, max_simultaneous_connections=config.option.mproc_max_simultaneous_connections, as_remote_client=config.option.mproc_is_remote_client)
config.option.mproc_coordinator = factory.launch()
config.option.dist = 'no'
val = config.getvalue
if (not val('collectonly')):
usepdb = config.getoption('usepdb')
if ((val('dist') != 'no') and usepdb):
raise pytest.UsageError('--pdb is incompatible with distributing tests.') | Called before "true" main routine. This is to set up config values well ahead of time
for things like pytest-cov that needs to know we are running distributed
Mostly taken from other implementations (such as xdist) | src/pytest_mproc/plugin.py | pytest_cmdline_main | nak/pytest_mproc | 6 | python | @pytest.mark.tryfirst
def pytest_cmdline_main(config):
'\n Called before "true" main routine. This is to set up config values well ahead of time\n for things like pytest-cov that needs to know we are running distributed\n\n Mostly taken from other implementations (such as xdist)\n '
if config.option.collectonly:
return
reporter = BasicReporter()
worker = getattr(config.option, 'mproc_worker', None)
mproc_server_port = getattr(config.option, 'mproc_server_port', None)
config.option.mproc_is_serving_remotes = (mproc_server_port is not None)
mproc_server_host = (_get_ip_addr() if (mproc_server_port is not None) else '127.0.0.1')
mproc_client_connect = getattr(config.option, 'mproc_client_connect', None)
if (mproc_client_connect and mproc_server_port):
raise pytest.UsageError('Cannot specify both -as-master and --as-client at same time')
config.option.mproc_max_simultaneous_connections = (24 if (config.option.mproc_max_simultaneous_connections is None) else config.option.mproc_max_simultaneous_connections)
config.option.numprocesses = config.option.mproc_numcores
if (config.option.numprocesses and (config.option.numprocesses < 0)):
raise pytest.UsageError('Number of cores must be greater than or equal to zero when running as a master')
if (config.option.mproc_max_simultaneous_connections <= 0):
raise pytest.UsageError('max simultaneous connections must be greater than 0; preferably greater than 9')
if (config.option.mproc_connection_timeout is not None):
fixtures.CONNECTION_TIMEOU = config.option.mproc_connection_timeout
if ((getattr(config.option, 'mproc_numcores', None) is None) or is_degraded() or getattr(config.option, 'mproc_disabled')):
reporter.write('>>>>> no number of cores provided or running in environment unsupportive of parallelized testing, not running multiprocessing <<<<<\n', yellow=True)
return
if worker:
return
config.option.mproc_is_remote_client = (mproc_client_connect is not None)
if mproc_client_connect:
if (config.option.numprocesses < 1):
raise pytest.UsageError('Number of cores must be 1 or more when running as client')
try:
(host, port) = mproc_client_connect.rsplit(':', maxsplit=1)
except Exception:
raise pytest.UsageError("--as-client must be specified in form '<host>:<port>' of the master node")
try:
port = int(port)
except ValueError:
raise pytest.UsageError('When specifying connection as client, port must be an integer value')
else:
if (mproc_server_port is not None):
(host, port) = (mproc_server_host, mproc_server_port)
else:
if (config.option.numprocesses < 1):
raise pytest.UsageError('Number of cores must be 1 or more when running on single host')
(host, port) = ('127.0.0.1', find_free_port())
config.option.mproc_main = Orchestrator(host=host, port=(port or find_free_port()), is_serving_remotes=config.option.mproc_is_serving_remotes)
BasicReporter().write(f'Started on port {port}')
        reporter.write(f'Running as main @ {host}:{port}\n', green=True)
if (config.option.numprocesses > 0):
factory = CoordinatorFactory(config.option.numprocesses, host=host, port=port, max_simultaneous_connections=config.option.mproc_max_simultaneous_connections, as_remote_client=config.option.mproc_is_remote_client)
config.option.mproc_coordinator = factory.launch()
config.option.dist = 'no'
val = config.getvalue
if (not val('collectonly')):
usepdb = config.getoption('usepdb')
if ((val('dist') != 'no') and usepdb):
raise pytest.UsageError('--pdb is incompatible with distributing tests.') | @pytest.mark.tryfirst
def pytest_cmdline_main(config):
'\n Called before "true" main routine. This is to set up config values well ahead of time\n for things like pytest-cov that needs to know we are running distributed\n\n Mostly taken from other implementations (such as xdist)\n '
if config.option.collectonly:
return
reporter = BasicReporter()
worker = getattr(config.option, 'mproc_worker', None)
mproc_server_port = getattr(config.option, 'mproc_server_port', None)
config.option.mproc_is_serving_remotes = (mproc_server_port is not None)
mproc_server_host = (_get_ip_addr() if (mproc_server_port is not None) else '127.0.0.1')
mproc_client_connect = getattr(config.option, 'mproc_client_connect', None)
if (mproc_client_connect and mproc_server_port):
raise pytest.UsageError('Cannot specify both -as-master and --as-client at same time')
config.option.mproc_max_simultaneous_connections = (24 if (config.option.mproc_max_simultaneous_connections is None) else config.option.mproc_max_simultaneous_connections)
config.option.numprocesses = config.option.mproc_numcores
if (config.option.numprocesses and (config.option.numprocesses < 0)):
raise pytest.UsageError('Number of cores must be greater than or equal to zero when running as a master')
if (config.option.mproc_max_simultaneous_connections <= 0):
raise pytest.UsageError('max simultaneous connections must be greater than 0; preferably greater than 9')
if (config.option.mproc_connection_timeout is not None):
fixtures.CONNECTION_TIMEOU = config.option.mproc_connection_timeout
if ((getattr(config.option, 'mproc_numcores', None) is None) or is_degraded() or getattr(config.option, 'mproc_disabled')):
reporter.write('>>>>> no number of cores provided or running in environment unsupportive of parallelized testing, not running multiprocessing <<<<<\n', yellow=True)
return
if worker:
return
config.option.mproc_is_remote_client = (mproc_client_connect is not None)
if mproc_client_connect:
if (config.option.numprocesses < 1):
raise pytest.UsageError('Number of cores must be 1 or more when running as client')
try:
(host, port) = mproc_client_connect.rsplit(':', maxsplit=1)
except Exception:
raise pytest.UsageError("--as-client must be specified in form '<host>:<port>' of the master node")
try:
port = int(port)
except ValueError:
raise pytest.UsageError('When specifying connection as client, port must be an integer value')
else:
if (mproc_server_port is not None):
(host, port) = (mproc_server_host, mproc_server_port)
else:
if (config.option.numprocesses < 1):
raise pytest.UsageError('Number of cores must be 1 or more when running on single host')
(host, port) = ('127.0.0.1', find_free_port())
config.option.mproc_main = Orchestrator(host=host, port=(port or find_free_port()), is_serving_remotes=config.option.mproc_is_serving_remotes)
BasicReporter().write(f'Started on port {port}')
        reporter.write(f'Running as main @ {host}:{port}\n', green=True)
if (config.option.numprocesses > 0):
factory = CoordinatorFactory(config.option.numprocesses, host=host, port=port, max_simultaneous_connections=config.option.mproc_max_simultaneous_connections, as_remote_client=config.option.mproc_is_remote_client)
config.option.mproc_coordinator = factory.launch()
config.option.dist = 'no'
val = config.getvalue
if (not val('collectonly')):
usepdb = config.getoption('usepdb')
if ((val('dist') != 'no') and usepdb):
raise pytest.UsageError('--pdb is incompatible with distributing tests.')<|docstring|>Called before "true" main routine. This is to set up config values well ahead of time
for things like pytest-cov that needs to know we are running distributed
Mostly taken from other implementations (such as xdist)<|endoftext|> |
dd5a610c8cba83c811e797dd3fa42925abaa6a195cb5ba25682892817e0ca8f9 | @pytest.fixture(scope='node')
def mp_tmpdir_factory():
"\n :return: a factory for creating unique tmp directories, unique across all Process's\n "
with TmpDirFactory() as factory:
(yield factory) | :return: a factory for creating unique tmp directories, unique across all Process's | src/pytest_mproc/plugin.py | mp_tmpdir_factory | nak/pytest_mproc | 6 | python | @pytest.fixture(scope='node')
def mp_tmpdir_factory():
"\n \n "
with TmpDirFactory() as factory:
(yield factory) | @pytest.fixture(scope='node')
def mp_tmpdir_factory():
"\n \n "
with TmpDirFactory() as factory:
(yield factory)<|docstring|>:return: a factory for creating unique tmp directories, unique across all Process's<|endoftext|> |
61662189d229cb9b9ee2975fd8ec1dd1c2bf09848abd95bafec7a5ba6b7ff73a | @contextmanager
def create_tmp_dir(self, cleanup_immediately: bool=True):
"\n :param cleanup_immediately: if True, rm the directory and all contents when associated fixture is no longer\n in use, otherwise wait until end of test session when everything is cleaned up\n :return: newly create temp directory unique across all Process's\n "
tmpdir = tempfile.mkdtemp(dir=self._root_tmp_dir)
try:
(yield Path(tmpdir))
finally:
if cleanup_immediately:
with suppress(Exception):
shutil.rmtree(tmpdir) | :param cleanup_immediately: if True, rm the directory and all contents when associated fixture is no longer
in use, otherwise wait until end of test session when everything is cleaned up
:return: newly create temp directory unique across all Process's | src/pytest_mproc/plugin.py | create_tmp_dir | nak/pytest_mproc | 6 | python | @contextmanager
def create_tmp_dir(self, cleanup_immediately: bool=True):
"\n :param cleanup_immediately: if True, rm the directory and all contents when associated fixture is no longer\n in use, otherwise wait until end of test session when everything is cleaned up\n :return: newly create temp directory unique across all Process's\n "
tmpdir = tempfile.mkdtemp(dir=self._root_tmp_dir)
try:
(yield Path(tmpdir))
finally:
if cleanup_immediately:
with suppress(Exception):
shutil.rmtree(tmpdir) | @contextmanager
def create_tmp_dir(self, cleanup_immediately: bool=True):
"\n :param cleanup_immediately: if True, rm the directory and all contents when associated fixture is no longer\n in use, otherwise wait until end of test session when everything is cleaned up\n :return: newly create temp directory unique across all Process's\n "
tmpdir = tempfile.mkdtemp(dir=self._root_tmp_dir)
try:
(yield Path(tmpdir))
finally:
if cleanup_immediately:
with suppress(Exception):
shutil.rmtree(tmpdir)<|docstring|>:param cleanup_immediately: if True, rm the directory and all contents when associated fixture is no longer
in use, otherwise wait until end of test session when everything is cleaned up
:return: newly create temp directory unique across all Process's<|endoftext|> |
0bcf74486541f16a5d4ef7c1e79d358da21ad5bab406a00bed11176cad09402e | def setup(self, sub):
' Initialize this solver.\n\n Args\n ----\n sub: `System`\n System that owns this solver.\n '
if sub.is_active():
self.unknowns_cache = np.empty(sub.unknowns.vec.shape) | Initialize this solver.
Args
----
sub: `System`
System that owns this solver. | aerostructures/solvers/nl_gauss_seidel.py | setup | NitroCortex/aerostructures | 5 | python | def setup(self, sub):
' Initialize this solver.\n\n Args\n ----\n sub: `System`\n System that owns this solver.\n '
if sub.is_active():
self.unknowns_cache = np.empty(sub.unknowns.vec.shape) | def setup(self, sub):
' Initialize this solver.\n\n Args\n ----\n sub: `System`\n System that owns this solver.\n '
if sub.is_active():
self.unknowns_cache = np.empty(sub.unknowns.vec.shape)<|docstring|>Initialize this solver.
Args
----
sub: `System`
System that owns this solver.<|endoftext|> |
3ed693fb709e0283492d0004ed88378a14a6597e7e3f3370fe66e77542f2c450 | @error_wrap_nl
def solve(self, params, unknowns, resids, system, metadata=None):
' Solves the system using Gauss Seidel.\n\n Args\n ----\n params : `VecWrapper`\n `VecWrapper` containing parameters. (p)\n\n unknowns : `VecWrapper`\n `VecWrapper` containing outputs and states. (u)\n\n resids : `VecWrapper`\n `VecWrapper` containing residuals. (r)\n\n system : `System`\n Parent `System` object.\n\n metadata : dict, optional\n Dictionary containing execution metadata (e.g. iteration coordinate).\n '
atol = self.options['atol']
rtol = self.options['rtol']
utol = self.options['utol']
maxiter = self.options['maxiter']
rutol = self.options['rutol']
iprint = self.options['iprint']
unknowns_cache = self.unknowns_cache
self.iter_count = 1
local_meta = create_local_meta(metadata, system.pathname)
system.ln_solver.local_meta = local_meta
update_local_meta(local_meta, (self.iter_count,))
system.children_solve_nonlinear(local_meta)
self.recorders.record_iteration(system, local_meta)
if (maxiter == 1):
return
resids = system.resids
unknowns_cache = np.zeros(unknowns.vec.shape)
system.apply_nonlinear(params, unknowns, resids)
normval = resids.norm()
basenorm = (normval if (normval > atol) else 1.0)
u_norm = 1e+99
ru_norm = 1e+99
if (iprint == 2):
self.print_norm(self.print_name, system, 1, normval, basenorm)
while ((self.iter_count < maxiter) and (normval > atol) and ((normval / basenorm) > rtol) and (u_norm > utol) and (ru_norm > rutol)):
self.iter_count += 1
update_local_meta(local_meta, (self.iter_count,))
unknowns_cache[:] = unknowns.vec
system.children_solve_nonlinear(local_meta)
self.recorders.record_iteration(system, local_meta)
system.apply_nonlinear(params, unknowns, resids)
normval = resids.norm()
u_norm = np.linalg.norm((unknowns.vec - unknowns_cache))
ru_norm = (np.linalg.norm((unknowns.vec - unknowns_cache)) / np.linalg.norm(unknowns.vec))
if self.options['use_aitken']:
if ((type(self.delta_u_n_1) is not str) and (normval > atol) and ((normval / basenorm) > rtol) and (u_norm > utol) and (ru_norm > rutol)):
delta_u_n = (unknowns.vec - unknowns_cache)
delta_u_n_1 = self.delta_u_n_1
self.aitken_alpha = (self.aitken_alpha * (1.0 - (np.dot((delta_u_n - delta_u_n_1), delta_u_n) / (np.linalg.norm((delta_u_n - delta_u_n_1), 2) ** 2))))
self.aitken_alpha = max(self.options['aitken_alpha_min'], min(self.options['aitken_alpha_max'], self.aitken_alpha))
if ((iprint == 1) or (iprint == 2)):
print('Aitken relaxation factor is', self.aitken_alpha)
self.delta_u_n_1 = delta_u_n.copy()
unknowns.vec[:] = (unknowns_cache + (self.aitken_alpha * delta_u_n))
elif (type(self.delta_u_n_1) is str):
self.delta_u_n_1 = (unknowns.vec - unknowns_cache)
if (iprint == 2):
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, u_norm=u_norm)
if (iprint == 1):
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, u_norm=u_norm)
if ((self.iter_count >= maxiter) or isnan(normval)):
msg = ('FAILED to converge after %d iterations' % self.iter_count)
fail = True
else:
fail = False
if ((iprint > 0) or (fail and (iprint > (- 1)))):
if (not fail):
msg = ('Converged in %d iterations' % self.iter_count)
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, msg=msg)
if (fail and self.options['err_on_maxiter']):
raise AnalysisError(("Solve in '%s': NLGaussSeidel %s" % (system.pathname, msg))) | Solves the system using Gauss Seidel.
Args
----
params : `VecWrapper`
`VecWrapper` containing parameters. (p)
unknowns : `VecWrapper`
`VecWrapper` containing outputs and states. (u)
resids : `VecWrapper`
`VecWrapper` containing residuals. (r)
system : `System`
Parent `System` object.
metadata : dict, optional
Dictionary containing execution metadata (e.g. iteration coordinate). | aerostructures/solvers/nl_gauss_seidel.py | solve | NitroCortex/aerostructures | 5 | python | @error_wrap_nl
def solve(self, params, unknowns, resids, system, metadata=None):
' Solves the system using Gauss Seidel.\n\n Args\n ----\n params : `VecWrapper`\n `VecWrapper` containing parameters. (p)\n\n unknowns : `VecWrapper`\n `VecWrapper` containing outputs and states. (u)\n\n resids : `VecWrapper`\n `VecWrapper` containing residuals. (r)\n\n system : `System`\n Parent `System` object.\n\n metadata : dict, optional\n Dictionary containing execution metadata (e.g. iteration coordinate).\n '
atol = self.options['atol']
rtol = self.options['rtol']
utol = self.options['utol']
maxiter = self.options['maxiter']
rutol = self.options['rutol']
iprint = self.options['iprint']
unknowns_cache = self.unknowns_cache
self.iter_count = 1
local_meta = create_local_meta(metadata, system.pathname)
system.ln_solver.local_meta = local_meta
update_local_meta(local_meta, (self.iter_count,))
system.children_solve_nonlinear(local_meta)
self.recorders.record_iteration(system, local_meta)
if (maxiter == 1):
return
resids = system.resids
unknowns_cache = np.zeros(unknowns.vec.shape)
system.apply_nonlinear(params, unknowns, resids)
normval = resids.norm()
basenorm = (normval if (normval > atol) else 1.0)
u_norm = 1e+99
ru_norm = 1e+99
if (iprint == 2):
self.print_norm(self.print_name, system, 1, normval, basenorm)
while ((self.iter_count < maxiter) and (normval > atol) and ((normval / basenorm) > rtol) and (u_norm > utol) and (ru_norm > rutol)):
self.iter_count += 1
update_local_meta(local_meta, (self.iter_count,))
unknowns_cache[:] = unknowns.vec
system.children_solve_nonlinear(local_meta)
self.recorders.record_iteration(system, local_meta)
system.apply_nonlinear(params, unknowns, resids)
normval = resids.norm()
u_norm = np.linalg.norm((unknowns.vec - unknowns_cache))
ru_norm = (np.linalg.norm((unknowns.vec - unknowns_cache)) / np.linalg.norm(unknowns.vec))
if self.options['use_aitken']:
if ((type(self.delta_u_n_1) is not str) and (normval > atol) and ((normval / basenorm) > rtol) and (u_norm > utol) and (ru_norm > rutol)):
delta_u_n = (unknowns.vec - unknowns_cache)
delta_u_n_1 = self.delta_u_n_1
self.aitken_alpha = (self.aitken_alpha * (1.0 - (np.dot((delta_u_n - delta_u_n_1), delta_u_n) / (np.linalg.norm((delta_u_n - delta_u_n_1), 2) ** 2))))
self.aitken_alpha = max(self.options['aitken_alpha_min'], min(self.options['aitken_alpha_max'], self.aitken_alpha))
if ((iprint == 1) or (iprint == 2)):
print('Aitken relaxation factor is', self.aitken_alpha)
self.delta_u_n_1 = delta_u_n.copy()
unknowns.vec[:] = (unknowns_cache + (self.aitken_alpha * delta_u_n))
elif (type(self.delta_u_n_1) is str):
self.delta_u_n_1 = (unknowns.vec - unknowns_cache)
if (iprint == 2):
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, u_norm=u_norm)
if (iprint == 1):
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, u_norm=u_norm)
if ((self.iter_count >= maxiter) or isnan(normval)):
msg = ('FAILED to converge after %d iterations' % self.iter_count)
fail = True
else:
fail = False
if ((iprint > 0) or (fail and (iprint > (- 1)))):
if (not fail):
msg = ('Converged in %d iterations' % self.iter_count)
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, msg=msg)
if (fail and self.options['err_on_maxiter']):
raise AnalysisError(("Solve in '%s': NLGaussSeidel %s" % (system.pathname, msg))) | @error_wrap_nl
def solve(self, params, unknowns, resids, system, metadata=None):
' Solves the system using Gauss Seidel.\n\n Args\n ----\n params : `VecWrapper`\n `VecWrapper` containing parameters. (p)\n\n unknowns : `VecWrapper`\n `VecWrapper` containing outputs and states. (u)\n\n resids : `VecWrapper`\n `VecWrapper` containing residuals. (r)\n\n system : `System`\n Parent `System` object.\n\n metadata : dict, optional\n Dictionary containing execution metadata (e.g. iteration coordinate).\n '
atol = self.options['atol']
rtol = self.options['rtol']
utol = self.options['utol']
maxiter = self.options['maxiter']
rutol = self.options['rutol']
iprint = self.options['iprint']
unknowns_cache = self.unknowns_cache
self.iter_count = 1
local_meta = create_local_meta(metadata, system.pathname)
system.ln_solver.local_meta = local_meta
update_local_meta(local_meta, (self.iter_count,))
system.children_solve_nonlinear(local_meta)
self.recorders.record_iteration(system, local_meta)
if (maxiter == 1):
return
resids = system.resids
unknowns_cache = np.zeros(unknowns.vec.shape)
system.apply_nonlinear(params, unknowns, resids)
normval = resids.norm()
basenorm = (normval if (normval > atol) else 1.0)
u_norm = 1e+99
ru_norm = 1e+99
if (iprint == 2):
self.print_norm(self.print_name, system, 1, normval, basenorm)
while ((self.iter_count < maxiter) and (normval > atol) and ((normval / basenorm) > rtol) and (u_norm > utol) and (ru_norm > rutol)):
self.iter_count += 1
update_local_meta(local_meta, (self.iter_count,))
unknowns_cache[:] = unknowns.vec
system.children_solve_nonlinear(local_meta)
self.recorders.record_iteration(system, local_meta)
system.apply_nonlinear(params, unknowns, resids)
normval = resids.norm()
u_norm = np.linalg.norm((unknowns.vec - unknowns_cache))
ru_norm = (np.linalg.norm((unknowns.vec - unknowns_cache)) / np.linalg.norm(unknowns.vec))
if self.options['use_aitken']:
if ((type(self.delta_u_n_1) is not str) and (normval > atol) and ((normval / basenorm) > rtol) and (u_norm > utol) and (ru_norm > rutol)):
delta_u_n = (unknowns.vec - unknowns_cache)
delta_u_n_1 = self.delta_u_n_1
self.aitken_alpha = (self.aitken_alpha * (1.0 - (np.dot((delta_u_n - delta_u_n_1), delta_u_n) / (np.linalg.norm((delta_u_n - delta_u_n_1), 2) ** 2))))
self.aitken_alpha = max(self.options['aitken_alpha_min'], min(self.options['aitken_alpha_max'], self.aitken_alpha))
if ((iprint == 1) or (iprint == 2)):
print('Aitken relaxation factor is', self.aitken_alpha)
self.delta_u_n_1 = delta_u_n.copy()
unknowns.vec[:] = (unknowns_cache + (self.aitken_alpha * delta_u_n))
elif (type(self.delta_u_n_1) is str):
self.delta_u_n_1 = (unknowns.vec - unknowns_cache)
if (iprint == 2):
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, u_norm=u_norm)
if (iprint == 1):
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, u_norm=u_norm)
if ((self.iter_count >= maxiter) or isnan(normval)):
msg = ('FAILED to converge after %d iterations' % self.iter_count)
fail = True
else:
fail = False
if ((iprint > 0) or (fail and (iprint > (- 1)))):
if (not fail):
msg = ('Converged in %d iterations' % self.iter_count)
self.print_norm(self.print_name, system, self.iter_count, normval, basenorm, msg=msg)
if (fail and self.options['err_on_maxiter']):
raise AnalysisError(("Solve in '%s': NLGaussSeidel %s" % (system.pathname, msg)))<|docstring|>Solves the system using Gauss Seidel.
Args
----
params : `VecWrapper`
`VecWrapper` containing parameters. (p)
unknowns : `VecWrapper`
`VecWrapper` containing outputs and states. (u)
resids : `VecWrapper`
`VecWrapper` containing residuals. (r)
system : `System`
Parent `System` object.
metadata : dict, optional
Dictionary containing execution metadata (e.g. iteration coordinate).<|endoftext|> |
aa70849eac878156fe534007ace7cfe7261abd2c900aed6a14b66ff8f4d436ea | def train_classifier(X_train, X_test, y_train, alphas, l1_ratios, seed, n_folds=4, max_iter=1000):
'\n Build the logic and sklearn pipelines to predict binary y from dataset x\n\n Arguments\n ---------\n X_train: pandas DataFrame of feature matrix for training data\n X_test: pandas DataFrame of feature matrix for testing data\n y_train: pandas DataFrame of processed y matrix (output from align_matrices())\n alphas: list of alphas to perform cross validation over\n l1_ratios: list of l1 mixing parameters to perform cross validation over\n n_folds: int of how many folds of cross validation to perform\n max_iter: the maximum number of iterations to test until convergence\n\n Returns\n ------\n The full pipeline sklearn object and y matrix predictions for training, testing,\n and cross validation\n '
clf_parameters = {'classify__alpha': alphas, 'classify__l1_ratio': l1_ratios}
estimator = Pipeline(steps=[('classify', SGDClassifier(random_state=seed, class_weight='balanced', loss='log', penalty='elasticnet', max_iter=max_iter, tol=0.001))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters, n_jobs=(- 1), cv=n_folds, scoring='average_precision', return_train_score=True)
cv_pipeline.fit(X=X_train, y=y_train.status)
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=X_train, y=y_train.status, cv=n_folds, method='decision_function')
y_predict_train = cv_pipeline.decision_function(X_train)
y_predict_test = cv_pipeline.decision_function(X_test)
return (cv_pipeline, y_predict_train, y_predict_test, y_cv) | Build the logic and sklearn pipelines to predict binary y from dataset x
Arguments
---------
X_train: pandas DataFrame of feature matrix for training data
X_test: pandas DataFrame of feature matrix for testing data
y_train: pandas DataFrame of processed y matrix (output from align_matrices())
alphas: list of alphas to perform cross validation over
l1_ratios: list of l1 mixing parameters to perform cross validation over
n_folds: int of how many folds of cross validation to perform
max_iter: the maximum number of iterations to test until convergence
Returns
------
The full pipeline sklearn object and y matrix predictions for training, testing,
and cross validation | mpmp/prediction/classification.py | train_classifier | greenelab/mpmp | 1 | python | def train_classifier(X_train, X_test, y_train, alphas, l1_ratios, seed, n_folds=4, max_iter=1000):
'\n Build the logic and sklearn pipelines to predict binary y from dataset x\n\n Arguments\n ---------\n X_train: pandas DataFrame of feature matrix for training data\n X_test: pandas DataFrame of feature matrix for testing data\n y_train: pandas DataFrame of processed y matrix (output from align_matrices())\n alphas: list of alphas to perform cross validation over\n l1_ratios: list of l1 mixing parameters to perform cross validation over\n n_folds: int of how many folds of cross validation to perform\n max_iter: the maximum number of iterations to test until convergence\n\n Returns\n ------\n The full pipeline sklearn object and y matrix predictions for training, testing,\n and cross validation\n '
clf_parameters = {'classify__alpha': alphas, 'classify__l1_ratio': l1_ratios}
estimator = Pipeline(steps=[('classify', SGDClassifier(random_state=seed, class_weight='balanced', loss='log', penalty='elasticnet', max_iter=max_iter, tol=0.001))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters, n_jobs=(- 1), cv=n_folds, scoring='average_precision', return_train_score=True)
cv_pipeline.fit(X=X_train, y=y_train.status)
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=X_train, y=y_train.status, cv=n_folds, method='decision_function')
y_predict_train = cv_pipeline.decision_function(X_train)
y_predict_test = cv_pipeline.decision_function(X_test)
return (cv_pipeline, y_predict_train, y_predict_test, y_cv) | def train_classifier(X_train, X_test, y_train, alphas, l1_ratios, seed, n_folds=4, max_iter=1000):
'\n Build the logic and sklearn pipelines to predict binary y from dataset x\n\n Arguments\n ---------\n X_train: pandas DataFrame of feature matrix for training data\n X_test: pandas DataFrame of feature matrix for testing data\n y_train: pandas DataFrame of processed y matrix (output from align_matrices())\n alphas: list of alphas to perform cross validation over\n l1_ratios: list of l1 mixing parameters to perform cross validation over\n n_folds: int of how many folds of cross validation to perform\n max_iter: the maximum number of iterations to test until convergence\n\n Returns\n ------\n The full pipeline sklearn object and y matrix predictions for training, testing,\n and cross validation\n '
clf_parameters = {'classify__alpha': alphas, 'classify__l1_ratio': l1_ratios}
estimator = Pipeline(steps=[('classify', SGDClassifier(random_state=seed, class_weight='balanced', loss='log', penalty='elasticnet', max_iter=max_iter, tol=0.001))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters, n_jobs=(- 1), cv=n_folds, scoring='average_precision', return_train_score=True)
cv_pipeline.fit(X=X_train, y=y_train.status)
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=X_train, y=y_train.status, cv=n_folds, method='decision_function')
y_predict_train = cv_pipeline.decision_function(X_train)
y_predict_test = cv_pipeline.decision_function(X_test)
return (cv_pipeline, y_predict_train, y_predict_test, y_cv)<|docstring|>Build the logic and sklearn pipelines to predict binary y from dataset x
Arguments
---------
X_train: pandas DataFrame of feature matrix for training data
X_test: pandas DataFrame of feature matrix for testing data
y_train: pandas DataFrame of processed y matrix (output from align_matrices())
alphas: list of alphas to perform cross validation over
l1_ratios: list of l1 mixing parameters to perform cross validation over
n_folds: int of how many folds of cross validation to perform
max_iter: the maximum number of iterations to test until convergence
Returns
------
The full pipeline sklearn object and y matrix predictions for training, testing,
and cross validation<|endoftext|> |
c6819f54d18c4f90edbfde7984d22a6bf0792ae44a99c29be2db746cf47d173c | def train_gb_classifier(X_train, X_test, y_train, learning_rates, alphas, lambdas, seed, n_folds=4, max_iter=1000):
'\n Fit gradient-boosted tree classifier to training data, and generate predictions\n for test data.\n\n Arguments\n ---------\n X_train: pandas DataFrame of feature matrix for training data\n X_test: pandas DataFrame of feature matrix for testing data\n y_train: pandas DataFrame of processed y matrix (output from align_matrices())\n n_folds: int of how many folds of cross validation to perform\n max_iter: the maximum number of iterations to test until convergence\n\n Returns\n ------\n The full pipeline sklearn object and y matrix predictions for training, testing,\n and cross validation\n '
from lightgbm import LGBMClassifier
clf_parameters = {'classify__learning_rate': learning_rates, 'classify__reg_alpha': alphas, 'classify__reg_lambda': lambdas}
estimator = Pipeline(steps=[('classify', LGBMClassifier(random_state=seed, class_weight='balanced', max_depth=5, n_estimators=100, colsample_bytree=0.35))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters, n_jobs=(- 1), cv=n_folds, scoring='average_precision', return_train_score=True)
cv_pipeline.fit(X=X_train, y=y_train.status)
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=X_train, y=y_train.status, cv=n_folds, method='predict_proba')[(:, 1)]
y_predict_train = cv_pipeline.predict_proba(X_train)[(:, 1)]
y_predict_test = cv_pipeline.predict_proba(X_test)[(:, 1)]
return (cv_pipeline, y_predict_train, y_predict_test, y_cv) | Fit gradient-boosted tree classifier to training data, and generate predictions
for test data.
Arguments
---------
X_train: pandas DataFrame of feature matrix for training data
X_test: pandas DataFrame of feature matrix for testing data
y_train: pandas DataFrame of processed y matrix (output from align_matrices())
n_folds: int of how many folds of cross validation to perform
max_iter: the maximum number of iterations to test until convergence
Returns
------
The full pipeline sklearn object and y matrix predictions for training, testing,
and cross validation | mpmp/prediction/classification.py | train_gb_classifier | greenelab/mpmp | 1 | python | def train_gb_classifier(X_train, X_test, y_train, learning_rates, alphas, lambdas, seed, n_folds=4, max_iter=1000):
'\n Fit gradient-boosted tree classifier to training data, and generate predictions\n for test data.\n\n Arguments\n ---------\n X_train: pandas DataFrame of feature matrix for training data\n X_test: pandas DataFrame of feature matrix for testing data\n y_train: pandas DataFrame of processed y matrix (output from align_matrices())\n n_folds: int of how many folds of cross validation to perform\n max_iter: the maximum number of iterations to test until convergence\n\n Returns\n ------\n The full pipeline sklearn object and y matrix predictions for training, testing,\n and cross validation\n '
from lightgbm import LGBMClassifier
clf_parameters = {'classify__learning_rate': learning_rates, 'classify__reg_alpha': alphas, 'classify__reg_lambda': lambdas}
estimator = Pipeline(steps=[('classify', LGBMClassifier(random_state=seed, class_weight='balanced', max_depth=5, n_estimators=100, colsample_bytree=0.35))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters, n_jobs=(- 1), cv=n_folds, scoring='average_precision', return_train_score=True)
cv_pipeline.fit(X=X_train, y=y_train.status)
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=X_train, y=y_train.status, cv=n_folds, method='predict_proba')[(:, 1)]
y_predict_train = cv_pipeline.predict_proba(X_train)[(:, 1)]
y_predict_test = cv_pipeline.predict_proba(X_test)[(:, 1)]
return (cv_pipeline, y_predict_train, y_predict_test, y_cv) | def train_gb_classifier(X_train, X_test, y_train, learning_rates, alphas, lambdas, seed, n_folds=4, max_iter=1000):
'\n Fit gradient-boosted tree classifier to training data, and generate predictions\n for test data.\n\n Arguments\n ---------\n X_train: pandas DataFrame of feature matrix for training data\n X_test: pandas DataFrame of feature matrix for testing data\n y_train: pandas DataFrame of processed y matrix (output from align_matrices())\n n_folds: int of how many folds of cross validation to perform\n max_iter: the maximum number of iterations to test until convergence\n\n Returns\n ------\n The full pipeline sklearn object and y matrix predictions for training, testing,\n and cross validation\n '
from lightgbm import LGBMClassifier
clf_parameters = {'classify__learning_rate': learning_rates, 'classify__reg_alpha': alphas, 'classify__reg_lambda': lambdas}
estimator = Pipeline(steps=[('classify', LGBMClassifier(random_state=seed, class_weight='balanced', max_depth=5, n_estimators=100, colsample_bytree=0.35))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters, n_jobs=(- 1), cv=n_folds, scoring='average_precision', return_train_score=True)
cv_pipeline.fit(X=X_train, y=y_train.status)
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=X_train, y=y_train.status, cv=n_folds, method='predict_proba')[(:, 1)]
y_predict_train = cv_pipeline.predict_proba(X_train)[(:, 1)]
y_predict_test = cv_pipeline.predict_proba(X_test)[(:, 1)]
return (cv_pipeline, y_predict_train, y_predict_test, y_cv)<|docstring|>Fit gradient-boosted tree classifier to training data, and generate predictions
for test data.
Arguments
---------
X_train: pandas DataFrame of feature matrix for training data
X_test: pandas DataFrame of feature matrix for testing data
y_train: pandas DataFrame of processed y matrix (output from align_matrices())
n_folds: int of how many folds of cross validation to perform
max_iter: the maximum number of iterations to test until convergence
Returns
------
The full pipeline sklearn object and y matrix predictions for training, testing,
and cross validation<|endoftext|> |
645a6ed5e2dba03b74d8534453451b033642ce865e12547c9930f385d330970e | def get_preds(X_test_df, y_test_df, cv_pipeline, fold_no):
'Get model-predicted probability of positive class for test data.\n\n Also returns true class, to enable quantitative comparisons in analyses.\n '
y_scores_test = cv_pipeline.decision_function(X_test_df)
y_probs_test = cv_pipeline.predict_proba(X_test_df)
assert np.array_equal(cv_pipeline.best_estimator_.classes_, np.array([0, 1]))
return pd.DataFrame({'fold_no': fold_no, 'true_class': y_test_df.status, 'score': y_scores_test, 'positive_prob': y_probs_test[(:, 1)]}, index=y_test_df.index) | Get model-predicted probability of positive class for test data.
Also returns true class, to enable quantitative comparisons in analyses. | mpmp/prediction/classification.py | get_preds | greenelab/mpmp | 1 | python | def get_preds(X_test_df, y_test_df, cv_pipeline, fold_no):
'Get model-predicted probability of positive class for test data.\n\n Also returns true class, to enable quantitative comparisons in analyses.\n '
y_scores_test = cv_pipeline.decision_function(X_test_df)
y_probs_test = cv_pipeline.predict_proba(X_test_df)
assert np.array_equal(cv_pipeline.best_estimator_.classes_, np.array([0, 1]))
return pd.DataFrame({'fold_no': fold_no, 'true_class': y_test_df.status, 'score': y_scores_test, 'positive_prob': y_probs_test[(:, 1)]}, index=y_test_df.index) | def get_preds(X_test_df, y_test_df, cv_pipeline, fold_no):
'Get model-predicted probability of positive class for test data.\n\n Also returns true class, to enable quantitative comparisons in analyses.\n '
y_scores_test = cv_pipeline.decision_function(X_test_df)
y_probs_test = cv_pipeline.predict_proba(X_test_df)
assert np.array_equal(cv_pipeline.best_estimator_.classes_, np.array([0, 1]))
return pd.DataFrame({'fold_no': fold_no, 'true_class': y_test_df.status, 'score': y_scores_test, 'positive_prob': y_probs_test[(:, 1)]}, index=y_test_df.index)<|docstring|>Get model-predicted probability of positive class for test data.
Also returns true class, to enable quantitative comparisons in analyses.<|endoftext|> |
694f22686604f81267891f046e410da76ab5231d7f68f9753043321357f2852b | def get_threshold_metrics(y_true, y_pred, drop=False):
'\n Retrieve true/false positive rates and auroc/aupr for class predictions\n\n Arguments\n ---------\n y_true: an array of gold standard mutation status\n y_pred: an array of predicted mutation status\n drop: boolean if intermediate thresholds are dropped\n\n Returns\n -------\n dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type\n '
roc_columns = ['fpr', 'tpr', 'threshold']
pr_columns = ['precision', 'recall', 'threshold']
roc_results = roc_curve(y_true, y_pred, drop_intermediate=drop)
roc_items = zip(roc_columns, roc_results)
roc_df = pd.DataFrame.from_dict(dict(roc_items))
(prec, rec, thresh) = precision_recall_curve(y_true, y_pred)
pr_df = pd.DataFrame.from_records([prec, rec]).T
pr_df = pd.concat([pr_df, pd.Series(thresh)], ignore_index=True, axis=1)
pr_df.columns = pr_columns
auroc = roc_auc_score(y_true, y_pred, average='weighted')
aupr = average_precision_score(y_true, y_pred, average='weighted')
return {'auroc': auroc, 'aupr': aupr, 'roc_df': roc_df, 'pr_df': pr_df} | Retrieve true/false positive rates and auroc/aupr for class predictions
Arguments
---------
y_true: an array of gold standard mutation status
y_pred: an array of predicted mutation status
drop: boolean if intermediate thresholds are dropped
Returns
-------
dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type | mpmp/prediction/classification.py | get_threshold_metrics | greenelab/mpmp | 1 | python | def get_threshold_metrics(y_true, y_pred, drop=False):
'\n Retrieve true/false positive rates and auroc/aupr for class predictions\n\n Arguments\n ---------\n y_true: an array of gold standard mutation status\n y_pred: an array of predicted mutation status\n drop: boolean if intermediate thresholds are dropped\n\n Returns\n -------\n dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type\n '
roc_columns = ['fpr', 'tpr', 'threshold']
pr_columns = ['precision', 'recall', 'threshold']
roc_results = roc_curve(y_true, y_pred, drop_intermediate=drop)
roc_items = zip(roc_columns, roc_results)
roc_df = pd.DataFrame.from_dict(dict(roc_items))
(prec, rec, thresh) = precision_recall_curve(y_true, y_pred)
pr_df = pd.DataFrame.from_records([prec, rec]).T
pr_df = pd.concat([pr_df, pd.Series(thresh)], ignore_index=True, axis=1)
pr_df.columns = pr_columns
auroc = roc_auc_score(y_true, y_pred, average='weighted')
aupr = average_precision_score(y_true, y_pred, average='weighted')
return {'auroc': auroc, 'aupr': aupr, 'roc_df': roc_df, 'pr_df': pr_df} | def get_threshold_metrics(y_true, y_pred, drop=False):
'\n Retrieve true/false positive rates and auroc/aupr for class predictions\n\n Arguments\n ---------\n y_true: an array of gold standard mutation status\n y_pred: an array of predicted mutation status\n drop: boolean if intermediate thresholds are dropped\n\n Returns\n -------\n dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type\n '
roc_columns = ['fpr', 'tpr', 'threshold']
pr_columns = ['precision', 'recall', 'threshold']
roc_results = roc_curve(y_true, y_pred, drop_intermediate=drop)
roc_items = zip(roc_columns, roc_results)
roc_df = pd.DataFrame.from_dict(dict(roc_items))
(prec, rec, thresh) = precision_recall_curve(y_true, y_pred)
pr_df = pd.DataFrame.from_records([prec, rec]).T
pr_df = pd.concat([pr_df, pd.Series(thresh)], ignore_index=True, axis=1)
pr_df.columns = pr_columns
auroc = roc_auc_score(y_true, y_pred, average='weighted')
aupr = average_precision_score(y_true, y_pred, average='weighted')
return {'auroc': auroc, 'aupr': aupr, 'roc_df': roc_df, 'pr_df': pr_df}<|docstring|>Retrieve true/false positive rates and auroc/aupr for class predictions
Arguments
---------
y_true: an array of gold standard mutation status
y_pred: an array of predicted mutation status
drop: boolean if intermediate thresholds are dropped
Returns
-------
dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type<|endoftext|> |
b1bae4ae05cbc7eef9b1ddef8965fe809905411d8b07bbea812b3510a820385b | def summarize_results(results, identifier, training_data, signal, seed, data_type, fold_no):
'\n Given an input results file, summarize and output all pertinent files\n\n Arguments\n ---------\n results: a results object output from `get_threshold_metrics`\n identifier: string describing the label being predicted\n training_data: the data type being used to train the model\n signal: the signal of interest\n seed: the seed used to compress the data\n data_type: the type of data (either training, testing, or cv)\n fold_no: the fold number for the external cross-validation loop\n '
if (not isinstance(training_data, str)):
training_data = '.'.join(training_data)
results_append_list = [identifier, training_data, signal, seed, data_type, fold_no]
metrics_out_ = ([results['auroc'], results['aupr']] + results_append_list)
roc_df_ = results['roc_df']
pr_df_ = results['pr_df']
roc_df_ = roc_df_.assign(predictor=identifier, training_data=training_data, signal=signal, seed=seed, data_type=data_type, fold_no=fold_no)
pr_df_ = pr_df_.assign(predictor=identifier, training_data=training_data, signal=signal, seed=seed, data_type=data_type, fold_no=fold_no)
return (metrics_out_, roc_df_, pr_df_) | Given an input results file, summarize and output all pertinent files
Arguments
---------
results: a results object output from `get_threshold_metrics`
identifier: string describing the label being predicted
training_data: the data type being used to train the model
signal: the signal of interest
seed: the seed used to compress the data
data_type: the type of data (either training, testing, or cv)
fold_no: the fold number for the external cross-validation loop | mpmp/prediction/classification.py | summarize_results | greenelab/mpmp | 1 | python | def summarize_results(results, identifier, training_data, signal, seed, data_type, fold_no):
'\n Given an input results file, summarize and output all pertinent files\n\n Arguments\n ---------\n results: a results object output from `get_threshold_metrics`\n identifier: string describing the label being predicted\n training_data: the data type being used to train the model\n signal: the signal of interest\n seed: the seed used to compress the data\n data_type: the type of data (either training, testing, or cv)\n fold_no: the fold number for the external cross-validation loop\n '
if (not isinstance(training_data, str)):
training_data = '.'.join(training_data)
results_append_list = [identifier, training_data, signal, seed, data_type, fold_no]
metrics_out_ = ([results['auroc'], results['aupr']] + results_append_list)
roc_df_ = results['roc_df']
pr_df_ = results['pr_df']
roc_df_ = roc_df_.assign(predictor=identifier, training_data=training_data, signal=signal, seed=seed, data_type=data_type, fold_no=fold_no)
pr_df_ = pr_df_.assign(predictor=identifier, training_data=training_data, signal=signal, seed=seed, data_type=data_type, fold_no=fold_no)
return (metrics_out_, roc_df_, pr_df_) | def summarize_results(results, identifier, training_data, signal, seed, data_type, fold_no):
'\n Given an input results file, summarize and output all pertinent files\n\n Arguments\n ---------\n results: a results object output from `get_threshold_metrics`\n identifier: string describing the label being predicted\n training_data: the data type being used to train the model\n signal: the signal of interest\n seed: the seed used to compress the data\n data_type: the type of data (either training, testing, or cv)\n fold_no: the fold number for the external cross-validation loop\n '
if (not isinstance(training_data, str)):
training_data = '.'.join(training_data)
results_append_list = [identifier, training_data, signal, seed, data_type, fold_no]
metrics_out_ = ([results['auroc'], results['aupr']] + results_append_list)
roc_df_ = results['roc_df']
pr_df_ = results['pr_df']
roc_df_ = roc_df_.assign(predictor=identifier, training_data=training_data, signal=signal, seed=seed, data_type=data_type, fold_no=fold_no)
pr_df_ = pr_df_.assign(predictor=identifier, training_data=training_data, signal=signal, seed=seed, data_type=data_type, fold_no=fold_no)
return (metrics_out_, roc_df_, pr_df_)<|docstring|>Given an input results file, summarize and output all pertinent files
Arguments
---------
results: a results object output from `get_threshold_metrics`
identifier: string describing the label being predicted
training_data: the data type being used to train the model
signal: the signal of interest
seed: the seed used to compress the data
data_type: the type of data (either training, testing, or cv)
fold_no: the fold number for the external cross-validation loop<|endoftext|> |
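A sketch showing how the metrics dict returned by get_threshold_metrics can be fed into summarize_results; every identifier and metadata value below is hypothetical.

import numpy as np
from mpmp.prediction.classification import get_threshold_metrics, summarize_results  # assumed import path

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0.2, 0.7, 0.6, 0.3, 0.9])
results = get_threshold_metrics(y_true, y_pred)

metrics_row, roc_df, pr_df = summarize_results(
    results,
    identifier='TP53',                       # hypothetical gene label
    training_data=['expression', 'me_27k'],  # non-string input is joined with '.' internally
    signal='signal',
    seed=42,
    data_type='test',
    fold_no=0,
)
# metrics_row = [auroc, aupr, identifier, training_data, signal, seed, data_type, fold_no]
print(metrics_row)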
fc9b1995516513ae6e48f9c3af73b0ec14f573834931d4c6b6ca3600f9842555 | def run(self):
"The task's main loop.\n\n Processes messages and handles state changes."
with picamera.PiCamera() as cam:
cam.resolution = self.shape
cam.framerate = 8
cam.rotation = 180
cam.exposure_mode = 'sports'
while True:
if (self.requested_state_change is not None):
self.state = self.requested_state_change
self.requested_state_change = None
if (self.state == self.LEARNING):
self._run_learning(cam, self.label)
elif (self.state == self.CLASSIFYING):
self._run_classifying(cam)
elif (not self.process_messages(batch=True)):
break | The task's main loop.
Processes messages and handles state changes. | app/imprint_engine.py | run | YoonChi/alto | 257 | python | def run(self):
"The task's main loop.\n\n Processes messages and handles state changes."
with picamera.PiCamera() as cam:
cam.resolution = self.shape
cam.framerate = 8
cam.rotation = 180
cam.exposure_mode = 'sports'
while True:
if (self.requested_state_change is not None):
self.state = self.requested_state_change
self.requested_state_change = None
if (self.state == self.LEARNING):
self._run_learning(cam, self.label)
elif (self.state == self.CLASSIFYING):
self._run_classifying(cam)
elif (not self.process_messages(batch=True)):
break | def run(self):
"The task's main loop.\n\n Processes messages and handles state changes."
with picamera.PiCamera() as cam:
cam.resolution = self.shape
cam.framerate = 8
cam.rotation = 180
cam.exposure_mode = 'sports'
while True:
if (self.requested_state_change is not None):
self.state = self.requested_state_change
self.requested_state_change = None
if (self.state == self.LEARNING):
self._run_learning(cam, self.label)
elif (self.state == self.CLASSIFYING):
self._run_classifying(cam)
elif (not self.process_messages(batch=True)):
break<|docstring|>The task's main loop.
Processes messages and handles state changes.<|endoftext|> |
09f6c3e19551e71f28378dc2dd2cca258c4220cd9a4065ed5888b4b4a1475d15 | def idle(self):
'Stops learning / classifying.'
self.requested_state_change = self.IDLE | Stops learning / classifying. | app/imprint_engine.py | idle | YoonChi/alto | 257 | python | def idle(self):
self.requested_state_change = self.IDLE | def idle(self):
self.requested_state_change = self.IDLE<|docstring|>Stops learning / classifying.<|endoftext|> |
ed14151e35720f508b76b05dbb181e96a46ec8a22d71b4f0994c2ea59d97400c | def start_learning(self, label):
'Starts learning for the given label.\n\n Args:\n label: Any\n\n If there is already learning data for the given label then it is\n augmented with the new data.'
self.requested_state_change = self.LEARNING
self.label = label | Starts learning for the given label.
Args:
label: Any
If there is already learning data for the given label then it is
augmented with the new data. | app/imprint_engine.py | start_learning | YoonChi/alto | 257 | python | def start_learning(self, label):
'Starts learning for the given label.\n\n Args:\n label: Any\n\n If there is already learning data for the given label then it is\n augmented with the new data.'
self.requested_state_change = self.LEARNING
self.label = label | def start_learning(self, label):
'Starts learning for the given label.\n\n Args:\n label: Any\n\n If there is already learning data for the given label then it is\n augmented with the new data.'
self.requested_state_change = self.LEARNING
self.label = label<|docstring|>Starts learning for the given label.
Args:
label: Any
If there is already learning data for the given label then it is
augmented with the new data.<|endoftext|> |
a91ddb4f88083a6aab5f8b122a22b515c852bab3af229584ff380ee81c4b5b67 | def start_classifying(self):
'Starts classifying images from the camera.'
self.requested_state_change = self.CLASSIFYING | Starts classifying images from the camera. | app/imprint_engine.py | start_classifying | YoonChi/alto | 257 | python | def start_classifying(self):
self.requested_state_change = self.CLASSIFYING | def start_classifying(self):
self.requested_state_change = self.CLASSIFYING<|docstring|>Starts classifying images from the camera.<|endoftext|> |
74b6a9423f36272445708460f2d2016cb83a2e68e0ae6c7cd83933759ad90102 | def reset(self):
'Stops learning / classifying and resets all learning data.'
self.state = self.IDLE
self.label = None
self.engine.clear() | Stops learning / classifying and resets all learning data. | app/imprint_engine.py | reset | YoonChi/alto | 257 | python | def reset(self):
self.state = self.IDLE
self.label = None
self.engine.clear() | def reset(self):
self.state = self.IDLE
self.label = None
self.engine.clear()<|docstring|>Stops learning / classifying and resets all learning data.<|endoftext|> |
3548155842bc7f8e3136fd194b739ddf0d44d59ca18b3fa0fd655977eee05472 | def _get_shape(self):
'Returns the input tensor shape as (width, height).'
input_tensor_shape = self.engine.get_input_tensor_shape()
return (input_tensor_shape[2], input_tensor_shape[1]) | Returns the input tensor shape as (width, height). | app/imprint_engine.py | _get_shape | YoonChi/alto | 257 | python | def _get_shape(self):
input_tensor_shape = self.engine.get_input_tensor_shape()
return (input_tensor_shape[2], input_tensor_shape[1]) | def _get_shape(self):
input_tensor_shape = self.engine.get_input_tensor_shape()
return (input_tensor_shape[2], input_tensor_shape[1])<|docstring|>Returns the input tensor shape as (width, height).<|endoftext|> |
68b8e4b29d570453d155c243129a4a63add1168cb1847134b766dfbb1102b263 | def _get_emb(self, image):
'Returns the embedding vector for the given image.\n\n Args:\n image: numpy.array, a uint8 RGB image with the correct shape.'
return self.engine.RunInference(image.flatten())[1].copy() | Returns the embedding vector for the given image.
Args:
image: numpy.array, a uint8 RGB image with the correct shape. | app/imprint_engine.py | _get_emb | YoonChi/alto | 257 | python | def _get_emb(self, image):
'Returns the embedding vector for the given image.\n\n Args:\n image: numpy.array, a uint8 RGB image with the correct shape.'
return self.engine.RunInference(image.flatten())[1].copy() | def _get_emb(self, image):
'Returns the embedding vector for the given image.\n\n Args:\n image: numpy.array, a uint8 RGB image with the correct shape.'
return self.engine.RunInference(image.flatten())[1].copy()<|docstring|>Returns the embedding vector for the given image.
Args:
image: numpy.array, a uint8 RGB image with the correct shape.<|endoftext|> |
2358e2b565d92b11e634ad1d86d3d18928672db29a6b8d6caaa7f0357409dc4c | def _run_learning(self, cam, label):
'Performs a learning loop until the state changes.\n\n Args:\n cam: The RPi camera.\n label: Any, the label to use for the new data.'
log.info('learning started')
output = np.empty((self.shape[0], self.shape[1], 3), dtype=np.uint8)
gen = cam.capture_continuous(output, format='rgb', use_video_port=True)
for (idx, _) in enumerate(gen):
self.engine.add_embedding(label, self._get_emb(output))
if (not self.process_messages(block=False)):
return
if (self.requested_state_change is not None):
break
log.info('learning stopped') | Performs a learning loop until the state changes.
Args:
cam: The RPi camera.
label: Any, the label to use for the new data. | app/imprint_engine.py | _run_learning | YoonChi/alto | 257 | python | def _run_learning(self, cam, label):
'Performs a learning loop until the state changes.\n\n Args:\n cam: The RPi camera.\n label: Any, the label to use for the new data.'
log.info('learning started')
output = np.empty((self.shape[0], self.shape[1], 3), dtype=np.uint8)
gen = cam.capture_continuous(output, format='rgb', use_video_port=True)
for (idx, _) in enumerate(gen):
self.engine.add_embedding(label, self._get_emb(output))
if (not self.process_messages(block=False)):
return
if (self.requested_state_change is not None):
break
log.info('learning stopped') | def _run_learning(self, cam, label):
'Performs a learning loop until the state changes.\n\n Args:\n cam: The RPi camera.\n label: Any, the label to use for the new data.'
log.info('learning started')
output = np.empty((self.shape[0], self.shape[1], 3), dtype=np.uint8)
gen = cam.capture_continuous(output, format='rgb', use_video_port=True)
for (idx, _) in enumerate(gen):
self.engine.add_embedding(label, self._get_emb(output))
if (not self.process_messages(block=False)):
return
if (self.requested_state_change is not None):
break
log.info('learning stopped')<|docstring|>Performs a learning loop until the state changes.
Args:
cam: The RPi camera.
label: Any, the label to use for the new data.<|endoftext|> |
db6f6a6d3476de847b2437ee8017f651a3f07198ab7c2667496ae78d61e399c9 | def _run_classifying(self, cam):
'Performs a classifying loop until the state changes.\n\n Args:\n cam: The RPi camera.'
log.info('classifying started')
self.results = collections.defaultdict((lambda : InfiniteImpulseResponseFilter(self.iir_weight)))
current_label = None
output = np.empty((self.shape[0], self.shape[1], 3), dtype=np.uint8)
gen = cam.capture_continuous(output, format='rgb', use_video_port=True)
for _ in gen:
confidences = self.engine.get_confidences(self._get_emb(output))
self.emit('Engine.confidences', confidences)
log.debug('confidences = %s', confidences)
for (label, confidence) in confidences.items():
self.results[label].update(confidence)
(match_label, iir) = max(self.results.items(), key=(lambda item: item[1].output))
if (iir.output < self.confidence):
match_label = None
if (match_label != current_label):
current_label = match_label
self.emit('Engine.matched', current_label)
for (label, iir) in self.results.items():
iir.reset((1 if (label == current_label) else 0))
if (not self.process_messages(block=False)):
return
if (self.requested_state_change is not None):
break
if (current_label is not None):
self.emit('Engine.matched', None)
log.info('classifying stopped') | Performs a classifying loop until the state changes.
Args:
cam: The RPi camera. | app/imprint_engine.py | _run_classifying | YoonChi/alto | 257 | python | def _run_classifying(self, cam):
'Performs a classifying loop until the state changes.\n\n Args:\n cam: The RPi camera.'
log.info('classifying started')
self.results = collections.defaultdict((lambda : InfiniteImpulseResponseFilter(self.iir_weight)))
current_label = None
output = np.empty((self.shape[0], self.shape[1], 3), dtype=np.uint8)
gen = cam.capture_continuous(output, format='rgb', use_video_port=True)
for _ in gen:
confidences = self.engine.get_confidences(self._get_emb(output))
self.emit('Engine.confidences', confidences)
log.debug('confidences = %s', confidences)
for (label, confidence) in confidences.items():
self.results[label].update(confidence)
(match_label, iir) = max(self.results.items(), key=(lambda item: item[1].output))
if (iir.output < self.confidence):
match_label = None
if (match_label != current_label):
current_label = match_label
self.emit('Engine.matched', current_label)
for (label, iir) in self.results.items():
iir.reset((1 if (label == current_label) else 0))
if (not self.process_messages(block=False)):
return
if (self.requested_state_change is not None):
break
if (current_label is not None):
self.emit('Engine.matched', None)
log.info('classifying stopped') | def _run_classifying(self, cam):
'Performs a classifying loop until the state changes.\n\n Args:\n cam: The RPi camera.'
log.info('classifying started')
self.results = collections.defaultdict((lambda : InfiniteImpulseResponseFilter(self.iir_weight)))
current_label = None
output = np.empty((self.shape[0], self.shape[1], 3), dtype=np.uint8)
gen = cam.capture_continuous(output, format='rgb', use_video_port=True)
for _ in gen:
confidences = self.engine.get_confidences(self._get_emb(output))
self.emit('Engine.confidences', confidences)
log.debug('confidences = %s', confidences)
for (label, confidence) in confidences.items():
self.results[label].update(confidence)
(match_label, iir) = max(self.results.items(), key=(lambda item: item[1].output))
if (iir.output < self.confidence):
match_label = None
if (match_label != current_label):
current_label = match_label
self.emit('Engine.matched', current_label)
for (label, iir) in self.results.items():
iir.reset((1 if (label == current_label) else 0))
if (not self.process_messages(block=False)):
return
if (self.requested_state_change is not None):
break
if (current_label is not None):
self.emit('Engine.matched', None)
log.info('classifying stopped')<|docstring|>Performs a classifying loop until the state changes.
Args:
cam: The RPi camera.<|endoftext|> |
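The label-selection rule inside the classifying loop, reduced to plain Python so it can be read without a camera or Edge TPU; the confidence values are invented.

confidence_threshold = 0.5                          # plays the role of self.confidence
smoothed = {'cat': 0.62, 'dog': 0.31, 'mug': 0.12}  # per-label IIR filter outputs

match_label, best = max(smoothed.items(), key=lambda item: item[1])
if best < confidence_threshold:
    match_label = None                              # below threshold: report no match
print(match_label)  # 'cat'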
f8aac537fef727bdda530432c58abbcd8037f6efdefb90f96f33b04a27c6cc6d | def __init__(self, weight, value=0):
'Constructor.\n\n Args:\n weight: float, the weight (0-1) to give new inputs.\n value: float, the initial value.'
self.weight = weight
self.output = value | Constructor.
Args:
weight: float, the weight (0-1) to give new inputs.
value: float, the initial value. | app/imprint_engine.py | __init__ | YoonChi/alto | 257 | python | def __init__(self, weight, value=0):
'Constructor.\n\n Args:\n weight: float, the weight (0-1) to give new inputs.\n value: float, the initial value.'
self.weight = weight
self.output = value | def __init__(self, weight, value=0):
'Constructor.\n\n Args:\n weight: float, the weight (0-1) to give new inputs.\n value: float, the initial value.'
self.weight = weight
self.output = value<|docstring|>Constructor.
Args:
weight: float, the weight (0-1) to give new inputs.
value: float, the initial value.<|endoftext|> |
605988015ea32c192d604fe530775153b643279a9ea65e9226c2b0bb43541c95 | def update(self, input):
'Updates the output.\n\n Args:\n input: float, the current input.'
self.output *= (1 - self.weight)
self.output += (input * self.weight) | Updates the output.
Args:
input: float, the current input. | app/imprint_engine.py | update | YoonChi/alto | 257 | python | def update(self, input):
'Updates the output.\n\n Args:\n input: float, the current input.'
self.output *= (1 - self.weight)
self.output += (input * self.weight) | def update(self, input):
'Updates the output.\n\n Args:\n input: float, the current input.'
self.output *= (1 - self.weight)
self.output += (input * self.weight)<|docstring|>Updates the output.
Args:
input: float, the current input.<|endoftext|> |
b6f258458be3b649beba8cc5fda9e6254368cab1dff4f276cec05472ff05706e | def reset(self, value=0):
'Resets the output.\n\n Args:\n value: float, the new output.'
self.output = value | Resets the output.
Args:
value: float, the new output. | app/imprint_engine.py | reset | YoonChi/alto | 257 | python | def reset(self, value=0):
'Resets the output.\n\n Args:\n value: float, the new output.'
self.output = value | def reset(self, value=0):
'Resets the output.\n\n Args:\n value: float, the new output.'
self.output = value<|docstring|>Resets the output.
Args:
value: float, the new output.<|endoftext|> |
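A self-contained sketch of the same exponential-smoothing filter described by the three records above, with a short numeric check.

class InfiniteImpulseResponseFilter:
    """Exponential smoothing: output = (1 - weight) * output + weight * input."""

    def __init__(self, weight, value=0):
        self.weight = weight
        self.output = value

    def update(self, input):
        self.output = (1 - self.weight) * self.output + self.weight * input

    def reset(self, value=0):
        self.output = value

iir = InfiniteImpulseResponseFilter(weight=0.25)
for sample in (1, 1, 1):
    iir.update(sample)
print(round(iir.output, 6))  # 0.578125 == 1 - 0.75 ** 3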
525ffdb2060e64d01389bb7533ea9f22a44fe9b9680a668426778af72041dde5 | def __init__(self, model_path):
'Creates a EmbeddingEngine with given model.\n\n Args:\n model_path: str, path to a TF-Lite Flatbuffer file.\n\n Raises:\n ValueError: The model output is invalid.\n '
super().__init__(model_path)
output_tensors_sizes = self.get_all_output_tensors_sizes()
if (output_tensors_sizes.size != 1):
            raise ValueError('Detection model should have only 1 output tensor! This model has {}.'.format(output_tensors_sizes.size))
Args:
model_path: str, path to a TF-Lite Flatbuffer file.
Raises:
ValueError: The model output is invalid. | app/imprint_engine.py | __init__ | YoonChi/alto | 257 | python | def __init__(self, model_path):
'Creates a EmbeddingEngine with given model.\n\n Args:\n model_path: str, path to a TF-Lite Flatbuffer file.\n\n Raises:\n ValueError: The model output is invalid.\n '
super().__init__(model_path)
output_tensors_sizes = self.get_all_output_tensors_sizes()
if (output_tensors_sizes.size != 1):
            raise ValueError('Detection model should have only 1 output tensor! This model has {}.'.format(output_tensors_sizes.size))
'Creates a EmbeddingEngine with given model.\n\n Args:\n model_path: str, path to a TF-Lite Flatbuffer file.\n\n Raises:\n ValueError: The model output is invalid.\n '
super().__init__(model_path)
output_tensors_sizes = self.get_all_output_tensors_sizes()
if (output_tensors_sizes.size != 1):
            raise ValueError('Detection model should have only 1 output tensor! This model has {}.'.format(output_tensors_sizes.size))<|docstring|>Creates a EmbeddingEngine with given model.
Args:
model_path: str, path to a TF-Lite Flatbuffer file.
Raises:
ValueError: The model output is invalid.<|endoftext|> |
b169247282b678d08385f1aef0367adb116673bab7e78ac153a25a96120347be | def __init__(self, model_path, k_nearest_neighbors=3, maxlen=1000):
'Creates a EmbeddingEngine with given model.\n\n Args:\n model_path: String, path to TF-Lite Flatbuffer file.\n k_nearest_neighbors: int, the number of neighbors to use for\n confidences.\n maxlen: int, the maximum number of embeddings to store per label.\n\n Raises:\n ValueError: The model output is invalid.\n '
super().__init__(model_path)
self.embedding_map = collections.defaultdict(list)
self.knn = k_nearest_neighbors
self.maxlen = maxlen | Creates a EmbeddingEngine with given model.
Args:
model_path: String, path to TF-Lite Flatbuffer file.
k_nearest_neighbors: int, the number of neighbors to use for
confidences.
maxlen: int, the maximum number of embeddings to store per label.
Raises:
ValueError: The model output is invalid. | app/imprint_engine.py | __init__ | YoonChi/alto | 257 | python | def __init__(self, model_path, k_nearest_neighbors=3, maxlen=1000):
'Creates a EmbeddingEngine with given model.\n\n Args:\n model_path: String, path to TF-Lite Flatbuffer file.\n k_nearest_neighbors: int, the number of neighbors to use for\n confidences.\n maxlen: int, the maximum number of embeddings to store per label.\n\n Raises:\n ValueError: The model output is invalid.\n '
super().__init__(model_path)
self.embedding_map = collections.defaultdict(list)
self.knn = k_nearest_neighbors
self.maxlen = maxlen | def __init__(self, model_path, k_nearest_neighbors=3, maxlen=1000):
'Creates a EmbeddingEngine with given model.\n\n Args:\n model_path: String, path to TF-Lite Flatbuffer file.\n k_nearest_neighbors: int, the number of neighbors to use for\n confidences.\n maxlen: int, the maximum number of embeddings to store per label.\n\n Raises:\n ValueError: The model output is invalid.\n '
super().__init__(model_path)
self.embedding_map = collections.defaultdict(list)
self.knn = k_nearest_neighbors
self.maxlen = maxlen<|docstring|>Creates a EmbeddingEngine with given model.
Args:
model_path: String, path to TF-Lite Flatbuffer file.
k_nearest_neighbors: int, the number of neighbors to use for
confidences.
maxlen: int, the maximum number of embeddings to store per label.
Raises:
ValueError: The model output is invalid.<|endoftext|> |
17fb6f3c583352bab4d797721202b9372c44e4b6e2a767445d921d019e638c80 | def clear(self):
'Clear the store: forgets all stored embeddings.'
self.embedding_map = collections.defaultdict(list) | Clear the store: forgets all stored embeddings. | app/imprint_engine.py | clear | YoonChi/alto | 257 | python | def clear(self):
self.embedding_map = collections.defaultdict(list) | def clear(self):
self.embedding_map = collections.defaultdict(list)<|docstring|>Clear the store: forgets all stored embeddings.<|endoftext|> |
4153982f74a20f42b34ca88f9f0e942d470819597151a48c722cfdf1190fcd5c | def add_embedding(self, label, emb):
'Add an embedding vector to the store.'
normal = (emb / np.sqrt((emb ** 2).sum()))
embeddings = self.embedding_map[label]
embeddings.append(normal)
if (len(embeddings) > self.maxlen):
self.embedding_map[label] = embeddings[(- self.maxlen):] | Add an embedding vector to the store. | app/imprint_engine.py | add_embedding | YoonChi/alto | 257 | python | def add_embedding(self, label, emb):
normal = (emb / np.sqrt((emb ** 2).sum()))
embeddings = self.embedding_map[label]
embeddings.append(normal)
if (len(embeddings) > self.maxlen):
self.embedding_map[label] = embeddings[(- self.maxlen):] | def add_embedding(self, label, emb):
normal = (emb / np.sqrt((emb ** 2).sum()))
embeddings = self.embedding_map[label]
embeddings.append(normal)
if (len(embeddings) > self.maxlen):
self.embedding_map[label] = embeddings[(- self.maxlen):]<|docstring|>Add an embedding vector to the store.<|endoftext|> |
78a1db3b2f5bd3eb26dcd98ed75bf69241cf1c9d49936dfeecd8375b006a9b34 | def get_confidences(self, query_emb):
'Returns the match confidences for a query embedding.\n\n Args:\n query_emb: The embedding vector to match against.\n\n Returns:\n Dict[Any, float], a mapping of labels to match confidences.'
query_emb = (query_emb / np.sqrt((query_emb ** 2).sum()))
results = {}
for (label, embeds) in self.embedding_map.items():
dists = np.matmul(embeds, query_emb)
if (len(dists) <= self.knn):
k_largest = dists
else:
k_largest = np.partition(dists, (- self.knn))[(- self.knn):]
results[label] = np.average(k_largest)
return results | Returns the match confidences for a query embedding.
Args:
query_emb: The embedding vector to match against.
Returns:
Dict[Any, float], a mapping of labels to match confidences. | app/imprint_engine.py | get_confidences | YoonChi/alto | 257 | python | def get_confidences(self, query_emb):
'Returns the match confidences for a query embedding.\n\n Args:\n query_emb: The embedding vector to match against.\n\n Returns:\n Dict[Any, float], a mapping of labels to match confidences.'
query_emb = (query_emb / np.sqrt((query_emb ** 2).sum()))
results = {}
for (label, embeds) in self.embedding_map.items():
dists = np.matmul(embeds, query_emb)
if (len(dists) <= self.knn):
k_largest = dists
else:
k_largest = np.partition(dists, (- self.knn))[(- self.knn):]
results[label] = np.average(k_largest)
return results | def get_confidences(self, query_emb):
'Returns the match confidences for a query embedding.\n\n Args:\n query_emb: The embedding vector to match against.\n\n Returns:\n Dict[Any, float], a mapping of labels to match confidences.'
query_emb = (query_emb / np.sqrt((query_emb ** 2).sum()))
results = {}
for (label, embeds) in self.embedding_map.items():
dists = np.matmul(embeds, query_emb)
if (len(dists) <= self.knn):
k_largest = dists
else:
k_largest = np.partition(dists, (- self.knn))[(- self.knn):]
results[label] = np.average(k_largest)
return results<|docstring|>Returns the match confidences for a query embedding.
Args:
query_emb: The embedding vector to match against.
Returns:
Dict[Any, float], a mapping of labels to match confidences.<|endoftext|> |
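A NumPy-only sketch of the normalisation and k-nearest-neighbour confidence computation used by add_embedding and get_confidences; the embeddings are made up.

import numpy as np

def normalize(v):
    return v / np.sqrt((v ** 2).sum())

stored = {'mug': [normalize(np.array([1.0, 0.0, 0.2])),
                  normalize(np.array([0.9, 0.1, 0.3]))]}
query = normalize(np.array([0.95, 0.05, 0.25]))

k = 3
confidences = {}
for label, embeds in stored.items():
    dists = np.matmul(embeds, query)  # dot products of unit vectors (cosine similarity)
    k_largest = dists if len(dists) <= k else np.partition(dists, -k)[-k:]
    confidences[label] = np.average(k_largest)
print(confidences)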
74e37846e88e2b65705953aaafb1160a4dab5eb332d06f52b0058d356110409c | def add_arguments(self, parser):
'\n Adds custom arguments specific to this command.\n '
super(Command, self).add_arguments(parser)
parser.add_argument('release-date', help='Date that the version was released (format: YYYY-MM-DD)')
parser.add_argument('--skip-clean', action='store_false', dest='clean', default=True, help='Skip cleaning up the raw data files')
parser.add_argument('--skip-load', action='store_false', dest='load', default=True, help='Skip loading up the raw data files')
parser.add_argument('--keep-files', action='store_true', dest='keep_files', default=False, help='Keep zip, unzipped, TSV and CSV files')
        parser.add_argument('-a', '--app-name', dest='app_name', default='calaccess_raw', help='Name of Django app with models into which data will be imported (if other than calaccess_raw)') | Adds custom arguments specific to this command. | example/toolbox/management/commands/reprocesscalaccessrawdata.py | add_arguments | rkiddy/django-calaccess-raw-data | 48 | python | def add_arguments(self, parser):
'\n \n '
super(Command, self).add_arguments(parser)
parser.add_argument('release-date', help='Date that the version was released (format: YYYY-MM-DD)')
parser.add_argument('--skip-clean', action='store_false', dest='clean', default=True, help='Skip cleaning up the raw data files')
parser.add_argument('--skip-load', action='store_false', dest='load', default=True, help='Skip loading up the raw data files')
parser.add_argument('--keep-files', action='store_true', dest='keep_files', default=False, help='Keep zip, unzipped, TSV and CSV files')
        parser.add_argument('-a', '--app-name', dest='app_name', default='calaccess_raw', help='Name of Django app with models into which data will be imported (if other than calaccess_raw)') | def add_arguments(self, parser):
'\n \n '
super(Command, self).add_arguments(parser)
parser.add_argument('release-date', help='Date that the version was released (format: YYYY-MM-DD)')
parser.add_argument('--skip-clean', action='store_false', dest='clean', default=True, help='Skip cleaning up the raw data files')
parser.add_argument('--skip-load', action='store_false', dest='load', default=True, help='Skip loading up the raw data files')
parser.add_argument('--keep-files', action='store_true', dest='keep_files', default=False, help='Keep zip, unzipped, TSV and CSV files')
        parser.add_argument('-a', '--app-name', dest='app_name', default='calaccess_raw', help='Name of Django app with models into which data will be imported (if other than calaccess_raw)')<|docstring|>Adds custom arguments specific to this command.<|endoftext|>
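A sketch of invoking the management command programmatically from a configured Django project; the release date and option values are hypothetical (the shell equivalent would be python manage.py reprocesscalaccessrawdata 2016-06-07 --keep-files).

from django.core.management import call_command

call_command(
    'reprocesscalaccessrawdata',
    '2016-06-07',              # release-date positional argument (hypothetical date)
    keep_files=True,           # --keep-files
    app_name='calaccess_raw',  # -a / --app-name
)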
c7a732493fff7f112152798efe23a94aca2facdf59ba6f7ef3fab8ba504db14d | def handle(self, *args, **options):
'\n Make it happen.\n '
super(Command, self).handle(*args, **options)
self.release_date = datetime.strptime(options['release-date'], '%Y-%m-%d').date()
self.app_name = options['app_name']
self.keep_files = options['keep_files']
self.cleaning = options['clean']
self.loading = options['load']
self.data_dir = get_download_directory()
(os.path.exists(self.data_dir) or os.makedirs(self.data_dir))
self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
self.tsv_dir = os.path.join(self.data_dir, 'tsv/')
self.csv_dir = os.path.join(self.data_dir, 'csv/')
(os.path.exists(self.csv_dir) or os.makedirs(self.csv_dir))
try:
version = self.raw_data_versions.get(release_datetime__year=self.release_date.year, release_datetime__month=self.release_date.month, release_datetime__day=self.release_date.day)
except RawDataVersion.DoesNotExist:
raise CommandError('No release from {0} in archive.'.format(self.release_date))
last_started_reprocess = self.get_last_log()
self.resume_mode = False
if last_started_reprocess:
if last_started_reprocess.finish_datetime:
if (last_started_reprocess.version == version):
if (len(last_started_reprocess.called) > 0):
self.log('Resuming previous re-processing job.')
self.resume_mode = True
if self.resume_mode:
self.log_record = last_started_reprocess
else:
self.log_record = self.command_logs.create(version=version, command=self, called_by=self.get_caller_log())
if self.verbosity:
self.header('Copying archived zip file to data directory')
if (not self.resume_mode):
with open(self.zip_path, 'w') as local_zip:
version.zip_file_archive.open()
local_zip.write(version.zip_file_archive.read())
version.zip_file_archive.close()
self.unzip()
self.prep()
if self.verbosity:
self.duration()
if options['clean']:
self.clean()
if self.verbosity:
self.duration()
if options['load']:
self.load()
if self.verbosity:
self.duration()
self.log_record.finish_datetime = now()
self.log_record.save()
if self.verbosity:
self.success('Done!')
logger.info('Done!') | Make it happen. | example/toolbox/management/commands/reprocesscalaccessrawdata.py | handle | rkiddy/django-calaccess-raw-data | 48 | python | def handle(self, *args, **options):
'\n \n '
super(Command, self).handle(*args, **options)
self.release_date = datetime.strptime(options['release-date'], '%Y-%m-%d').date()
self.app_name = options['app_name']
self.keep_files = options['keep_files']
self.cleaning = options['clean']
self.loading = options['load']
self.data_dir = get_download_directory()
(os.path.exists(self.data_dir) or os.makedirs(self.data_dir))
self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
self.tsv_dir = os.path.join(self.data_dir, 'tsv/')
self.csv_dir = os.path.join(self.data_dir, 'csv/')
(os.path.exists(self.csv_dir) or os.makedirs(self.csv_dir))
try:
version = self.raw_data_versions.get(release_datetime__year=self.release_date.year, release_datetime__month=self.release_date.month, release_datetime__day=self.release_date.day)
except RawDataVersion.DoesNotExist:
raise CommandError('No release from {0} in archive.'.format(self.release_date))
last_started_reprocess = self.get_last_log()
self.resume_mode = False
if last_started_reprocess:
if last_started_reprocess.finish_datetime:
if (last_started_reprocess.version == version):
if (len(last_started_reprocess.called) > 0):
self.log('Resuming previous re-processing job.')
self.resume_mode = True
if self.resume_mode:
self.log_record = last_started_reprocess
else:
self.log_record = self.command_logs.create(version=version, command=self, called_by=self.get_caller_log())
if self.verbosity:
self.header('Copying archived zip file to data directory')
if (not self.resume_mode):
with open(self.zip_path, 'w') as local_zip:
version.zip_file_archive.open()
local_zip.write(version.zip_file_archive.read())
version.zip_file_archive.close()
self.unzip()
self.prep()
if self.verbosity:
self.duration()
if options['clean']:
self.clean()
if self.verbosity:
self.duration()
if options['load']:
self.load()
if self.verbosity:
self.duration()
self.log_record.finish_datetime = now()
self.log_record.save()
if self.verbosity:
self.success('Done!')
logger.info('Done!') | def handle(self, *args, **options):
'\n \n '
super(Command, self).handle(*args, **options)
self.release_date = datetime.strptime(options['release-date'], '%Y-%m-%d').date()
self.app_name = options['app_name']
self.keep_files = options['keep_files']
self.cleaning = options['clean']
self.loading = options['load']
self.data_dir = get_download_directory()
(os.path.exists(self.data_dir) or os.makedirs(self.data_dir))
self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
self.tsv_dir = os.path.join(self.data_dir, 'tsv/')
self.csv_dir = os.path.join(self.data_dir, 'csv/')
(os.path.exists(self.csv_dir) or os.makedirs(self.csv_dir))
try:
version = self.raw_data_versions.get(release_datetime__year=self.release_date.year, release_datetime__month=self.release_date.month, release_datetime__day=self.release_date.day)
except RawDataVersion.DoesNotExist:
raise CommandError('No release from {0} in archive.'.format(self.release_date))
last_started_reprocess = self.get_last_log()
self.resume_mode = False
if last_started_reprocess:
if last_started_reprocess.finish_datetime:
if (last_started_reprocess.version == version):
if (len(last_started_reprocess.called) > 0):
self.log('Resuming previous re-processing job.')
self.resume_mode = True
if self.resume_mode:
self.log_record = last_started_reprocess
else:
self.log_record = self.command_logs.create(version=version, command=self, called_by=self.get_caller_log())
if self.verbosity:
self.header('Copying archived zip file to data directory')
if (not self.resume_mode):
with open(self.zip_path, 'w') as local_zip:
version.zip_file_archive.open()
local_zip.write(version.zip_file_archive.read())
version.zip_file_archive.close()
self.unzip()
self.prep()
if self.verbosity:
self.duration()
if options['clean']:
self.clean()
if self.verbosity:
self.duration()
if options['load']:
self.load()
if self.verbosity:
self.duration()
self.log_record.finish_datetime = now()
self.log_record.save()
if self.verbosity:
self.success('Done!')
logger.info('Done!')<|docstring|>Make it happen.<|endoftext|> |
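A sketch of the archived-version lookup that handle() performs before re-processing, assuming a configured Django project and the calaccess_raw.models import path; the date is hypothetical.

from datetime import datetime
from calaccess_raw.models import RawDataVersion  # assumed import path

release_date = datetime.strptime('2016-06-07', '%Y-%m-%d').date()  # hypothetical date
version = RawDataVersion.objects.get(
    release_datetime__year=release_date.year,
    release_datetime__month=release_date.month,
    release_datetime__day=release_date.day,
)
print(version.release_datetime)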
c576d834b2dbe78ae4367c63836c4a5a0fcbe6693d87ff55910bcb3bc1a08a69 | def unzip(self):
'\n Unzip the snapshot file.\n '
if self.verbosity:
self.log(' Unzipping archive')
with zipfile.ZipFile(self.zip_path) as zf:
for member in zf.infolist():
words = member.filename.split('/')
path = self.data_dir
for word in words[:(- 1)]:
(drive, word) = os.path.splitdrive(word)
(head, word) = os.path.split(word)
if (word in (os.curdir, os.pardir, '')):
continue
path = os.path.join(path, word)
zf.extract(member, path) | Unzip the snapshot file. | example/toolbox/management/commands/reprocesscalaccessrawdata.py | unzip | rkiddy/django-calaccess-raw-data | 48 | python | def unzip(self):
'\n \n '
if self.verbosity:
self.log(' Unzipping archive')
with zipfile.ZipFile(self.zip_path) as zf:
for member in zf.infolist():
words = member.filename.split('/')
path = self.data_dir
for word in words[:(- 1)]:
(drive, word) = os.path.splitdrive(word)
(head, word) = os.path.split(word)
if (word in (os.curdir, os.pardir, )):
continue
path = os.path.join(path, word)
zf.extract(member, path) | def unzip(self):
'\n \n '
if self.verbosity:
self.log(' Unzipping archive')
with zipfile.ZipFile(self.zip_path) as zf:
for member in zf.infolist():
words = member.filename.split('/')
path = self.data_dir
for word in words[:(- 1)]:
(drive, word) = os.path.splitdrive(word)
(head, word) = os.path.split(word)
if (word in (os.curdir, os.pardir, )):
continue
path = os.path.join(path, word)
zf.extract(member, path)<|docstring|>Unzip the snapshot file.<|endoftext|> |
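The per-member path sanitisation used by unzip(), applied to a single hypothetical archive member name so the '.' / '..' filtering is visible.

import os

member_name = 'CalAccess/DATA/../CVR_SO_CD.TSV'  # hypothetical member name
data_dir = '/tmp/calaccess'                      # hypothetical download directory

path = data_dir
for word in member_name.split('/')[:-1]:
    drive, word = os.path.splitdrive(word)
    head, word = os.path.split(word)
    if word in (os.curdir, os.pardir, ''):
        continue                                 # drop '.', '..' and empty components
    path = os.path.join(path, word)
print(path)  # /tmp/calaccess/CalAccess/DATA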
68ffd6c6de33b06b811ffd4432a52992f28b24cbb20a371af85e2ffebc493567 | def prep(self):
"\n Rearrange the unzipped files and get rid of the stuff we don't want.\n "
if self.verbosity:
self.log(' Prepping unzipped data')
shutil.move(os.path.join(self.data_dir, 'CalAccess/DATA/CalAccess/DATA/'), self.data_dir)
if os.path.exists(self.tsv_dir):
shutil.rmtree(self.tsv_dir)
shutil.move(os.path.join(self.data_dir, 'DATA/'), self.tsv_dir) | Rearrange the unzipped files and get rid of the stuff we don't want. | example/toolbox/management/commands/reprocesscalaccessrawdata.py | prep | rkiddy/django-calaccess-raw-data | 48 | python | def prep(self):
"\n \n "
if self.verbosity:
self.log(' Prepping unzipped data')
shutil.move(os.path.join(self.data_dir, 'CalAccess/DATA/CalAccess/DATA/'), self.data_dir)
if os.path.exists(self.tsv_dir):
shutil.rmtree(self.tsv_dir)
shutil.move(os.path.join(self.data_dir, 'DATA/'), self.tsv_dir) | def prep(self):
"\n \n "
if self.verbosity:
self.log(' Prepping unzipped data')
shutil.move(os.path.join(self.data_dir, 'CalAccess/DATA/CalAccess/DATA/'), self.data_dir)
if os.path.exists(self.tsv_dir):
shutil.rmtree(self.tsv_dir)
shutil.move(os.path.join(self.data_dir, 'DATA/'), self.tsv_dir)<|docstring|>Rearrange the unzipped files and get rid of the stuff we don't want.<|endoftext|> |
caf921a79f958ef4ef90bac107937fa37f902e47f10b928050f13e01913e9a80 | def clean(self):
'\n Clean up the raw data files from the state so they are ready to get loaded into the database.\n '
if self.verbosity:
self.header('Cleaning data files')
tsv_list = os.listdir(self.tsv_dir)
if self.resume_mode:
prev_cleaned = [(x.file_name + '.TSV') for x in self.log_record.called.filter(command='cleancalaccessrawfile', finish_datetime__isnull=False)]
self.log('{} files already cleaned.'.format(len(prev_cleaned)))
tsv_list = [x for x in tsv_list if (x not in prev_cleaned)]
if self.verbosity:
tsv_list = progress.bar(tsv_list)
for name in tsv_list:
call_command('cleancalaccessrawfile', name, verbosity=self.verbosity, keep_files=self.keep_files) | Clean up the raw data files from the state so they are ready to get loaded into the database. | example/toolbox/management/commands/reprocesscalaccessrawdata.py | clean | rkiddy/django-calaccess-raw-data | 48 | python | def clean(self):
'\n \n '
if self.verbosity:
self.header('Cleaning data files')
tsv_list = os.listdir(self.tsv_dir)
if self.resume_mode:
prev_cleaned = [(x.file_name + '.TSV') for x in self.log_record.called.filter(command='cleancalaccessrawfile', finish_datetime__isnull=False)]
self.log('{} files already cleaned.'.format(len(prev_cleaned)))
tsv_list = [x for x in tsv_list if (x not in prev_cleaned)]
if self.verbosity:
tsv_list = progress.bar(tsv_list)
for name in tsv_list:
call_command('cleancalaccessrawfile', name, verbosity=self.verbosity, keep_files=self.keep_files) | def clean(self):
'\n \n '
if self.verbosity:
self.header('Cleaning data files')
tsv_list = os.listdir(self.tsv_dir)
if self.resume_mode:
prev_cleaned = [(x.file_name + '.TSV') for x in self.log_record.called.filter(command='cleancalaccessrawfile', finish_datetime__isnull=False)]
self.log('{} files already cleaned.'.format(len(prev_cleaned)))
tsv_list = [x for x in tsv_list if (x not in prev_cleaned)]
if self.verbosity:
tsv_list = progress.bar(tsv_list)
for name in tsv_list:
call_command('cleancalaccessrawfile', name, verbosity=self.verbosity, keep_files=self.keep_files)<|docstring|>Clean up the raw data files from the state so they are ready to get loaded into the database.<|endoftext|> |
9d9daef461a940e0b4387e05a2405a129ac6f900ac0e15ca4fc2754b3c4612b0 | def load(self):
'\n Loads the cleaned up csv files into the database.\n '
if self.verbosity:
self.header('Loading data files')
model_list = [x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())]
if self.resume_mode:
prev_loaded = [x.file_name for x in self.log_record.called.filter(command='loadcalaccessrawfile', finish_datetime__isnull=False)]
self.log('{} models already loaded.'.format(len(prev_loaded)))
model_list = [x for x in model_list if (x._meta.db_table not in prev_loaded)]
if self.verbosity:
model_list = progress.bar(model_list)
for model in model_list:
call_command('loadcalaccessrawfile', model.__name__, verbosity=self.verbosity, keep_files=self.keep_files, app_name=self.app_name) | Loads the cleaned up csv files into the database. | example/toolbox/management/commands/reprocesscalaccessrawdata.py | load | rkiddy/django-calaccess-raw-data | 48 | python | def load(self):
'\n \n '
if self.verbosity:
self.header('Loading data files')
model_list = [x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())]
if self.resume_mode:
prev_loaded = [x.file_name for x in self.log_record.called.filter(command='loadcalaccessrawfile', finish_datetime__isnull=False)]
self.log('{} models already loaded.'.format(len(prev_loaded)))
model_list = [x for x in model_list if (x._meta.db_table not in prev_loaded)]
if self.verbosity:
model_list = progress.bar(model_list)
for model in model_list:
call_command('loadcalaccessrawfile', model.__name__, verbosity=self.verbosity, keep_files=self.keep_files, app_name=self.app_name) | def load(self):
'\n \n '
if self.verbosity:
self.header('Loading data files')
model_list = [x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())]
if self.resume_mode:
prev_loaded = [x.file_name for x in self.log_record.called.filter(command='loadcalaccessrawfile', finish_datetime__isnull=False)]
self.log('{} models already loaded.'.format(len(prev_loaded)))
model_list = [x for x in model_list if (x._meta.db_table not in prev_loaded)]
if self.verbosity:
model_list = progress.bar(model_list)
for model in model_list:
call_command('loadcalaccessrawfile', model.__name__, verbosity=self.verbosity, keep_files=self.keep_files, app_name=self.app_name)<|docstring|>Loads the cleaned up csv files into the database.<|endoftext|> |
0dfb0a65c9d27482ec83c0f7e53ac1f3836f37496ed7bf9301ed237aeb0f1371 | def __hash__(self) -> int:
'Overrides the default implementation'
_result_hash: int = ((hash(self.datasource_name) ^ hash(self.data_connector_name)) ^ hash(self.data_asset_name))
if (self.definition is not None):
for (key, value) in self.partition_definition.items():
_result_hash = ((_result_hash ^ hash(key)) ^ hash(str(value)))
return _result_hash | Overrides the default implementation | great_expectations/core/batch.py | __hash__ | aworld1/great_expectations | 1 | python | def __hash__(self) -> int:
_result_hash: int = ((hash(self.datasource_name) ^ hash(self.data_connector_name)) ^ hash(self.data_asset_name))
if (self.definition is not None):
for (key, value) in self.partition_definition.items():
_result_hash = ((_result_hash ^ hash(key)) ^ hash(str(value)))
return _result_hash | def __hash__(self) -> int:
_result_hash: int = ((hash(self.datasource_name) ^ hash(self.data_connector_name)) ^ hash(self.data_asset_name))
if (self.definition is not None):
for (key, value) in self.partition_definition.items():
_result_hash = ((_result_hash ^ hash(key)) ^ hash(str(value)))
return _result_hash<|docstring|>Overrides the default implementation<|endoftext|> |
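The XOR hash-combination pattern used by __hash__, shown on plain values rather than a real batch definition; the names are invented.

datasource_name = 'my_datasource'
data_connector_name = 'my_connector'
data_asset_name = 'users'
partition_definition = {'year': 2020, 'month': 1}

result_hash = hash(datasource_name) ^ hash(data_connector_name) ^ hash(data_asset_name)
for key, value in partition_definition.items():
    result_hash = (result_hash ^ hash(key)) ^ hash(str(value))
print(result_hash)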
3973812941afec7b2e8e461a13a3fac9dd0a7134d046f7489270ed054775fcde | @pytest.mark.parametrize('model', [AutoETS, ExponentialSmoothing, SARIMAX, UnobservedComponents, VAR])
def test_random_state(model):
'Function to test random_state parameter.'
obj = model.create_test_instance()
if (model == VAR):
obj.fit(y=y_1, fh=fh)
y = obj.predict()
obj.fit(y=y_1, fh=fh)
y1 = obj.predict()
else:
obj.fit(y=y, fh=fh)
y = obj.predict()
obj.fit(y=y, fh=fh)
y1 = obj.predict()
assert (y == y1) | Function to test random_state parameter. | sktime/forecasting/base/tests/randomtest.py | test_random_state | khrapovs/sktime | 1 | python | @pytest.mark.parametrize('model', [AutoETS, ExponentialSmoothing, SARIMAX, UnobservedComponents, VAR])
def test_random_state(model):
obj = model.create_test_instance()
if (model == VAR):
obj.fit(y=y_1, fh=fh)
y = obj.predict()
obj.fit(y=y_1, fh=fh)
y1 = obj.predict()
else:
obj.fit(y=y, fh=fh)
y = obj.predict()
obj.fit(y=y, fh=fh)
y1 = obj.predict()
assert (y == y1) | @pytest.mark.parametrize('model', [AutoETS, ExponentialSmoothing, SARIMAX, UnobservedComponents, VAR])
def test_random_state(model):
obj = model.create_test_instance()
if (model == VAR):
obj.fit(y=y_1, fh=fh)
y = obj.predict()
obj.fit(y=y_1, fh=fh)
y1 = obj.predict()
else:
obj.fit(y=y, fh=fh)
y = obj.predict()
obj.fit(y=y, fh=fh)
y1 = obj.predict()
assert (y == y1)<|docstring|>Function to test random_state parameter.<|endoftext|> |
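A sketch of the same determinism check for a single forecaster, assuming sktime's airline dataset; the predictions are kept in separate variables so the second fit reuses the original series.

from sktime.datasets import load_airline
from sktime.forecasting.exp_smoothing import ExponentialSmoothing

y = load_airline()
fh = [1, 2, 3]

forecaster = ExponentialSmoothing()
forecaster.fit(y=y, fh=fh)
first = forecaster.predict()
forecaster.fit(y=y, fh=fh)
second = forecaster.predict()
assert (first == second).all()  # refitting on the same data gives identical forecasts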
12a77b913be0215115585ff275708e3f51ca7fded598c775de81a6850f555c8e | def openVisaResource_1(address, parents):
    ' \n openVisaResource(address)\n \n Creates the instrument handle \n\n Arguments:\n \n address:GPIB address :Integer \n '
try:
rm = visa.ResourceManager()
inst_handle = rm.open_resource((('GPIB0::' + str(address)) + '::INSTR'))
parents.update_status('Initialized-1')
return inst_handle
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' connection error' | openVisaResource(address)
Creates the instrument handle
Arguments:
address:GPIB address :Integer | utils.py | openVisaResource_1 | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def openVisaResource_1(address, parents):
    ' \n openVisaResource(address)\n \n Creates the instrument handle \n\n Arguments:\n \n address:GPIB address :Integer \n '
try:
rm = visa.ResourceManager()
inst_handle = rm.open_resource((('GPIB0::' + str(address)) + '::INSTR'))
parents.update_status('Initialized-1')
return inst_handle
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' connection error' | def openVisaResource_1(address, parents):
    ' \n openVisaResource(address)\n \n Creates the instrument handle \n\n Arguments:\n \n address:GPIB address :Integer \n '
try:
rm = visa.ResourceManager()
inst_handle = rm.open_resource((('GPIB0::' + str(address)) + '::INSTR'))
parents.update_status('Initialized-1')
return inst_handle
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' connection error'<|docstring|>openVisaResource(address)
Creates the instrument handle
Arguments:
address:GPIB address :Integer<|endoftext|> |
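A sketch of the PyVISA calls behind openVisaResource_1 and initInstrument_1, written with the modern pyvisa import; it needs a VISA backend and a connected instrument, and the GPIB address is hypothetical.

import pyvisa

address = 2  # hypothetical GPIB address
rm = pyvisa.ResourceManager()
inst_handle = rm.open_resource('GPIB0::' + str(address) + '::INSTR')
print(inst_handle.query('*IDN?'))  # instrument identification string
inst_handle.write('*RST')          # reset, as done when do_reset is True
inst_handle.write('*CLS')
inst_handle.close()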
c5d280a313295fdfa9fe4ea990f62fc26ba657bc9d45828a6943b9c6ca29f3b3 | def openVisaResource_2(address, parents):
    ' \n openVisaResource(address)\n \n Creates the instrument handle \n\n Arguments:\n \n address:GPIB address :Integer \n '
try:
rm = visa.ResourceManager()
inst_handle = rm.open_resource((('GPIB0::' + str(address)) + '::INSTR'))
parents.update_status('Initialized-2')
return inst_handle
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' connection error' | openVisaResource(address)
Creates the instrument handle
Arguments:
address:GPIB address :Integer | utils.py | openVisaResource_2 | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def openVisaResource_2(address, parents):
    ' \n openVisaResource(address)\n \n Creates the instrument handle \n\n Arguments:\n \n address:GPIB address :Integer \n '
try:
rm = visa.ResourceManager()
inst_handle = rm.open_resource((('GPIB0::' + str(address)) + '::INSTR'))
parents.update_status('Initialized-2')
return inst_handle
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' connection error' | def openVisaResource_2(address, parents):
    ' \n openVisaResource(address)\n \n Creates the instrument handle \n\n Arguments:\n \n address:GPIB address :Integer \n '
try:
rm = visa.ResourceManager()
inst_handle = rm.open_resource((('GPIB0::' + str(address)) + '::INSTR'))
parents.update_status('Initialized-2')
return inst_handle
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' connection error'<|docstring|>openVisaResource(address)
Creates the intsrument handle
Arguments:
address:GPIB address :Integer<|endoftext|> |
971df7832706addff3f977aa7bc78e4f80becef6e72e7609681654c21dc397f9 | def close_(parents, address_1=2, address_2=14):
    ' \n close VisaResource(address)\n \n reset the instrument handle \n\n Arguments:\n \n address_1:GPIB address0 :Integer \n address_2:GPIB address0 :Integer \n '
inst_handle_1 = openVisaResource_1(address_1, parents)
inst_handle_1.write('*RST')
inst_handle_1.write('*CLS')
inst_handle_2 = openVisaResource_2(address_2, parents)
inst_handle_2.write('*RST')
inst_handle_2.write('*CLS')
setdefaultParameters_1(inst_handle_1)
setdefaultParameters_2(inst_handle_2)
parents.update_status('Initialized')
print('Initialized')
return inst_handle_1 | close VisaResource(address)
reset the instrument handle
Arguments:
address_1:GPIB address0 :Integer
address_2:GPIB address0 :Integer | utils.py | close_ | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def close_(parents, address_1=2, address_2=14):
    ' \n close VisaResource(address)\n \n reset the instrument handle \n\n Arguments:\n \n address_1:GPIB address0 :Integer \n address_2:GPIB address0 :Integer \n '
inst_handle_1 = openVisaResource_1(address_1, parents)
inst_handle_1.write('*RST')
inst_handle_1.write('*CLS')
inst_handle_2 = openVisaResource_2(address_2, parents)
inst_handle_2.write('*RST')
inst_handle_2.write('*CLS')
setdefaultParameters_1(inst_handle_1)
setdefaultParameters_2(inst_handle_2)
parents.update_status('Initialized')
print('Initialized')
return inst_handle_1 | def close_(parents, address_1=2, address_2=14):
    ' \n close VisaResource(address)\n \n reset the instrument handle \n\n Arguments:\n \n address_1:GPIB address0 :Integer \n address_2:GPIB address0 :Integer \n '
inst_handle_1 = openVisaResource_1(address_1, parents)
inst_handle_1.write('*RST')
inst_handle_1.write('*CLS')
inst_handle_2 = openVisaResource_2(address_2, parents)
inst_handle_2.write('*RST')
inst_handle_2.write('*CLS')
setdefaultParameters_1(inst_handle_1)
setdefaultParameters_2(inst_handle_2)
parents.update_status('Initialized')
print('Initialized')
return inst_handle_1<|docstring|>close VisaResource(address)
reset the instrument handle
Arguments:
address_1:GPIB address0 :Integer
address_2:GPIB address0 :Integer<|endoftext|> |
4140e1bce74d11bc685c7d26bdc534937b9d610555652c7aea7d33d0dd4c6ef8 | def initInstrument_1(inst_handle_1, do_reset):
" \n initInstrument(inst_handle)\n \n Initializes the instrument and returns the instrument name\n \n Arguments:\n \n inst_handle:intsrument handle from 'openVisaResource()' \n do_reset:True/False\n \n "
try:
name = inst_handle_1.query('*IDN?')
if do_reset:
inst_handle_1.write('*RST')
inst_handle_1.write('*CLS')
setdefaultParameters_1(inst_handle_1)
return name
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' initInstrument error' | initInstrument(inst_handle)
Initializes the instrument and returns the instrument name
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
do_reset:True/False | utils.py | initInstrument_1 | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def initInstrument_1(inst_handle_1, do_reset):
" \n initInstrument(inst_handle)\n \n Initializes the instrument and returns the instrument name\n \n Arguments:\n \n inst_handle:intsrument handle from 'openVisaResource()' \n do_reset:True/False\n \n "
try:
name = inst_handle_1.query('*IDN?')
if do_reset:
inst_handle_1.write('*RST')
inst_handle_1.write('*CLS')
setdefaultParameters_1(inst_handle_1)
return name
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' initInstrument error' | def initInstrument_1(inst_handle_1, do_reset):
" \n initInstrument(inst_handle)\n \n Initializes the instrument and returns the instrument name\n \n Arguments:\n \n inst_handle:intsrument handle from 'openVisaResource()' \n do_reset:True/False\n \n "
try:
name = inst_handle_1.query('*IDN?')
if do_reset:
inst_handle_1.write('*RST')
inst_handle_1.write('*CLS')
setdefaultParameters_1(inst_handle_1)
return name
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' initInstrument error'<|docstring|>initInstrument(inst_handle)
Initializes the instrument and returns the instrument name
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
do_reset:True/False<|endoftext|> |
79352a4061d94f33dfba02d16e6a5af7d6ade1381ab7eab43d03d446a3994f7b | def initInstrument_2(inst_handle_2, do_reset):
" \n initInstrument(inst_handle)\n \n Initializes the instrument and returns the instrument name\n \n Arguments:\n \n inst_handle:intsrument handle from 'openVisaResource()' \n do_reset:True/False\n \n "
try:
name = inst_handle_2.query('*IDN?')
if do_reset:
inst_handle_2.write('*RST')
inst_handle_2.write('*CLS')
setdefaultParameters_2(inst_handle_2)
return name
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' initInstrument error' | initInstrument(inst_handle)
Initializes the instrument and returns the instrument name
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
do_reset:True/False | utils.py | initInstrument_2 | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def initInstrument_2(inst_handle_2, do_reset):
" \n initInstrument(inst_handle)\n \n Initializes the instrument and returns the instrument name\n \n Arguments:\n \n inst_handle:intsrument handle from 'openVisaResource()' \n do_reset:True/False\n \n "
try:
name = inst_handle_2.query('*IDN?')
if do_reset:
inst_handle_2.write('*RST')
inst_handle_2.write('*CLS')
setdefaultParameters_2(inst_handle_2)
return name
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' initInstrument error' | def initInstrument_2(inst_handle_2, do_reset):
" \n initInstrument(inst_handle)\n \n Initializes the instrument and returns the instrument name\n \n Arguments:\n \n inst_handle:intsrument handle from 'openVisaResource()' \n do_reset:True/False\n \n "
try:
name = inst_handle_2.query('*IDN?')
if do_reset:
inst_handle_2.write('*RST')
inst_handle_2.write('*CLS')
setdefaultParameters_2(inst_handle_2)
return name
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' initInstrument error'<|docstring|>initInstrument(inst_handle)
Initializes the instrument and returns the instrument name
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
do_reset:True/False<|endoftext|> |
977d40ccaa3f1a6105d603b30744c223dabcd2c54fdf01a22624011a5719e31e | def setdefaultParameters_1(inst_handle_1):
" \n setCorrectionParameters(inst_handle)\n \n corrections before the measurements \n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
set_cal('C', 'D')
try:
command = ':SENS:CORR:OPEN ON\n ;:SENS:CORR:SHOR ON\n ;:CAL:CABL {clen}\n ;:AVER:COUN 100\n ;:AVER:ON\n ;:SOUR:FREQ {freq}\n ;:SOUR:VOLT {isV}\n ;:CALC1:FORM {calc_1}\n ;:CALC2:FORM {calc_2}'.format(clen=str(1), freq=1000, isV=str(0.0001), calc_1='C', calc_2='D')
inst_handle_1.write(':SOUR:VOLT:OFFS:STAT 1')
inst_handle_1.write(':SOUR:VOLT:MODE CONT')
inst_handle_1.write(':TRIG:SOUR EXT')
inst_handle_1.write(':FUNC:CONC ON')
result = inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' setdefaultParameters error' | setCorrectionParameters(inst_handle)
corrections before the measurements
Arguments:
inst_handle:instrument handle from 'openVisaResource()' | utils.py | setdefaultParameters_1 | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def setdefaultParameters_1(inst_handle_1):
" \n setCorrectionParameters(inst_handle)\n \n corrections before the measurements \n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
set_cal('C', 'D')
try:
command = ':SENS:CORR:OPEN ON\n ;:SENS:CORR:SHOR ON\n ;:CAL:CABL {clen}\n ;:AVER:COUN 100\n ;:AVER:ON\n ;:SOUR:FREQ {freq}\n ;:SOUR:VOLT {isV}\n ;:CALC1:FORM {calc_1}\n ;:CALC2:FORM {calc_2}'.format(clen=str(1), freq=1000, isV=str(0.0001), calc_1='C', calc_2='D')
inst_handle_1.write(':SOUR:VOLT:OFFS:STAT 1')
inst_handle_1.write(':SOUR:VOLT:MODE CONT')
inst_handle_1.write(':TRIG:SOUR EXT')
inst_handle_1.write(':FUNC:CONC ON')
result = inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' setdefaultParameters error' | def setdefaultParameters_1(inst_handle_1):
" \n setCorrectionParameters(inst_handle)\n \n corrections before the measurements \n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
set_cal('C', 'D')
try:
command = ':SENS:CORR:OPEN ON\n ;:SENS:CORR:SHOR ON\n ;:CAL:CABL {clen}\n ;:AVER:COUN 100\n ;:AVER:ON\n ;:SOUR:FREQ {freq}\n ;:SOUR:VOLT {isV}\n ;:CALC1:FORM {calc_1}\n ;:CALC2:FORM {calc_2}'.format(clen=str(1), freq=1000, isV=str(0.0001), calc_1='C', calc_2='D')
inst_handle_1.write(':SOUR:VOLT:OFFS:STAT 1')
inst_handle_1.write(':SOUR:VOLT:MODE CONT')
inst_handle_1.write(':TRIG:SOUR EXT')
inst_handle_1.write(':FUNC:CONC ON')
result = inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' setdefaultParameters error'<|docstring|>setCorrectionParameters(inst_handle)
corrections before the measurements
Arguments:
inst_handle:instrument handle from 'openVisaResource()'<|endoftext|> |
06ab145c691a446e9e2636d09b16f04c05eb25e7d685e66905acbb25d3c3d20c | def setdefaultParameters_2(inst_handle_2):
' \n setCorrectionParameters()\n \n Carries out any corrections before the measurements \n \n '
try:
inst_handle_2.write('sense:voltage:guard 0')
inst_handle_2.write('calculate:state 0')
inst_handle_2.write('sense:voltage:average:state 0')
inst_handle_2.write('sense:voltage:median:state 0')
inst_handle_2.write('sense:voltage:reference:state 0')
inst_handle_2.write('sense:voltage:range 2')
inst_handle_2.write('display:text:state 1')
inst_handle_2.write('system:zcheck 0')
inst_handle_2.write('system:tstamp:type relative')
inst_handle_2.write('system:lsync:state 0')
inst_handle_2.write('trace:timestamp:format absolute')
inst_handle_2.write('trigger:source immediate')
inst_handle_2.write('voltage:nplc 0.01')
inst_handle_2.write('voltage:digits 3')
inst_handle_2.write('display:text:state 0')
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return 'setdefaultParameters error' | setCorrectionParameters()
Carries out any corrections before the measurements | utils.py | setdefaultParameters_2 | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def setdefaultParameters_2(inst_handle_2):
' \n setCorrectionParameters()\n \n Carries out any corrections before the measurements \n \n '
try:
inst_handle_2.write('sense:voltage:guard 0')
inst_handle_2.write('calculate:state 0')
inst_handle_2.write('sense:voltage:average:state 0')
inst_handle_2.write('sense:voltage:median:state 0')
inst_handle_2.write('sense:voltage:reference:state 0')
inst_handle_2.write('sense:voltage:range 2')
inst_handle_2.write('display:text:state 1')
inst_handle_2.write('system:zcheck 0')
inst_handle_2.write('system:tstamp:type relative')
inst_handle_2.write('system:lsync:state 0')
inst_handle_2.write('trace:timestamp:format absolute')
inst_handle_2.write('trigger:source immediate')
inst_handle_2.write('voltage:nplc 0.01')
inst_handle_2.write('voltage:digits 3')
inst_handle_2.write('display:text:state 0')
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return 'setdefaultParameters error' | def setdefaultParameters_2(inst_handle_2):
' \n setCorrectionParameters()\n \n Carries out any corrections before the measurements \n \n '
try:
inst_handle_2.write('sense:voltage:guard 0')
inst_handle_2.write('calculate:state 0')
inst_handle_2.write('sense:voltage:average:state 0')
inst_handle_2.write('sense:voltage:median:state 0')
inst_handle_2.write('sense:voltage:reference:state 0')
inst_handle_2.write('sense:voltage:range 2')
inst_handle_2.write('display:text:state 1')
inst_handle_2.write('system:zcheck 0')
inst_handle_2.write('system:tstamp:type relative')
inst_handle_2.write('system:lsync:state 0')
inst_handle_2.write('trace:timestamp:format absolute')
inst_handle_2.write('trigger:source immediate')
inst_handle_2.write('voltage:nplc 0.01')
inst_handle_2.write('voltage:digits 3')
inst_handle_2.write('display:text:state 0')
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return 'setdefaultParameters error'<|docstring|>setCorrectionParameters()
Carries out any corrections before the measurements<|endoftext|>
44bf9ecfa7c4b66ed1c0d1661a48c1e02185586e84493c4658c7710215b287aa | def setCorrectionParameters(inst_handle_1, calc_1, calc_2, cable_length):
' \n setCorrectionParameters(inst_handle,calc_1,calc_2,cable_length)\n \n Carries our any corrections before the measurements \n \n Arguments:\n \n inst_handle:instrument handle from \'openVisaResource()\'\n cable_length: Length in meters : Positive Integer \n Calcuation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])\n calculation_2-->["Rs","Rp","PHAS","D"]\n average : is on with 100 as default\n '
set_cal(calc_1, calc_2)
try:
command = ':SENS:CORR:OPEN ON\n ;:SENS:CORR:SHOR ON\n ;:CAL:CABL {clen}\n ;:AVER:COUN 100\n ;:AVER:ON\n ;:CALC1:FORM {calc_1}\n ;:CALC2:FORM {calc_2}'.format(clen=str(cable_length), calc_1=calc_1, calc_2=calc_2)
inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' setCorrectionParameters error' | setCorrectionParameters(inst_handle,calc_1,calc_2,cable_length)
Carries out any corrections before the measurements
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
cable_length: Length in meters : Positive Integer
Calculation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])
calculation_2-->["Rs","Rp","PHAS","D"]
average : is on with 100 as default | utils.py | setCorrectionParameters | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def setCorrectionParameters(inst_handle_1, calc_1, calc_2, cable_length):
' \n setCorrectionParameters(inst_handle,calc_1,calc_2,cable_length)\n \n Carries our any corrections before the measurements \n \n Arguments:\n \n inst_handle:instrument handle from \'openVisaResource()\'\n cable_length: Length in meters : Positive Integer \n Calcuation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])\n calculation_2-->["Rs","Rp","PHAS","D"]\n average : is on with 100 as default\n '
set_cal(calc_1, calc_2)
try:
command = ':SENS:CORR:OPEN ON\n ;:SENS:CORR:SHOR ON\n ;:CAL:CABL {clen}\n ;:AVER:COUN 100\n ;:AVER:ON\n ;:CALC1:FORM {calc_1}\n ;:CALC2:FORM {calc_2}'.format(clen=str(cable_length), calc_1=calc_1, calc_2=calc_2)
inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' setCorrectionParameters error' | def setCorrectionParameters(inst_handle_1, calc_1, calc_2, cable_length):
' \n setCorrectionParameters(inst_handle,calc_1,calc_2,cable_length)\n \n Carries our any corrections before the measurements \n \n Arguments:\n \n inst_handle:instrument handle from \'openVisaResource()\'\n cable_length: Length in meters : Positive Integer \n Calcuation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])\n calculation_2-->["Rs","Rp","PHAS","D"]\n average : is on with 100 as default\n '
set_cal(calc_1, calc_2)
try:
command = ':SENS:CORR:OPEN ON\n ;:SENS:CORR:SHOR ON\n ;:CAL:CABL {clen}\n ;:AVER:COUN 100\n ;:AVER:ON\n ;:CALC1:FORM {calc_1}\n ;:CALC2:FORM {calc_2}'.format(clen=str(cable_length), calc_1=calc_1, calc_2=calc_2)
inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('openVisaResource ERROR')
return ' setCorrectionParameters error'<|docstring|>setCorrectionParameters(inst_handle,calc_1,calc_2,cable_length)
Carries out any corrections before the measurements
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
cable_length: Length in meters : Positive Integer
Calculation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])
calculation_2-->["Rs","Rp","PHAS","D"]
average : is on with 100 as default<|endoftext|> |
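Illustrative call sketch (assumptions: lcr is the pyvisa handle opened in the earlier sketch and the function is importable from the repository's utils module); calc_1 and calc_2 must come from the value lists given in the docstring above.

from utils import setCorrectionParameters    # assumption: repo-root import

# Parallel capacitance plus dissipation factor, corrected for a 1 m test cable.
setCorrectionParameters(lcr, calc_1='C', calc_2='D', cable_length=1)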
3e213818b4877d96b9e2e6d2278e97c928dff5c3f55e731da0b6480bf45f2f48 | def setSignalLevelAndFrequency(inst_handle_1, frequency, ac_signl):
' \n SetSignalLevelAndFrequency(inst_handle,frequency, \n is_voltage_signal)\n \n Sets the Signal type(Voltage) and frequency\n \n '
try:
command = ':SOUR:FREQ {freq}\n ;:SOUR:VOLT {isV}'.format(freq=frequency, isV=str(ac_signl))
inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('setSignalLevelAndFrequency ERROR')
return ' setSignalLevelAndFrequency error' | SetSignalLevelAndFrequency(inst_handle,frequency,
is_voltage_signal)
Sets the Signal type(Voltage) and frequency | utils.py | setSignalLevelAndFrequency | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def setSignalLevelAndFrequency(inst_handle_1, frequency, ac_signl):
' \n SetSignalLevelAndFrequency(inst_handle,frequency, \n is_voltage_signal)\n \n Sets the Signal type(Voltage) and frequency\n \n '
try:
command = ':SOUR:FREQ {freq}\n ;:SOUR:VOLT {isV}'.format(freq=frequency, isV=str(ac_signl))
inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('setSignalLevelAndFrequency ERROR')
return ' setSignalLevelAndFrequency error' | def setSignalLevelAndFrequency(inst_handle_1, frequency, ac_signl):
' \n SetSignalLevelAndFrequency(inst_handle,frequency, \n is_voltage_signal)\n \n Sets the Signal type(Voltage) and frequency\n \n '
try:
command = ':SOUR:FREQ {freq}\n ;:SOUR:VOLT {isV}'.format(freq=frequency, isV=str(ac_signl))
inst_handle_1.write(command)
return 1
except:
logging.getLogger().error('openVisaResource ERROR', exc_info=True)
print('setSignalLevelAndFrequency ERROR')
return ' setSignalLevelAndFrequency error'<|docstring|>SetSignalLevelAndFrequency(inst_handle,frequency,
is_voltage_signal)
Sets the Signal type(Voltage) and frequency<|endoftext|> |
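For completeness, a one-line sketch of the matching call (same assumptions as the sketches above): a 1 kHz, 10 mV AC test signal.

setSignalLevelAndFrequency(lcr, frequency=1000, ac_signl=0.01)   # 1 kHz, 10 mV AC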
c7652bf998703bd635570ac8d9059ac3ac82948eec2ad773e48e8d84d30bbf42 | def fetchData(inst_handle_1, v_n, delay):
" \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
WAIT_TIME_SEC = 1.0
try:
inst_handle_1.write((':SOUR:VOLT:OFFS ' + str(v_n)))
inst_handle_1.write(':INIT')
inst_handle_1.write(':TRIG:SOUR INT')
time.sleep(delay)
values = inst_handle_1.query_ascii_values(':FETC?', separator=',', container=np.array)
return values
except:
logging.getLogger().error('fetchData ERROR', exc_info=True)
print('fetchData ERROR')
return (- 1) | fetchData(inst_handle):
Fetches the data
Arguments:
inst_handle:instrument handle from 'openVisaResource()' | utils.py | fetchData | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def fetchData(inst_handle_1, v_n, delay):
" \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
WAIT_TIME_SEC = 1.0
try:
inst_handle_1.write((':SOUR:VOLT:OFFS ' + str(v_n)))
inst_handle_1.write(':INIT')
inst_handle_1.write(':TRIG:SOUR INT')
time.sleep(delay)
values = inst_handle_1.query_ascii_values(':FETC?', separator=',', container=np.array)
return values
except:
logging.getLogger().error('fetchData ERROR', exc_info=True)
print('fetchData ERROR')
return (- 1) | def fetchData(inst_handle_1, v_n, delay):
" \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
WAIT_TIME_SEC = 1.0
try:
inst_handle_1.write((':SOUR:VOLT:OFFS ' + str(v_n)))
inst_handle_1.write(':INIT')
inst_handle_1.write(':TRIG:SOUR INT')
time.sleep(delay)
values = inst_handle_1.query_ascii_values(':FETC?', separator=',', container=np.array)
return values
except:
logging.getLogger().error('fetchData ERROR', exc_info=True)
print('fetchData ERROR')
return (- 1)<|docstring|>fetchData(inst_handle):
Fetches the data
Arguments:
inst_handle:instrument handle from 'openVisaResource()'<|endoftext|> |
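A hedged sweep sketch: fetchData() applies the DC offset, re-triggers the instrument, waits, and returns the computed quantities, so a CV-style sweep reduces to a loop over bias points (assumes the pyvisa handle and repo imports from the earlier sketches).

import numpy as np
from utils import fetchData                   # assumption: repo-root import

bias = np.linspace(-0.5, 0.5, 11)             # hypothetical -0.5 V .. +0.5 V sweep
readings = [fetchData(lcr, v, delay=0.5) for v in bias]
c1 = [r[1] for r in readings]                 # CALC1 result (e.g. Cp when set to 'C')
c2 = [r[2] for r in readings]                 # CALC2 result (e.g. D)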
00fb24c994049499f05242036ad81710d23e4f2ede8a88fe14ee1efe9398de01 | def fetch_emv(inst_handle_2):
" \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
try:
inst_handle_2.write('*CLS')
inst_handle_2.write('trace:clear')
inst_handle_2.write('trace:points ' + str(100))
value_2 = inst_handle_2.query_ascii_values(':READ?', separator=',', container=np.array)
return value_2[0]
except:
logging.getLogger().error('fetch_emv ERROR', exc_info=True)
print('fetch_emv ERROR')
return (- 1) | fetchData(inst_handle):
Fetches the data
Arguments:
inst_handle:instrument handle from 'openVisaResource()' | utils.py | fetch_emv | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def fetch_emv(inst_handle_2):
" \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
try:
inst_handle_2.write('*CLS')
inst_handle_2.write('trace:clear')
inst_handle_2.write('trace:points ' + str(100))
value_2 = inst_handle_2.query_ascii_values(':READ?', separator=',', container=np.array)
return value_2[0]
except:
logging.getLogger().error('fetch_emv ERROR', exc_info=True)
print('fetch_emv ERROR')
return (- 1) | def fetch_emv(inst_handle_2):
" \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from 'openVisaResource()'\n \n "
try:
inst_handle_2.write('*CLS')
inst_handle_2.write('trace:clear')
inst_handle_2.write('trace:points ' + str(100))
value_2 = inst_handle_2.query_ascii_values(':READ?', separator=',', container=np.array)
return value_2[0]
except:
logging.getLogger().error('fetch_emv ERROR', exc_info=True)
print('fetch_emv ERROR')
return (- 1)<|docstring|>fetchData(inst_handle):
Fetches the data
Arguments:
inst_handle:instrument handle from 'openVisaResource()'<|endoftext|> |
7ae809db7cd45059e2da9d26bf5d88080f559c94d957cb52bf26289efbe42231 | def runCVLoop(inst_handle_1, inst_handle_2, VBias, filename, parent, time_s):
' \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from \'openVisaResource()\'\n freq:Frequency in HZ at which to measure CV\n VBias:Array with voltage data points at which to measure Capacitance\n Calcuation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])\n calculation_2-->["Rs","Rp","PHAS","D"]\n \n '
print('inst2', inst_handle_2)
calc_1 = Calc_1[0]
calc_2 = Calc_2[0]
C1_data = []
C2_data = []
os.makedirs('./Data/', exist_ok=True)
save_path = './Data/'
if (filename == None):
filename = strftime('%Y-%m-%d-%H-%M-%S', gmtime())
name = (filename + '.csv')
else:
f = strftime('%Y-%m-%d-%H-%M-%S', gmtime())
name = ((f + filename) + '.csv')
print(filename)
fullName = os.path.join(save_path, name)
f = open(fullName, 'w+')
if (calc_1 == 'C'):
xaxis1 = 'Cp(F)'
xaxis2 = 'D'
elif (calc_1 == 'Cs'):
xaxis1 = 'Cp(F)'
xaxis2 = 'Q'
elif (calc_1 == 'Z'):
xaxis1 = 'Rs(ohm)'
xaxis2 = 'D'
elif (calc_1 == 'CPRP'):
xaxis1 = 'Cp(F)'
xaxis2 = 'G(s)'
elif (calc_1 == 'Cp'):
xaxis1 = 'Cp(F)'
xaxis2 = 'Rp(Ohms)'
elif (calc_1 == 'Ls'):
xaxis1 = 'Lp(H)'
xaxis2 = 'D'
elif (calc_1 == 'Lp'):
xaxis1 = 'Lp(H)'
xaxis2 = 'D'
elif (calc_1 == 'C'):
xaxis1 = 'Cs(F)'
xaxis2 = 'Rs(Ohms)'
f.write((((('Voltage (V), Vs Ag/AgCl ,' + xaxis1) + ', ') + xaxis2) + '\r'))
f.close()
parent.update_x_y(xaxis1, xaxis2)
parent.clear_data()
try:
em = []
for (j, v) in enumerate(VBias):
parent.update_status('Running')
time.sleep(time_s)
v = round(v, 2)
data = fetchData(inst_handle_1, v, time_s)
emv = fetch_emv(inst_handle_2)
print('this is data fetched', v, data, emv)
em.append(emv)
C1 = data[1]
C2 = data[2]
f = open(fullName, 'a')
f.write((((((((str(v) + ',') + str(emv)) + ',') + str(C1)) + ',') + str(C2)) + '\r'))
f.close()
C1_data.append(C1)
C2_data.append(C2)
print((('Voltage:' + str(v)) + ' V'))
parent.update_data(emv, C1, C2, j)
parent.update_()
fetchData(inst_handle_1, 0, time_s)
parent.update_status('End')
parent.No_update()
parent.clear_plot()
parent.updatePlot(em, C1_data, C2_data, xaxis1, xaxis2)
except:
parent.update_status('ERROR')
logging.getLogger().error('runCVLoop ERROR', exc_info=True)
print('runCVLoop ERROR')
return (- 1) | fetchData(inst_handle):
Fetches the data
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
freq:Frequency in HZ at which to measure CV
VBias:Array with voltage data points at which to measure Capacitance
Calculation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])
calculation_2-->["Rs","Rp","PHAS","D"] | utils.py | runCVLoop | dnsmalla/Water-splitting-measurement-LCR-GUI | 0 | python | def runCVLoop(inst_handle_1, inst_handle_2, VBias, filename, parent, time_s):
' \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from \'openVisaResource()\'\n freq:Frequency in HZ at which to measure CV\n VBias:Array with voltage data points at which to measure Capacitance\n Calcuation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])\n calculation_2-->["Rs","Rp","PHAS","D"]\n \n '
print('inst2', inst_handle_2)
calc_1 = Calc_1[0]
calc_2 = Calc_2[0]
C1_data = []
C2_data = []
os.makedirs('./Data/', exist_ok=True)
save_path = './Data/'
if (filename == None):
filename = strftime('%Y-%m-%d-%H-%M-%S', gmtime())
name = (filename + '.csv')
else:
f = strftime('%Y-%m-%d-%H-%M-%S', gmtime())
name = ((f + filename) + '.csv')
print(filename)
fullName = os.path.join(save_path, name)
f = open(fullName, 'w+')
if (calc_1 == 'C'):
xaxis1 = 'Cp(F)'
xaxis2 = 'D'
elif (calc_1 == 'Cs'):
xaxis1 = 'Cp(F)'
xaxis2 = 'Q'
elif (calc_1 == 'Z'):
xaxis1 = 'Rs(ohm)'
xaxis2 = 'D'
elif (calc_1 == 'CPRP'):
xaxis1 = 'Cp(F)'
xaxis2 = 'G(s)'
elif (calc_1 == 'Cp'):
xaxis1 = 'Cp(F)'
xaxis2 = 'Rp(Ohms)'
elif (calc_1 == 'Ls'):
xaxis1 = 'Lp(H)'
xaxis2 = 'D'
elif (calc_1 == 'Lp'):
xaxis1 = 'Lp(H)'
xaxis2 = 'D'
elif (calc_1 == 'C'):
xaxis1 = 'Cs(F)'
xaxis2 = 'Rs(Ohms)'
f.write((((('Voltage (V), Vs Ag/AgCl ,' + xaxis1) + ', ') + xaxis2) + '\r'))
f.close()
parent.update_x_y(xaxis1, xaxis2)
parent.clear_data()
try:
em = []
for (j, v) in enumerate(VBias):
parent.update_status('Running')
time.sleep(time_s)
v = round(v, 2)
data = fetchData(inst_handle_1, v, time_s)
emv = fetch_emv(inst_handle_2)
print('this is data fetched', v, data, emv)
em.append(emv)
C1 = data[1]
C2 = data[2]
f = open(fullName, 'a')
f.write((((((((str(v) + ',') + str(emv)) + ',') + str(C1)) + ',') + str(C2)) + '\r'))
f.close()
C1_data.append(C1)
C2_data.append(C2)
print((('Voltage:' + str(v)) + ' V'))
parent.update_data(emv, C1, C2, j)
parent.update_()
fetchData(inst_handle_1, 0, time_s)
parent.update_status('End')
parent.No_update()
parent.clear_plot()
parent.updatePlot(em, C1_data, C2_data, xaxis1, xaxis2)
except:
parent.update_status('ERROR')
logging.getLogger().error('runCVLoop ERROR', exc_info=True)
print('runCVLoop ERROR')
return (- 1) | def runCVLoop(inst_handle_1, inst_handle_2, VBias, filename, parent, time_s):
' \n fetchData(inst_handle):\n \n Fetches the data\n \n Arguments:\n \n inst_handle:instrument handle from \'openVisaResource()\'\n freq:Frequency in HZ at which to measure CV\n VBias:Array with voltage data points at which to measure Capacitance\n Calcuation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])\n calculation_2-->["Rs","Rp","PHAS","D"]\n \n '
print('inst2', inst_handle_2)
calc_1 = Calc_1[0]
calc_2 = Calc_2[0]
C1_data = []
C2_data = []
os.makedirs('./Data/', exist_ok=True)
save_path = './Data/'
if (filename == None):
filename = strftime('%Y-%m-%d-%H-%M-%S', gmtime())
name = (filename + '.csv')
else:
f = strftime('%Y-%m-%d-%H-%M-%S', gmtime())
name = ((f + filename) + '.csv')
print(filename)
fullName = os.path.join(save_path, name)
f = open(fullName, 'w+')
if (calc_1 == 'C'):
xaxis1 = 'Cp(F)'
xaxis2 = 'D'
elif (calc_1 == 'Cs'):
xaxis1 = 'Cp(F)'
xaxis2 = 'Q'
elif (calc_1 == 'Z'):
xaxis1 = 'Rs(ohm)'
xaxis2 = 'D'
elif (calc_1 == 'CPRP'):
xaxis1 = 'Cp(F)'
xaxis2 = 'G(s)'
elif (calc_1 == 'Cp'):
xaxis1 = 'Cp(F)'
xaxis2 = 'Rp(Ohms)'
elif (calc_1 == 'Ls'):
xaxis1 = 'Lp(H)'
xaxis2 = 'D'
elif (calc_1 == 'Lp'):
xaxis1 = 'Lp(H)'
xaxis2 = 'D'
elif (calc_1 == 'C'):
xaxis1 = 'Cs(F)'
xaxis2 = 'Rs(Ohms)'
f.write((((('Voltage (V), Vs Ag/AgCl ,' + xaxis1) + ', ') + xaxis2) + '\r'))
f.close()
parent.update_x_y(xaxis1, xaxis2)
parent.clear_data()
try:
em = []
for (j, v) in enumerate(VBias):
parent.update_status('Running')
time.sleep(time_s)
v = round(v, 2)
data = fetchData(inst_handle_1, v, time_s)
emv = fetch_emv(inst_handle_2)
print('this is data fetched', v, data, emv)
em.append(emv)
C1 = data[1]
C2 = data[2]
f = open(fullName, 'a')
f.write((((((((str(v) + ',') + str(emv)) + ',') + str(C1)) + ',') + str(C2)) + '\r'))
f.close()
C1_data.append(C1)
C2_data.append(C2)
print((('Voltage:' + str(v)) + ' V'))
parent.update_data(emv, C1, C2, j)
parent.update_()
fetchData(inst_handle_1, 0, time_s)
parent.update_status('End')
parent.No_update()
parent.clear_plot()
parent.updatePlot(em, C1_data, C2_data, xaxis1, xaxis2)
except:
parent.update_status('ERROR')
logging.getLogger().error('runCVLoop ERROR', exc_info=True)
print('runCVLoop ERROR')
return (- 1)<|docstring|>fetchData(inst_handle):
Fetches the data
Arguments:
inst_handle:instrument handle from 'openVisaResource()'
freq:Frequency in HZ at which to measure CV
VBias:Array with voltage data points at which to measure Capacitance
Calculation_1-->["Z","C","Cs","CPRP","Cp","Ls","Lp"])
calculation_2-->["Rs","Rp","PHAS","D"]<|endoftext|> |
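Putting the record above together, a driving call might look like the sketch below; window stands in for the GUI parent whose update_*/clear_*/plot callbacks runCVLoop expects, and electrometer for the second VISA handle, neither of which is constructed here.

import numpy as np

v_bias = np.arange(-1.0, 1.05, 0.05)          # -1 V .. +1 V in 50 mV steps
runCVLoop(lcr, electrometer, v_bias, 'sample01', window, time_s=0.5)
# Results are appended to ./Data/<timestamp>sample01.csv as the sweep runs.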
7d595eed4dc9a67709b151d58a7dcda2f8f04e70f98d9dd8cffeedcbd695c6b1 | def prepare_data(self):
'Prepares the data'
self.counter_attack_vs_flying = self.close_enemies_to_base = False
self.structures = self.units.structure
self.initialize_bases()
self.initialize_units()
self.initialize_buildings()
self.initialize_enemies()
self.close_enemy_production = self.check_for_proxy_buildings()
self.floating_buildings_bm = self.check_for_floating_buildings()
self.one_base_play = self.check_for_second_bases() | Prepares the data | data_container.py | prepare_data | drakonnan1st/JackBot | 0 | python | def prepare_data(self):
self.counter_attack_vs_flying = self.close_enemies_to_base = False
self.structures = self.units.structure
self.initialize_bases()
self.initialize_units()
self.initialize_buildings()
self.initialize_enemies()
self.close_enemy_production = self.check_for_proxy_buildings()
self.floating_buildings_bm = self.check_for_floating_buildings()
self.one_base_play = self.check_for_second_bases() | def prepare_data(self):
self.counter_attack_vs_flying = self.close_enemies_to_base = False
self.structures = self.units.structure
self.initialize_bases()
self.initialize_units()
self.initialize_buildings()
self.initialize_enemies()
self.close_enemy_production = self.check_for_proxy_buildings()
self.floating_buildings_bm = self.check_for_floating_buildings()
self.one_base_play = self.check_for_second_bases()<|docstring|>Prepares the data<|endoftext|> |
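A hedged sketch of where this caching step sits in a python-sc2 bot: the methods in these records read like a mixin on a BotAI subclass, refreshed once per game step. The import path and class layout below are assumptions, not taken from the repository.

from sc2 import BotAI                         # assumption: older python-sc2 release

class JackBotSketch(BotAI):                   # assumption: the data-container methods above are mixed in
    async def on_step(self, iteration):
        self.prepare_data()                   # refresh the cached unit/structure groups
        if self.close_enemies_to_base:
            pass                              # e.g. rally defenders at the threatened base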
2b64e4f3771bc3c4b42e105f1fe98958540a6e4119a35a246ab0dffd9ae229cb | def check_for_proxy_buildings(self) -> bool:
'Check if there are any proxy buildings'
return bool(self.enemy_structures.of_type({BARRACKS, GATEWAY, HATCHERY}).closer_than(75, self.start_location)) | Check if there are any proxy buildings | data_container.py | check_for_proxy_buildings | drakonnan1st/JackBot | 0 | python | def check_for_proxy_buildings(self) -> bool:
return bool(self.enemy_structures.of_type({BARRACKS, GATEWAY, HATCHERY}).closer_than(75, self.start_location)) | def check_for_proxy_buildings(self) -> bool:
return bool(self.enemy_structures.of_type({BARRACKS, GATEWAY, HATCHERY}).closer_than(75, self.start_location))<|docstring|>Check if there are any proxy buildings<|endoftext|> |
308c05602924918f41171834b60246679363f42d782a96c7ecea98af5577f3c5 | def check_for_floating_buildings(self) -> bool:
'Check if some terran wants to be funny with lifting up'
return bool((self.enemy_structures.flying and (len(self.enemy_structures) == len(self.enemy_structures.flying)) and (self.time > 300))) | Check if some terran wants to be funny with lifting up | data_container.py | check_for_floating_buildings | drakonnan1st/JackBot | 0 | python | def check_for_floating_buildings(self) -> bool:
return bool((self.enemy_structures.flying and (len(self.enemy_structures) == len(self.enemy_structures.flying)) and (self.time > 300))) | def check_for_floating_buildings(self) -> bool:
return bool((self.enemy_structures.flying and (len(self.enemy_structures) == len(self.enemy_structures.flying)) and (self.time > 300)))<|docstring|>Check if some terran wants to be funny with lifting up<|endoftext|> |
5b0653a01c517f13bc0ce6835cdce256b768df943ac0ed59a3352376256460e4 | def check_for_second_bases(self) -> bool:
'Check if its a one base play'
return bool((self.overlords and (not self.enemy_structures.of_type({NEXUS, COMMANDCENTER, HATCHERY}).closer_than(25, self.overlords.furthest_to(self.start_location))) and (self.time > 165) and (not self.close_enemy_production))) | Check if its a one base play | data_container.py | check_for_second_bases | drakonnan1st/JackBot | 0 | python | def check_for_second_bases(self) -> bool:
return bool((self.overlords and (not self.enemy_structures.of_type({NEXUS, COMMANDCENTER, HATCHERY}).closer_than(25, self.overlords.furthest_to(self.start_location))) and (self.time > 165) and (not self.close_enemy_production))) | def check_for_second_bases(self) -> bool:
return bool((self.overlords and (not self.enemy_structures.of_type({NEXUS, COMMANDCENTER, HATCHERY}).closer_than(25, self.overlords.furthest_to(self.start_location))) and (self.time > 165) and (not self.close_enemy_production)))<|docstring|>Check if its a one base play<|endoftext|> |
73e2fbda0bb8f2eaf37214cdf6f11ced1272d9dfbeaa14db8c951eb02692e6d7 | def prepare_enemy_data_points(self):
'Prepare data related to enemy units'
if self.enemies:
excluded_from_flying = {DRONE, SCV, PROBE, OVERLORD, OVERSEER, RAVEN, OBSERVER, WARPPRISM, MEDIVAC, VIPER, CORRUPTOR}
for hatch in self.townhalls:
close_enemy = self.ground_enemies.closer_than(20, hatch.position)
close_enemy_flying = self.flying_enemies.exclude_type(excluded_from_flying).closer_than(30, hatch.position)
if (close_enemy and (not self.close_enemies_to_base)):
self.close_enemies_to_base = True
if (close_enemy_flying and (not self.counter_attack_vs_flying)):
self.counter_attack_vs_flying = True | Prepare data related to enemy units | data_container.py | prepare_enemy_data_points | drakonnan1st/JackBot | 0 | python | def prepare_enemy_data_points(self):
if self.enemies:
excluded_from_flying = {DRONE, SCV, PROBE, OVERLORD, OVERSEER, RAVEN, OBSERVER, WARPPRISM, MEDIVAC, VIPER, CORRUPTOR}
for hatch in self.townhalls:
close_enemy = self.ground_enemies.closer_than(20, hatch.position)
close_enemy_flying = self.flying_enemies.exclude_type(excluded_from_flying).closer_than(30, hatch.position)
if (close_enemy and (not self.close_enemies_to_base)):
self.close_enemies_to_base = True
if (close_enemy_flying and (not self.counter_attack_vs_flying)):
self.counter_attack_vs_flying = True | def prepare_enemy_data_points(self):
if self.enemies:
excluded_from_flying = {DRONE, SCV, PROBE, OVERLORD, OVERSEER, RAVEN, OBSERVER, WARPPRISM, MEDIVAC, VIPER, CORRUPTOR}
for hatch in self.townhalls:
close_enemy = self.ground_enemies.closer_than(20, hatch.position)
close_enemy_flying = self.flying_enemies.exclude_type(excluded_from_flying).closer_than(30, hatch.position)
if (close_enemy and (not self.close_enemies_to_base)):
self.close_enemies_to_base = True
if (close_enemy_flying and (not self.counter_attack_vs_flying)):
self.counter_attack_vs_flying = True<|docstring|>Prepare data related to enemy units<|endoftext|> |
ab0072f03178ac45ab2eb7f15bb48d2f97018757615978dd2e4bcb3611c5bbae | def initialize_bases(self):
'Initialize the bases'
self.hatcheries = self.units(HATCHERY)
self.lairs = self.units(LAIR)
self.hives = self.units(HIVE)
self.prepare_bases_data() | Initialize the bases | data_container.py | initialize_bases | drakonnan1st/JackBot | 0 | python | def initialize_bases(self):
self.hatcheries = self.units(HATCHERY)
self.lairs = self.units(LAIR)
self.hives = self.units(HIVE)
self.prepare_bases_data() | def initialize_bases(self):
self.hatcheries = self.units(HATCHERY)
self.lairs = self.units(LAIR)
self.hives = self.units(HIVE)
self.prepare_bases_data()<|docstring|>Initialize the bases<|endoftext|> |
ed3024e6abffe98cf3e015be65d1cda66c17a57fb632dc36648b3b24ca83f1d7 | def initialize_units(self):
'Initialize our units'
self.overlords = self.units(OVERLORD)
self.drones = self.units(DRONE)
self.queens = self.units(QUEEN)
self.zerglings = (self.units(ZERGLING).tags_not_in(self.burrowed_lings) if self.burrowed_lings else self.units(ZERGLING))
self.ultralisks = self.units(ULTRALISK)
self.overseers = self.units(OVERSEER)
self.mutalisks = self.units(MUTALISK)
self.larvae = self.units(LARVA)
self.hydras = self.units(HYDRALISK) | Initialize our units | data_container.py | initialize_units | drakonnan1st/JackBot | 0 | python | def initialize_units(self):
self.overlords = self.units(OVERLORD)
self.drones = self.units(DRONE)
self.queens = self.units(QUEEN)
self.zerglings = (self.units(ZERGLING).tags_not_in(self.burrowed_lings) if self.burrowed_lings else self.units(ZERGLING))
self.ultralisks = self.units(ULTRALISK)
self.overseers = self.units(OVERSEER)
self.mutalisks = self.units(MUTALISK)
self.larvae = self.units(LARVA)
self.hydras = self.units(HYDRALISK) | def initialize_units(self):
self.overlords = self.units(OVERLORD)
self.drones = self.units(DRONE)
self.queens = self.units(QUEEN)
self.zerglings = (self.units(ZERGLING).tags_not_in(self.burrowed_lings) if self.burrowed_lings else self.units(ZERGLING))
self.ultralisks = self.units(ULTRALISK)
self.overseers = self.units(OVERSEER)
self.mutalisks = self.units(MUTALISK)
self.larvae = self.units(LARVA)
self.hydras = self.units(HYDRALISK)<|docstring|>Initialize our units<|endoftext|> |
e40d37085b0a357af12b595c610775dec94e6fb15af0f29d41c4c8b96084a194 | def initialize_buildings(self):
'Initialize our buildings'
self.evochambers = self.units(EVOLUTIONCHAMBER)
self.caverns = self.units(ULTRALISKCAVERN)
self.hydradens = self.units(HYDRALISKDEN)
self.pools = self.units(SPAWNINGPOOL)
self.pits = self.units(INFESTATIONPIT)
self.spines = self.units(SPINECRAWLER)
self.tumors = self.units.of_type({CREEPTUMORQUEEN, CREEPTUMOR, CREEPTUMORBURROWED})
self.extractors = self.units(EXTRACTOR)
self.spores = self.units(SPORECRAWLER)
self.spires = self.units(SPIRE) | Initialize our buildings | data_container.py | initialize_buildings | drakonnan1st/JackBot | 0 | python | def initialize_buildings(self):
self.evochambers = self.units(EVOLUTIONCHAMBER)
self.caverns = self.units(ULTRALISKCAVERN)
self.hydradens = self.units(HYDRALISKDEN)
self.pools = self.units(SPAWNINGPOOL)
self.pits = self.units(INFESTATIONPIT)
self.spines = self.units(SPINECRAWLER)
self.tumors = self.units.of_type({CREEPTUMORQUEEN, CREEPTUMOR, CREEPTUMORBURROWED})
self.extractors = self.units(EXTRACTOR)
self.spores = self.units(SPORECRAWLER)
self.spires = self.units(SPIRE) | def initialize_buildings(self):
self.evochambers = self.units(EVOLUTIONCHAMBER)
self.caverns = self.units(ULTRALISKCAVERN)
self.hydradens = self.units(HYDRALISKDEN)
self.pools = self.units(SPAWNINGPOOL)
self.pits = self.units(INFESTATIONPIT)
self.spines = self.units(SPINECRAWLER)
self.tumors = self.units.of_type({CREEPTUMORQUEEN, CREEPTUMOR, CREEPTUMORBURROWED})
self.extractors = self.units(EXTRACTOR)
self.spores = self.units(SPORECRAWLER)
self.spires = self.units(SPIRE)<|docstring|>Initialize our buildings<|endoftext|> |
a4caad6870898e0b6bdbe5e0d4881b9e5641c06db95a3c04456d5f81b2842654 | def initialize_enemies(self):
'Initialize everything related to enemies'
excluded_from_ground = {DRONE, SCV, PROBE}
self.enemies = self.known_enemy_units
self.flying_enemies = self.enemies.flying
self.ground_enemies = self.enemies.not_flying.not_structure.exclude_type(excluded_from_ground)
self.enemy_structures = self.known_enemy_structures
self.prepare_enemy_data_points() | Initialize everything related to enemies | data_container.py | initialize_enemies | drakonnan1st/JackBot | 0 | python | def initialize_enemies(self):
excluded_from_ground = {DRONE, SCV, PROBE}
self.enemies = self.known_enemy_units
self.flying_enemies = self.enemies.flying
self.ground_enemies = self.enemies.not_flying.not_structure.exclude_type(excluded_from_ground)
self.enemy_structures = self.known_enemy_structures
self.prepare_enemy_data_points() | def initialize_enemies(self):
excluded_from_ground = {DRONE, SCV, PROBE}
self.enemies = self.known_enemy_units
self.flying_enemies = self.enemies.flying
self.ground_enemies = self.enemies.not_flying.not_structure.exclude_type(excluded_from_ground)
self.enemy_structures = self.known_enemy_structures
self.prepare_enemy_data_points()<|docstring|>Initialize everything related to enemies<|endoftext|> |
3e10cdab99d6c90931b1ba415120187c7528033616dbd06f652184560a4664c8 | def prepare_bases_data(self):
'Prepare data related to our bases'
if self.townhalls:
self.furthest_townhall_to_map_center = self.townhalls.furthest_to(self.game_info.map_center) | Prepare data related to our bases | data_container.py | prepare_bases_data | drakonnan1st/JackBot | 0 | python | def prepare_bases_data(self):
if self.townhalls:
self.furthest_townhall_to_map_center = self.townhalls.furthest_to(self.game_info.map_center) | def prepare_bases_data(self):
if self.townhalls:
self.furthest_townhall_to_map_center = self.townhalls.furthest_to(self.game_info.map_center)<|docstring|>Prepare data related to our bases<|endoftext|> |
3caaa0bc00f2ea5bf54f72a2344e1f33e36a416125de8cea73dce1dd11dce6d7 | def duplicates(self, other_rule):
"Returns True if rules have got same values in fields defined in\n 'duplicates_compare_fields' list.\n\n In case when subclass don't have defined any field in\n duplicates_compare_fields, only rule types are compared.\n "
if (self.rule_type != other_rule.rule_type):
return False
if self.duplicates_compare_fields:
for field in self.duplicates_compare_fields:
if (getattr(self, field) != getattr(other_rule, field)):
return False
return True | Returns True if rules have the same values in fields defined in
'duplicates_compare_fields' list.
If a subclass does not define any field in
duplicates_compare_fields, only rule types are compared. | neutron/objects/qos/rule.py | duplicates | urimeba/neutron | 1,080 | python | def duplicates(self, other_rule):
"Returns True if rules have got same values in fields defined in\n 'duplicates_compare_fields' list.\n\n In case when subclass don't have defined any field in\n duplicates_compare_fields, only rule types are compared.\n "
if (self.rule_type != other_rule.rule_type):
return False
if self.duplicates_compare_fields:
for field in self.duplicates_compare_fields:
if (getattr(self, field) != getattr(other_rule, field)):
return False
return True | def duplicates(self, other_rule):
"Returns True if rules have got same values in fields defined in\n 'duplicates_compare_fields' list.\n\n In case when subclass don't have defined any field in\n duplicates_compare_fields, only rule types are compared.\n "
if (self.rule_type != other_rule.rule_type):
return False
if self.duplicates_compare_fields:
for field in self.duplicates_compare_fields:
if (getattr(self, field) != getattr(other_rule, field)):
return False
return True<|docstring|>Returns True if rules have the same values in fields defined in
'duplicates_compare_fields' list.
If a subclass does not define any field in
duplicates_compare_fields, only rule types are compared.<|endoftext|> |
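A self-contained illustration of the comparison contract described in that docstring; the stand-in class below is hypothetical and only mirrors the logic, it is not Neutron's real rule object.

class _FakeRule:
    rule_type = 'bandwidth_limit'
    duplicates_compare_fields = ('max_kbps', 'direction')

    def __init__(self, max_kbps, direction):
        self.max_kbps, self.direction = max_kbps, direction

    def duplicates(self, other):
        if self.rule_type != other.rule_type:
            return False
        return all(getattr(self, f) == getattr(other, f)
                   for f in self.duplicates_compare_fields)

print(_FakeRule(1000, 'egress').duplicates(_FakeRule(1000, 'egress')))   # True
print(_FakeRule(1000, 'egress').duplicates(_FakeRule(2000, 'egress')))   # False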
067cdd9259f87aa196d78f78053972e28a8262355f3208fdeadf9a5425f02bb8 | def should_apply_to_port(self, port):
'Check whether a rule can be applied to a specific port.\n\n This function has the logic to decide whether a rule should\n be applied to a port or not, depending on the source of the\n policy (network, or port). Eventually rules could override\n this method, or we could make it abstract to allow different\n rule behaviour.\n '
is_port_policy = (self.qos_policy_id == port[qos_consts.QOS_POLICY_ID])
is_network_policy_only = (port[qos_consts.QOS_POLICY_ID] is None)
is_network_device_port = any((port['device_owner'].startswith(prefix) for prefix in constants.DEVICE_OWNER_PREFIXES))
is_router_gw = any((port['device_owner'].startswith(prefix) for prefix in [constants.DEVICE_OWNER_AGENT_GW, constants.DEVICE_OWNER_ROUTER_GW]))
return (is_port_policy or ((is_router_gw or (not is_network_device_port)) and is_network_policy_only)) | Check whether a rule can be applied to a specific port.
This function has the logic to decide whether a rule should
be applied to a port or not, depending on the source of the
policy (network, or port). Eventually rules could override
this method, or we could make it abstract to allow different
rule behaviour. | neutron/objects/qos/rule.py | should_apply_to_port | urimeba/neutron | 1,080 | python | def should_apply_to_port(self, port):
'Check whether a rule can be applied to a specific port.\n\n This function has the logic to decide whether a rule should\n be applied to a port or not, depending on the source of the\n policy (network, or port). Eventually rules could override\n this method, or we could make it abstract to allow different\n rule behaviour.\n '
is_port_policy = (self.qos_policy_id == port[qos_consts.QOS_POLICY_ID])
is_network_policy_only = (port[qos_consts.QOS_POLICY_ID] is None)
is_network_device_port = any((port['device_owner'].startswith(prefix) for prefix in constants.DEVICE_OWNER_PREFIXES))
is_router_gw = any((port['device_owner'].startswith(prefix) for prefix in [constants.DEVICE_OWNER_AGENT_GW, constants.DEVICE_OWNER_ROUTER_GW]))
return (is_port_policy or ((is_router_gw or (not is_network_device_port)) and is_network_policy_only)) | def should_apply_to_port(self, port):
'Check whether a rule can be applied to a specific port.\n\n This function has the logic to decide whether a rule should\n be applied to a port or not, depending on the source of the\n policy (network, or port). Eventually rules could override\n this method, or we could make it abstract to allow different\n rule behaviour.\n '
is_port_policy = (self.qos_policy_id == port[qos_consts.QOS_POLICY_ID])
is_network_policy_only = (port[qos_consts.QOS_POLICY_ID] is None)
is_network_device_port = any((port['device_owner'].startswith(prefix) for prefix in constants.DEVICE_OWNER_PREFIXES))
is_router_gw = any((port['device_owner'].startswith(prefix) for prefix in [constants.DEVICE_OWNER_AGENT_GW, constants.DEVICE_OWNER_ROUTER_GW]))
return (is_port_policy or ((is_router_gw or (not is_network_device_port)) and is_network_policy_only))<|docstring|>Check whether a rule can be applied to a specific port.
This function has the logic to decide whether a rule should
be applied to a port or not, depending on the source of the
policy (network, or port). Eventually rules could override
this method, or we could make it abstract to allow different
rule behaviour.<|endoftext|> |
08c69e28353c6e89dc47636cbbde919804f7b4bac147eb1ca0d1cd77dd708f7a | def write_proto(self, dst_path: str, package_name: str):
'Write the protobuf code for the graph to file.\n\n Args:\n dst_path (str): Path to the output directory where code has to be\n written.\n package_name (str): Package name for the proto code.\n '
assert isinstance(dst_path, str), "Invalid parameter 'dst_path' must be 'str'."
outFile = open((dst_path + 'schema.proto'), 'w')
(class_to_prop, prop_to_class, enumerations) = self.__get_values()
proto_string = ''
proto_string += self.__get_header(package_name)
proto_string += self.__get_options()
proto_string += self.__get_datatypes()
proto_string += self.__class_to_proto(class_to_prop, enumerations)
proto_string += self.__enum_to_proto(class_to_prop, enumerations)
proto_string += self.__prop_to_proto(prop_to_class, set(class_to_prop.keys()))
outFile.write(proto_string)
outFile.close()
outFile = open((dst_path + 'schema_descriptor.json'), 'w')
json_descriptor = self.__get_json_descriptor(class_to_prop, prop_to_class, enumerations)
json.dump(json_descriptor, outFile, indent=4)
outFile.close() | Write the protobuf code for the graph to file.
Args:
dst_path (str): Path to the output directory where code has to be
written.
package_name (str): Package name for the proto code. | protogenerator/core/schema_generator.py | write_proto | googleinterns/schemaorg-generator | 0 | python | def write_proto(self, dst_path: str, package_name: str):
'Write the protobuf code for the graph to file.\n\n Args:\n dst_path (str): Path to the output directory where code has to be\n written.\n package_name (str): Package name for the proto code.\n '
assert isinstance(dst_path, str), "Invalid parameter 'dst_path' must be 'str'."
outFile = open((dst_path + 'schema.proto'), 'w')
(class_to_prop, prop_to_class, enumerations) = self.__get_values()
proto_string =
proto_string += self.__get_header(package_name)
proto_string += self.__get_options()
proto_string += self.__get_datatypes()
proto_string += self.__class_to_proto(class_to_prop, enumerations)
proto_string += self.__enum_to_proto(class_to_prop, enumerations)
proto_string += self.__prop_to_proto(prop_to_class, set(class_to_prop.keys()))
outFile.write(proto_string)
outFile.close()
outFile = open((dst_path + 'schema_descriptor.json'), 'w')
json_descriptor = self.__get_json_descriptor(class_to_prop, prop_to_class, enumerations)
json.dump(json_descriptor, outFile, indent=4)
outFile.close() | def write_proto(self, dst_path: str, package_name: str):
'Write the protobuf code for the graph to file.\n\n Args:\n dst_path (str): Path to the output directory where code has to be\n written.\n package_name (str): Package name for the proto code.\n '
assert isinstance(dst_path, str), "Invalid parameter 'dst_path' must be 'str'."
outFile = open((dst_path + 'schema.proto'), 'w')
(class_to_prop, prop_to_class, enumerations) = self.__get_values()
proto_string =
proto_string += self.__get_header(package_name)
proto_string += self.__get_options()
proto_string += self.__get_datatypes()
proto_string += self.__class_to_proto(class_to_prop, enumerations)
proto_string += self.__enum_to_proto(class_to_prop, enumerations)
proto_string += self.__prop_to_proto(prop_to_class, set(class_to_prop.keys()))
outFile.write(proto_string)
outFile.close()
outFile = open((dst_path + 'schema_descriptor.json'), 'w')
json_descriptor = self.__get_json_descriptor(class_to_prop, prop_to_class, enumerations)
json.dump(json_descriptor, outFile, indent=4)
outFile.close()<|docstring|>Write the protobuf code for the graph to file.
Args:
dst_path (str): Path to the output directory where code has to be
written.
package_name (str): Package name for the proto code.<|endoftext|> |
a3b06aad484d86db439760ed0e52a2a2bc3df4fc29366039e07c7fe1db64d629 | def __class_to_proto(self, class_to_prop: Dict[(str, Set[PropertyToParent])], enumerations: Set[str]):
'Call ClassDescriptor.to_proto() and get proto code for every schema\n class.\n\n Args:\n class_to_prop (dict(set): Dictionary containing set of properties\n for every class.\n enumerations (set): Set containing the enumerations in the schema.\n\n Returns:\n str: The proto code for all the schema classes in class_to_prop as\n a string.\n '
proto_class = '// Definition of classes begin here.\n\n'
for x in sorted(class_to_prop.keys()):
if ((x not in enumerations) and (x not in constants.schema_datatypes) and (x not in constants.schema_primitives)):
comment = ''
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_class += class_descriptor.ClassDescriptor(x, list(class_to_prop[x])).to_proto(comment)
proto_class += '\n'
return proto_class | Call ClassDescriptor.to_proto() and get proto code for every schema
class.
Args:
class_to_prop (dict(set): Dictionary containing set of properties
for every class.
enumerations (set): Set containing the enumerations in the schema.
Returns:
str: The proto code for all the schema classes in class_to_prop as
a string. | protogenerator/core/schema_generator.py | __class_to_proto | googleinterns/schemaorg-generator | 0 | python | def __class_to_proto(self, class_to_prop: Dict[(str, Set[PropertyToParent])], enumerations: Set[str]):
'Call ClassDescriptor.to_proto() and get proto code for every schema\n class.\n\n Args:\n class_to_prop (dict(set): Dictionary containing set of properties\n for every class.\n enumerations (set): Set containing the enumerations in the schema.\n\n Returns:\n str: The proto code for all the schema classes in class_to_prop as\n a string.\n '
proto_class = '// Definition of classes begin here.\n\n'
for x in sorted(class_to_prop.keys()):
if ((x not in enumerations) and (x not in constants.schema_datatypes) and (x not in constants.schema_primitives)):
comment =
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_class += class_descriptor.ClassDescriptor(x, list(class_to_prop[x])).to_proto(comment)
proto_class += '\n'
return proto_class | def __class_to_proto(self, class_to_prop: Dict[(str, Set[PropertyToParent])], enumerations: Set[str]):
'Call ClassDescriptor.to_proto() and get proto code for every schema\n class.\n\n Args:\n class_to_prop (dict(set): Dictionary containing set of properties\n for every class.\n enumerations (set): Set containing the enumerations in the schema.\n\n Returns:\n str: The proto code for all the schema classes in class_to_prop as\n a string.\n '
proto_class = '// Definition of classes begin here.\n\n'
for x in sorted(class_to_prop.keys()):
if ((x not in enumerations) and (x not in constants.schema_datatypes) and (x not in constants.schema_primitives)):
comment =
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_class += class_descriptor.ClassDescriptor(x, list(class_to_prop[x])).to_proto(comment)
proto_class += '\n'
return proto_class<|docstring|>Call ClassDescriptor.to_proto() and get proto code for every schema
class.
Args:
class_to_prop (dict(set): Dictionary containing set of properties
for every class.
enumerations (set): Set containing the enumerations in the schema.
Returns:
str: The proto code for all the schema classes in class_to_prop as
a string.<|endoftext|> |
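A small, runnable illustration of the comment-cleanup step used in this method: schema.org comments may contain HTML, which BeautifulSoup reduces to plain text before the comment is emitted into the generated proto.

from bs4 import BeautifulSoup

comment = 'A <a href="https://schema.org/Person">person</a> (alive, dead, undead, or fictional).'
print(BeautifulSoup(comment, 'html.parser').get_text())
# -> A person (alive, dead, undead, or fictional).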
e056102112887965979bbf0d02cb7e5686d5b9d8894cc812c673193d131f67b0 | def __prop_to_proto(self, prop_to_class: Dict[(str, Set[str])], class_list: Set[str]):
'Call PropertyDescriptor.to_proto() and get proto code for every\n schema property.\n\n Args:\n prop_to_class (dict(set)): Dictionary containing range of\n class/datatypes for every property.\n class_list (set): Set of defined classes.\n\n Returns:\n str: The proto code for all the schema property in prop_to_class as\n a string.\n '
proto_property = '// Definition of properties begin here.\n\n'
for x in sorted(prop_to_class.keys()):
if (len(prop_to_class[x]) > 0):
comment = ''
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_property += property_descriptor.PropertyDescriptor(x, list(prop_to_class[x]), list(class_list)).to_proto(comment)
proto_property += '\n'
return proto_property | Call PropertyDescriptor.to_proto() and get proto code for every
schema property.
Args:
prop_to_class (dict(set)): Dictionary containing range of
class/datatypes for every property.
class_list (set): Set of defined classes.
Returns:
str: The proto code for all the schema property in prop_to_class as
a string. | protogenerator/core/schema_generator.py | __prop_to_proto | googleinterns/schemaorg-generator | 0 | python | def __prop_to_proto(self, prop_to_class: Dict[(str, Set[str])], class_list: Set[str]):
'Call PropertyDescriptor.to_proto() and get proto code for every\n schema property.\n\n Args:\n prop_to_class (dict(set)): Dictionary containing range of\n class/datatypes for every property.\n class_list (set): Set of defined classes.\n\n Returns:\n str: The proto code for all the schema property in prop_to_class as\n a string.\n '
proto_property = '// Definition of properties begin here.\n\n'
for x in sorted(prop_to_class.keys()):
if (len(prop_to_class[x]) > 0):
comment =
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_property += property_descriptor.PropertyDescriptor(x, list(prop_to_class[x]), list(class_list)).to_proto(comment)
proto_property += '\n'
return proto_property | def __prop_to_proto(self, prop_to_class: Dict[(str, Set[str])], class_list: Set[str]):
'Call PropertyDescriptor.to_proto() and get proto code for every\n schema property.\n\n Args:\n prop_to_class (dict(set)): Dictionary containing range of\n class/datatypes for every property.\n class_list (set): Set of defined classes.\n\n Returns:\n str: The proto code for all the schema property in prop_to_class as\n a string.\n '
proto_property = '// Definition of properties begin here.\n\n'
for x in sorted(prop_to_class.keys()):
if (len(prop_to_class[x]) > 0):
comment =
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_property += property_descriptor.PropertyDescriptor(x, list(prop_to_class[x]), list(class_list)).to_proto(comment)
proto_property += '\n'
return proto_property<|docstring|>Call PropertyDescriptor.to_proto() and get proto code for every
schema property.
Args:
prop_to_class (dict(set)): Dictionary containing range of
class/datatypes for every property.
class_list (set): Set of defined classes.
Returns:
str: The proto code for all the schema property in prop_to_class as
a string.<|endoftext|> |
4729afdcd019575ffed34d01f45a2511e7c8efd183f821e0e49482cd401d7dfe | def __enum_to_proto(self, class_to_prop: Dict[(str, Set[PropertyToParent])], enumerations: Set[str]):
'Call EnumDescriptor.to_proto() and get proto code for every schema\n enumeration.\n\n Args:\n class_to_prop (dict(set): Dictionary containing set of properties\n for every class.\n enumerations (set): Set containing the enumerations in the schema.\n\n Returns:\n str: The proto code for all the schema enumerations in enumerations\n as a string.\n '
proto_enum = '// Definition of enumerations begin here.\n\n'
for x in sorted(enumerations):
enum_values = set()
for (ev, _, _) in self.graph.triples((None, constants.schema_constants['Type'], utils.add_url(x))):
enum_values.add(utils.strip_url(ev))
comment = ''
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_enum += enum_descriptor.EnumDescriptor(x, list(class_to_prop[x]), list(enum_values)).to_proto(comment)
proto_enum += '\n'
return proto_enum | Call EnumDescriptor.to_proto() and get proto code for every schema
enumeration.
Args:
class_to_prop (dict(set): Dictionary containing set of properties
for every class.
enumerations (set): Set containing the enumerations in the schema.
Returns:
str: The proto code for all the schema enumerations in enumerations
as a string. | protogenerator/core/schema_generator.py | __enum_to_proto | googleinterns/schemaorg-generator | 0 | python | def __enum_to_proto(self, class_to_prop: Dict[(str, Set[PropertyToParent])], enumerations: Set[str]):
'Call EnumDescriptor.to_proto() and get proto code for every schema\n enumeration.\n\n Args:\n class_to_prop (dict(set): Dictionary containing set of properties\n for every class.\n enumerations (set): Set containing the enumerations in the schema.\n\n Returns:\n str: The proto code for all the schema enumerations in enumerations\n as a string.\n '
proto_enum = '// Definition of enumerations begin here.\n\n'
for x in sorted(enumerations):
enum_values = set()
for (ev, _, _) in self.graph.triples((None, constants.schema_constants['Type'], utils.add_url(x))):
enum_values.add(utils.strip_url(ev))
comment =
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_enum += enum_descriptor.EnumDescriptor(x, list(class_to_prop[x]), list(enum_values)).to_proto(comment)
proto_enum += '\n'
return proto_enum | def __enum_to_proto(self, class_to_prop: Dict[(str, Set[PropertyToParent])], enumerations: Set[str]):
'Call EnumDescriptor.to_proto() and get proto code for every schema\n enumeration.\n\n Args:\n class_to_prop (dict(set): Dictionary containing set of properties\n for every class.\n enumerations (set): Set containing the enumerations in the schema.\n\n Returns:\n str: The proto code for all the schema enumerations in enumerations\n as a string.\n '
proto_enum = '// Definition of enumerations begin here.\n\n'
for x in sorted(enumerations):
enum_values = set()
for (ev, _, _) in self.graph.triples((None, constants.schema_constants['Type'], utils.add_url(x))):
enum_values.add(utils.strip_url(ev))
comment =
for (_, _, c) in self.graph.triples((utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_enum += enum_descriptor.EnumDescriptor(x, list(class_to_prop[x]), list(enum_values)).to_proto(comment)
proto_enum += '\n'
return proto_enum<|docstring|>Call EnumDescriptor.to_proto() and get proto code for every schema
enumeration.
Args:
class_to_prop (dict(set): Dictionary containing set of properties
for every class.
enumerations (set): Set containing the enumerations in the schema.
Returns:
str: The proto code for all the schema enumerations in enumerations
as a string.<|endoftext|> |
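The enum-member lookup above is a plain rdflib triple pattern; a minimal standalone example with a tiny hand-built graph (hypothetical data, not the full schema.org dump) looks like this.

import rdflib

SCHEMA = rdflib.Namespace('http://schema.org/')
g = rdflib.Graph()
g.add((SCHEMA.Monday, rdflib.RDF.type, SCHEMA.DayOfWeek))
g.add((SCHEMA.Tuesday, rdflib.RDF.type, SCHEMA.DayOfWeek))
members = {s for s, _, _ in g.triples((None, rdflib.RDF.type, SCHEMA.DayOfWeek))}
print(sorted(str(m) for m in members))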
633744b7046573bea7ecef9cdbe4d78a43659add95588ebb9e27894dbb589ad5 | def __get_values(self) -> Tuple[(Dict[(str, Set[PropertyToParent])], Dict[(str, Set[str])], Set[str])]:
'Call utils.toplogical_sort(), compress the inheritance heirarchy and\n return mappings between schema classes, schema properties and schema\n enumerations.\n\n Returns:\n dict[str, set[PropertyToParent]]: Dictionary containing set of\n properties for every class.\n dict[str, set[str]]: Dictionary containing range of\n class/datatypes for every property.\n set[str]: Set containing the enumerations in the schema.\n '
class_to_prop = dict()
inheritance_graph = dict()
for (class_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
class_to_prop[utils.strip_url(class_name)] = set()
for (property_name, _, _) in self.graph.triples((None, constants.schema_constants['domainIncludes'], class_name)):
prop = utils.PropertyToParent(utils.strip_url(property_name), utils.strip_url(class_name))
class_to_prop[utils.strip_url(class_name)].add(prop)
for (class_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
if (class_name not in inheritance_graph):
inheritance_graph[class_name] = set()
for (_, _, parent_class) in self.graph.triples((class_name, constants.schema_constants['subClassOf'], None)):
if (parent_class not in inheritance_graph):
inheritance_graph[parent_class] = set()
inheritance_graph[parent_class].add(class_name)
topsort_order = utils.topological_sort(inheritance_graph)
for class_name in topsort_order:
for (_, _, parent_class) in self.graph.triples((class_name, constants.schema_constants['subClassOf'], None)):
if (utils.strip_url(parent_class) in class_to_prop):
class_to_prop[utils.strip_url(class_name)] = (class_to_prop[utils.strip_url(class_name)] | class_to_prop[utils.strip_url(parent_class)])
enumerations = set()
for (enum, _, _) in self.graph.triples((None, constants.schema_constants['subClassOf'], constants.schema_constants['Enumeration'])):
enumerations.add(utils.strip_url(enum))
class_to_children = utils.get_children(inheritance_graph)
prop_to_class = dict()
for (property_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Property'])):
prop_to_class[utils.strip_url(property_name)] = set()
for (_, _, class_name) in self.graph.triples((property_name, constants.schema_constants['rangeIncludes'], None)):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(class_name))
if (class_name in class_to_children):
prop_to_class[utils.strip_url(property_name)] = (prop_to_class[utils.strip_url(property_name)] | set(map(utils.strip_url, class_to_children[class_name])))
if (class_name == constants.schema_constants['Number']):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['Integer']))
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['Float']))
if (class_name == constants.schema_constants['Text']):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['URL']))
return (class_to_prop, prop_to_class, enumerations) | Call utils.topological_sort(), compress the inheritance hierarchy and
return mappings between schema classes, schema properties and schema
enumerations.
Returns:
dict[str, set[PropertyToParent]]: Dictionary containing set of
properties for every class.
dict[str, set[str]]: Dictionary containing range of
class/datatypes for every property.
set[str]: Set containing the enumerations in the schema. | protogenerator/core/schema_generator.py | __get_values | googleinterns/schemaorg-generator | 0 | python | def __get_values(self) -> Tuple[(Dict[(str, Set[PropertyToParent])], Dict[(str, Set[str])], Set[str])]:
'Call utils.topological_sort(), compress the inheritance hierarchy and\n return mappings between schema classes, schema properties and schema\n enumerations.\n\n Returns:\n dict[str, set[PropertyToParent]]: Dictionary containing set of\n properties for every class.\n dict[str, set[str]]: Dictionary containing range of\n class/datatypes for every property.\n set[str]: Set containing the enumerations in the schema.\n '
class_to_prop = dict()
inheritance_graph = dict()
for (class_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
class_to_prop[utils.strip_url(class_name)] = set()
for (property_name, _, _) in self.graph.triples((None, constants.schema_constants['domainIncludes'], class_name)):
prop = utils.PropertyToParent(utils.strip_url(property_name), utils.strip_url(class_name))
class_to_prop[utils.strip_url(class_name)].add(prop)
for (class_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
if (class_name not in inheritance_graph):
inheritance_graph[class_name] = set()
for (_, _, parent_class) in self.graph.triples((class_name, constants.schema_constants['subClassOf'], None)):
if (parent_class not in inheritance_graph):
inheritance_graph[parent_class] = set()
inheritance_graph[parent_class].add(class_name)
topsort_order = utils.topological_sort(inheritance_graph)
for class_name in topsort_order:
for (_, _, parent_class) in self.graph.triples((class_name, constants.schema_constants['subClassOf'], None)):
if (utils.strip_url(parent_class) in class_to_prop):
class_to_prop[utils.strip_url(class_name)] = (class_to_prop[utils.strip_url(class_name)] | class_to_prop[utils.strip_url(parent_class)])
enumerations = set()
for (enum, _, _) in self.graph.triples((None, constants.schema_constants['subClassOf'], constants.schema_constants['Enumeration'])):
enumerations.add(utils.strip_url(enum))
class_to_children = utils.get_children(inheritance_graph)
prop_to_class = dict()
for (property_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Property'])):
prop_to_class[utils.strip_url(property_name)] = set()
for (_, _, class_name) in self.graph.triples((property_name, constants.schema_constants['rangeIncludes'], None)):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(class_name))
if (class_name in class_to_children):
prop_to_class[utils.strip_url(property_name)] = (prop_to_class[utils.strip_url(property_name)] | set(map(utils.strip_url, class_to_children[class_name])))
if (class_name == constants.schema_constants['Number']):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['Integer']))
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['Float']))
if (class_name == constants.schema_constants['Text']):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['URL']))
return (class_to_prop, prop_to_class, enumerations) | def __get_values(self) -> Tuple[(Dict[(str, Set[PropertyToParent])], Dict[(str, Set[str])], Set[str])]:
'Call utils.topological_sort(), compress the inheritance hierarchy and\n return mappings between schema classes, schema properties and schema\n enumerations.\n\n Returns:\n dict[str, set[PropertyToParent]]: Dictionary containing set of\n properties for every class.\n dict[str, set[str]]: Dictionary containing range of\n class/datatypes for every property.\n set[str]: Set containing the enumerations in the schema.\n '
class_to_prop = dict()
inheritance_graph = dict()
for (class_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
class_to_prop[utils.strip_url(class_name)] = set()
for (property_name, _, _) in self.graph.triples((None, constants.schema_constants['domainIncludes'], class_name)):
prop = utils.PropertyToParent(utils.strip_url(property_name), utils.strip_url(class_name))
class_to_prop[utils.strip_url(class_name)].add(prop)
for (class_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
if (class_name not in inheritance_graph):
inheritance_graph[class_name] = set()
for (_, _, parent_class) in self.graph.triples((class_name, constants.schema_constants['subClassOf'], None)):
if (parent_class not in inheritance_graph):
inheritance_graph[parent_class] = set()
inheritance_graph[parent_class].add(class_name)
topsort_order = utils.topological_sort(inheritance_graph)
for class_name in topsort_order:
for (_, _, parent_class) in self.graph.triples((class_name, constants.schema_constants['subClassOf'], None)):
if (utils.strip_url(parent_class) in class_to_prop):
class_to_prop[utils.strip_url(class_name)] = (class_to_prop[utils.strip_url(class_name)] | class_to_prop[utils.strip_url(parent_class)])
enumerations = set()
for (enum, _, _) in self.graph.triples((None, constants.schema_constants['subClassOf'], constants.schema_constants['Enumeration'])):
enumerations.add(utils.strip_url(enum))
class_to_children = utils.get_children(inheritance_graph)
prop_to_class = dict()
for (property_name, _, _) in self.graph.triples((None, constants.schema_constants['Type'], constants.schema_constants['Property'])):
prop_to_class[utils.strip_url(property_name)] = set()
for (_, _, class_name) in self.graph.triples((property_name, constants.schema_constants['rangeIncludes'], None)):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(class_name))
if (class_name in class_to_children):
prop_to_class[utils.strip_url(property_name)] = (prop_to_class[utils.strip_url(property_name)] | set(map(utils.strip_url, class_to_children[class_name])))
if (class_name == constants.schema_constants['Number']):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['Integer']))
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['Float']))
if (class_name == constants.schema_constants['Text']):
prop_to_class[utils.strip_url(property_name)].add(utils.strip_url(constants.schema_constants['URL']))
return (class_to_prop, prop_to_class, enumerations)<|docstring|>Call utils.topological_sort(), compress the inheritance hierarchy and
return mappings between schema classes, schema properties and schema
enumerations.
Returns:
dict[str, set[PropertyToParent]]: Dictionary containing set of
properties for every class.
dict[str, set[str]]: Dictionary containing range of
class/datatypes for every property.
set[str]: Set containing the enumerations in the schema.<|endoftext|> |
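A self-contained sketch of the inheritance-compression step performed by __get_values: classes are visited in topological order so every class picks up its parents' properties before its own children are processed. The classes, properties and the use of graphlib here are illustrative assumptions, not the project's actual data or its utils.topological_sort helper.

from graphlib import TopologicalSorter  # standard library, Python 3.9+

# Hypothetical parent -> children inheritance graph and per-class properties.
inheritance = {'Thing': {'CreativeWork'}, 'CreativeWork': {'Book'}, 'Book': set()}
own_props = {'Thing': {'name'}, 'CreativeWork': {'author'}, 'Book': {'isbn'}}

# graphlib expects node -> predecessors, so invert the graph to child -> {parent}.
parents = {child: {parent} for parent, children in inheritance.items() for child in children}
order = TopologicalSorter(parents).static_order()  # parents always precede children

all_props = {}
for cls in order:
    inherited = set().union(*(all_props[p] for p in parents.get(cls, set())))
    all_props[cls] = own_props.get(cls, set()) | inherited

print(all_props['Book'])  # -> {'name', 'author', 'isbn'} (set order may vary)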
d6bca56b3028dc1cf6de95125114f6dfb64f23025c310d7c14f606acd821d6a7 | def __get_header(self, package_name: str) -> str:
'Return the header for proto code file.\n\n Args:\n package_name (str): Package name for the proto code.\n\n Returns:\n str: The proto code of header as a string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_header = env.get_template('header.txt').render(package_name=package_name)
return proto_header | Return the header for proto code file.
Args:
package_name (str): Package name for the proto code.
Returns:
str: The proto code of header as a string. | protogenerator/core/schema_generator.py | __get_header | googleinterns/schemaorg-generator | 0 | python | def __get_header(self, package_name: str) -> str:
'Return the header for proto code file.\n\n Args:\n package_name (str): Package name for the proto code.\n\n Returns:\n str: The proto code of header as a string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_header = env.get_template('header.txt').render(package_name=package_name)
return proto_header | def __get_header(self, package_name: str) -> str:
'Return the header for proto code file.\n\n Args:\n package_name (str): Package name for the proto code.\n\n Returns:\n str: The proto code of header as a string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_header = env.get_template('header.txt').render(package_name=package_name)
return proto_header<|docstring|>Return the header for proto code file.
Args:
package_name (str): Package name for the proto code.
Returns:
str: The proto code of header as a string.<|endoftext|> |
02890b76cd43741d5fe4210bffb12fec81e6e3473b23c1a5d2560b440e607a2c | def __get_options(self) -> str:
'Return the options for JSONLD serializer.\n\n Returns:\n str: The proto code of options for JSONLD serializer as a string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_options = env.get_template('options.txt').render()
return proto_options | Return the options for JSONLD serializer.
Returns:
str: The proto code of options for JSONLD serializer as a string. | protogenerator/core/schema_generator.py | __get_options | googleinterns/schemaorg-generator | 0 | python | def __get_options(self) -> str:
'Return the options for JSONLD serializer.\n\n Returns:\n str: The proto code of options for JSONLD serializer as a string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_options = env.get_template('options.txt').render()
return proto_options | def __get_options(self) -> str:
'Return the options for JSONLD serializer.\n\n Returns:\n str: The proto code of options for JSONLD serializer as a string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_options = env.get_template('options.txt').render()
return proto_options<|docstring|>Return the options for JSONLD serializer.
Returns:
str: The proto code of options for JSONLD serializer as a string.<|endoftext|> |
79d7b96f591a0316c3669d1e4d16ef7f63a7caa946accf9ce71777393896c111 | def __get_datatypes(self) -> str:
'Return the datatypes in accordance with schemaorg.\n\n Returns:\n str: The proto code of datatypes in accordance with schemaorg as a\n string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_datatypes = env.get_template('datatypes.txt').render()
return proto_datatypes | Return the datatypes in accordance with schemaorg.
Returns:
str: The proto code of datatypes in accordance with schemaorg as a
string. | protogenerator/core/schema_generator.py | __get_datatypes | googleinterns/schemaorg-generator | 0 | python | def __get_datatypes(self) -> str:
'Return the datatypes in accordance with schemaorg.\n\n Returns:\n str: The proto code of datatypes in accordance with schemaorg as a\n string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_datatypes = env.get_template('datatypes.txt').render()
return proto_datatypes | def __get_datatypes(self) -> str:
'Return the datatypes in accordance with schemaorg.\n\n Returns:\n str: The proto code of datatypes in accordance with schemaorg as a\n string.\n '
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_datatypes = env.get_template('datatypes.txt').render()
return proto_datatypes<|docstring|>Return the datatypes in accordance with schemaorg.
Returns:
str: The proto code of datatypes in accordance with schemaorg as a
string.<|endoftext|> |
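__get_header, __get_options and __get_datatypes above share one Jinja2 pattern: build an Environment around a loader, fetch a template by name, and render it with keyword arguments. A runnable stand-in that swaps FileSystemLoader('./core/templates') for a DictLoader so no template files are needed; the template text is invented:

from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    'header.txt': 'syntax = "proto3";\n\npackage {{ package_name }};\n',
}))
proto_header = env.get_template('header.txt').render(package_name='schemaorg')
print(proto_header)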
9c4e8be95e3ffe05c05bee832ed66b91c0a6912e0f5a91ce845f669d5d97b510 | def __get_json_descriptor(self, class_to_prop: Dict[(str, Set[PropertyToParent])], prop_to_class: Dict[(str, Set[str])], enumerations: Set[str]) -> Dict:
'Return a json descriptor for the given schema.\n\n Args:\n dict[str, set[PropertyToParent]]: Dictionary containing set of\n properties for every class.\n dict[str, set[str]]: Dictionary containing range of class/datatypes\n for every property.\n set[str]: Set containing the enumerations in the schema.\n\n Returns:\n dict: The json descriptor for the schema.\n '
defined_classes = set(class_to_prop.keys())
total_classes = set()
for (_, _, property_name) in self.graph.triples((None, utils.constants.schema_constants['rangeIncludes'], None)):
total_classes.add(utils.strip_url(property_name))
undefined_classes = total_classes.difference(defined_classes)
undefined_classes = (undefined_classes | set(utils.constants.schema_primitives.keys()))
message_descriptor = {}
for x in sorted(class_to_prop.keys()):
if ((x not in enumerations) and (x not in constants.schema_datatypes) and (x not in constants.schema_primitives)):
o = {}
o['@type'] = utils.strip_url(x)
prop_from_self = list()
prop_inherited = dict()
o['fields'] = list()
o['fields'].append('@id')
for p in class_to_prop[x]:
if (p.parent == x):
prop_from_self.append(p.name)
else:
if (p.parent not in prop_inherited):
prop_inherited[p.parent] = list()
prop_inherited[p.parent].append(p.name)
prop_from_self = sorted(prop_from_self)
prop_inherited = collections.OrderedDict(sorted(prop_inherited.items()))
for p in prop_from_self:
o['fields'].append(p)
for ky in prop_inherited:
props = sorted(prop_inherited[ky])
o['fields'].extend(props)
message_descriptor[x] = o
for x in sorted(prop_to_class.keys()):
if (len(prop_to_class[x]) > 0):
o = {}
o['@type'] = 'Property'
o['fields'] = sorted(list(prop_to_class[x]))
message_descriptor[x] = o
for x in sorted(enumerations):
enum_values = set()
for (ev, _, _) in self.graph.triples((None, constants.schema_constants['Type'], utils.add_url(x))):
enum_values.add(ev)
o = {}
o['@type'] = 'EnumWrapper'
o['values'] = sorted(list(enum_values))
o['values'].insert(0, 'Unknown')
o['fields'] = ['id', (x + 'Class')]
o2 = {}
o2['@type'] = x
prop_from_self = list()
prop_inherited = dict()
o2['fields'] = list()
o2['fields'].append('@id')
for p in class_to_prop[x]:
if (p.parent == x):
prop_from_self.append(p.name)
else:
if (p.parent not in prop_inherited):
prop_inherited[p.parent] = list()
prop_inherited[p.parent].append(p.name)
prop_from_self = sorted(prop_from_self)
prop_inherited = collections.OrderedDict(sorted(prop_inherited.items()))
for p in prop_from_self:
o2['fields'].append(p)
for ky in prop_inherited:
props = sorted(prop_inherited[ky])
o2['fields'].extend(props)
message_descriptor[x] = o
message_descriptor[(x + 'Class')] = o2
message_descriptor['Date'] = {}
message_descriptor['Date']['@type'] = 'DatatypeDate'
message_descriptor['DateTime'] = {}
message_descriptor['DateTime']['@type'] = 'DatatypeDateTime'
message_descriptor['Time'] = {}
message_descriptor['Time']['@type'] = 'DatatypeTime'
message_descriptor['Duration'] = {}
message_descriptor['Duration']['@type'] = 'DatatypeDuration'
message_descriptor['Distance'] = {}
message_descriptor['Distance']['@type'] = 'DatatypeQuantitative'
message_descriptor['Energy'] = {}
message_descriptor['Energy']['@type'] = 'DatatypeQuantitative'
message_descriptor['Mass'] = {}
message_descriptor['Mass']['@type'] = 'DatatypeQuantitative'
json_descriptor = {}
json_descriptor['messages'] = message_descriptor
json_descriptor['primitives'] = list(sorted(undefined_classes))
return json_descriptor | Return a json descriptor for the given schema.
Args:
dict[str, set[PropertyToParent]]: Dictionary containing set of
properties for every class.
dict[str, set[str]]: Dictionary containing range of class/datatypes
for every property.
set[str]: Set containing the enumerations in the schema.
Returns:
dict: The json descriptor for the schema. | protogenerator/core/schema_generator.py | __get_json_descriptor | googleinterns/schemaorg-generator | 0 | python | def __get_json_descriptor(self, class_to_prop: Dict[(str, Set[PropertyToParent])], prop_to_class: Dict[(str, Set[str])], enumerations: Set[str]) -> Dict:
'Return a json descriptor for the given schema.\n\n Args:\n dict[str, set[PropertyToParent]]: Dictionary containing set of\n properties for every class.\n dict[str, set[str]]: Dictionary containing range of class/datatypes\n for every property.\n set[str]: Set containing the enumerations in the schema.\n\n Returns:\n dict: The json descriptor for the schema.\n '
defined_classes = set(class_to_prop.keys())
total_classes = set()
for (_, _, property_name) in self.graph.triples((None, utils.constants.schema_constants['rangeIncludes'], None)):
total_classes.add(utils.strip_url(property_name))
undefined_classes = total_classes.difference(defined_classes)
undefined_classes = (undefined_classes | set(utils.constants.schema_primitives.keys()))
message_descriptor = {}
for x in sorted(class_to_prop.keys()):
if ((x not in enumerations) and (x not in constants.schema_datatypes) and (x not in constants.schema_primitives)):
o = {}
o['@type'] = utils.strip_url(x)
prop_from_self = list()
prop_inherited = dict()
o['fields'] = list()
o['fields'].append('@id')
for p in class_to_prop[x]:
if (p.parent == x):
prop_from_self.append(p.name)
else:
if (p.parent not in prop_inherited):
prop_inherited[p.parent] = list()
prop_inherited[p.parent].append(p.name)
prop_from_self = sorted(prop_from_self)
prop_inherited = collections.OrderedDict(sorted(prop_inherited.items()))
for p in prop_from_self:
o['fields'].append(p)
for ky in prop_inherited:
props = sorted(prop_inherited[ky])
o['fields'].extend(props)
message_descriptor[x] = o
for x in sorted(prop_to_class.keys()):
if (len(prop_to_class[x]) > 0):
o = {}
o['@type'] = 'Property'
o['fields'] = sorted(list(prop_to_class[x]))
message_descriptor[x] = o
for x in sorted(enumerations):
enum_values = set()
for (ev, _, _) in self.graph.triples((None, constants.schema_constants['Type'], utils.add_url(x))):
enum_values.add(ev)
o = {}
o['@type'] = 'EnumWrapper'
o['values'] = sorted(list(enum_values))
o['values'].insert(0, 'Unknown')
o['fields'] = ['id', (x + 'Class')]
o2 = {}
o2['@type'] = x
prop_from_self = list()
prop_inherited = dict()
o2['fields'] = list()
o2['fields'].append('@id')
for p in class_to_prop[x]:
if (p.parent == x):
prop_from_self.append(p.name)
else:
if (p.parent not in prop_inherited):
prop_inherited[p.parent] = list()
prop_inherited[p.parent].append(p.name)
prop_from_self = sorted(prop_from_self)
prop_inherited = collections.OrderedDict(sorted(prop_inherited.items()))
for p in prop_from_self:
o2['fields'].append(p)
for ky in prop_inherited:
props = sorted(prop_inherited[ky])
o2['fields'].extend(props)
message_descriptor[x] = o
message_descriptor[(x + 'Class')] = o2
message_descriptor['Date'] = {}
message_descriptor['Date']['@type'] = 'DatatypeDate'
message_descriptor['DateTime'] = {}
message_descriptor['DateTime']['@type'] = 'DatatypeDateTime'
message_descriptor['Time'] = {}
message_descriptor['Time']['@type'] = 'DatatypeTime'
message_descriptor['Duration'] = {}
message_descriptor['Duration']['@type'] = 'DatatypeDuration'
message_descriptor['Distance'] = {}
message_descriptor['Distance']['@type'] = 'DatatypeQuantitative'
message_descriptor['Energy'] = {}
message_descriptor['Energy']['@type'] = 'DatatypeQuantitative'
message_descriptor['Mass'] = {}
message_descriptor['Mass']['@type'] = 'DatatypeQuantitative'
json_descriptor = {}
json_descriptor['messages'] = message_descriptor
json_descriptor['primitives'] = list(sorted(undefined_classes))
return json_descriptor | def __get_json_descriptor(self, class_to_prop: Dict[(str, Set[PropertyToParent])], prop_to_class: Dict[(str, Set[str])], enumerations: Set[str]) -> Dict:
'Return a json descriptor for the given schema.\n\n Args:\n dict[str, set[PropertyToParent]]: Dictionary containing set of\n properties for every class.\n dict[str, set[str]]: Dictionary containing range of class/datatypes\n for every property.\n set[str]: Set containing the enumerations in the schema.\n\n Returns:\n dict: The json descriptor for the schema.\n '
defined_classes = set(class_to_prop.keys())
total_classes = set()
for (_, _, property_name) in self.graph.triples((None, utils.constants.schema_constants['rangeIncludes'], None)):
total_classes.add(utils.strip_url(property_name))
undefined_classes = total_classes.difference(defined_classes)
undefined_classes = (undefined_classes | set(utils.constants.schema_primitives.keys()))
message_descriptor = {}
for x in sorted(class_to_prop.keys()):
if ((x not in enumerations) and (x not in constants.schema_datatypes) and (x not in constants.schema_primitives)):
o = {}
o['@type'] = utils.strip_url(x)
prop_from_self = list()
prop_inherited = dict()
o['fields'] = list()
o['fields'].append('@id')
for p in class_to_prop[x]:
if (p.parent == x):
prop_from_self.append(p.name)
else:
if (p.parent not in prop_inherited):
prop_inherited[p.parent] = list()
prop_inherited[p.parent].append(p.name)
prop_from_self = sorted(prop_from_self)
prop_inherited = collections.OrderedDict(sorted(prop_inherited.items()))
for p in prop_from_self:
o['fields'].append(p)
for ky in prop_inherited:
props = sorted(prop_inherited[ky])
o['fields'].extend(props)
message_descriptor[x] = o
for x in sorted(prop_to_class.keys()):
if (len(prop_to_class[x]) > 0):
o = {}
o['@type'] = 'Property'
o['fields'] = sorted(list(prop_to_class[x]))
message_descriptor[x] = o
for x in sorted(enumerations):
enum_values = set()
for (ev, _, _) in self.graph.triples((None, constants.schema_constants['Type'], utils.add_url(x))):
enum_values.add(ev)
o = {}
o['@type'] = 'EnumWrapper'
o['values'] = sorted(list(enum_values))
o['values'].insert(0, 'Unknown')
o['fields'] = ['id', (x + 'Class')]
o2 = {}
o2['@type'] = x
prop_from_self = list()
prop_inherited = dict()
o2['fields'] = list()
o2['fields'].append('@id')
for p in class_to_prop[x]:
if (p.parent == x):
prop_from_self.append(p.name)
else:
if (p.parent not in prop_inherited):
prop_inherited[p.parent] = list()
prop_inherited[p.parent].append(p.name)
prop_from_self = sorted(prop_from_self)
prop_inherited = collections.OrderedDict(sorted(prop_inherited.items()))
for p in prop_from_self:
o2['fields'].append(p)
for ky in prop_inherited:
props = sorted(prop_inherited[ky])
o2['fields'].extend(props)
message_descriptor[x] = o
message_descriptor[(x + 'Class')] = o2
message_descriptor['Date'] = {}
message_descriptor['Date']['@type'] = 'DatatypeDate'
message_descriptor['DateTime'] = {}
message_descriptor['DateTime']['@type'] = 'DatatypeDateTime'
message_descriptor['Time'] = {}
message_descriptor['Time']['@type'] = 'DatatypeTime'
message_descriptor['Duration'] = {}
message_descriptor['Duration']['@type'] = 'DatatypeDuration'
message_descriptor['Distance'] = {}
message_descriptor['Distance']['@type'] = 'DatatypeQuantitative'
message_descriptor['Energy'] = {}
message_descriptor['Energy']['@type'] = 'DatatypeQuantitative'
message_descriptor['Mass'] = {}
message_descriptor['Mass']['@type'] = 'DatatypeQuantitative'
json_descriptor = {}
json_descriptor['messages'] = message_descriptor
json_descriptor['primitives'] = list(sorted(undefined_classes))
return json_descriptor<|docstring|>Return a json descriptor for the given schema.
Args:
dict[str, set[PropertyToParent]]: Dictionary containing set of
properties for every class.
dict[str, set[str]]: Dictionary containing range of class/datatypes
for every property.
set[str]: Set containing the enumerations in the schema.
Returns:
dict: The json descriptor for the schema.<|endoftext|> |
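Pulling the branches of __get_json_descriptor together, the returned dict holds a 'messages' map with three entry shapes (class, property, and enumeration plus its companion <Name>Class message) and a 'primitives' list. A hand-written example of that shape, with invented schema names:

json_descriptor = {
    'messages': {
        # ordinary class: '@id' first, then its own properties, then inherited ones
        'Book': {'@type': 'Book', 'fields': ['@id', 'isbn', 'name']},
        # property: the sorted range of classes/datatypes it accepts
        'isbn': {'@type': 'Property', 'fields': ['Text']},
        # enumeration: wrapper listing values, plus a companion BookFormatTypeClass message
        'BookFormatType': {'@type': 'EnumWrapper',
                           'values': ['Unknown', 'EBook', 'Hardcover'],
                           'fields': ['id', 'BookFormatTypeClass']},
    },
    'primitives': ['Number', 'Text', 'URL'],
}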
ba98f31ce759246d4f4d895a04f1b6e6870de4e065b0597ea7ec85f9023214e8 | def create(node):
'Create an instance of the appropriate DriverFields class.\n\n :param node: a node object returned from ironicclient\n :returns: GenericDriverFields or a subclass thereof, as appropriate\n for the supplied node.\n '
if ('pxe' in node.driver):
return PXEDriverFields(node)
else:
return GenericDriverFields(node) | Create an instance of the appropriate DriverFields class.
:param node: a node object returned from ironicclient
:returns: GenericDriverFields or a subclass thereof, as appropriate
for the supplied node. | nova/virt/ironic/patcher.py | create | Metaswitch/calico-nova | 7 | python | def create(node):
'Create an instance of the appropriate DriverFields class.\n\n :param node: a node object returned from ironicclient\n :returns: GenericDriverFields or a subclass thereof, as appropriate\n for the supplied node.\n '
if ('pxe' in node.driver):
return PXEDriverFields(node)
else:
return GenericDriverFields(node) | def create(node):
'Create an instance of the appropriate DriverFields class.\n\n :param node: a node object returned from ironicclient\n :returns: GenericDriverFields or a subclass thereof, as appropriate\n for the supplied node.\n '
if ('pxe' in node.driver):
return PXEDriverFields(node)
else:
return GenericDriverFields(node)<|docstring|>Create an instance of the appropriate DriverFields class.
:param node: a node object returned from ironicclient
:returns: GenericDriverFields or a subclass thereof, as appropriate
for the supplied node.<|endoftext|> |
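create() is a plain factory keyed on the substring 'pxe' in the node's driver name. Illustrative dispatch with stand-in node objects (SimpleNamespace rather than real ironicclient nodes):

from types import SimpleNamespace

pxe_node = SimpleNamespace(driver='pxe_ipmitool', driver_info={})
agent_node = SimpleNamespace(driver='agent_ipmitool', driver_info={})

print(type(create(pxe_node)).__name__)    # -> PXEDriverFields
print(type(create(agent_node)).__name__)  # -> GenericDriverFields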
c94036e1e653b0fd5d3d6fff595d8bdeb8f386427594d2837ef611647caaaadc | def get_deploy_patch(self, instance, image_meta, flavor, preserve_ephemeral=None):
'Build a patch to add the required fields to deploy a node.\n\n :param instance: the instance object.\n :param image_meta: the metadata associated with the instance\n image.\n :param flavor: the flavor object.\n :param preserve_ephemeral: preserve_ephemeral status (bool) to be\n specified during rebuild.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
patch = []
patch.append({'path': '/instance_info/image_source', 'op': 'add', 'value': image_meta['id']})
patch.append({'path': '/instance_info/root_gb', 'op': 'add', 'value': str(instance.root_gb)})
patch.append({'path': '/instance_info/swap_mb', 'op': 'add', 'value': str(flavor['swap'])})
if instance.ephemeral_gb:
patch.append({'path': '/instance_info/ephemeral_gb', 'op': 'add', 'value': str(instance.ephemeral_gb)})
if CONF.default_ephemeral_format:
patch.append({'path': '/instance_info/ephemeral_format', 'op': 'add', 'value': CONF.default_ephemeral_format})
if (preserve_ephemeral is not None):
patch.append({'path': '/instance_info/preserve_ephemeral', 'op': 'add', 'value': str(preserve_ephemeral)})
return patch | Build a patch to add the required fields to deploy a node.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that needs to be updated. | nova/virt/ironic/patcher.py | get_deploy_patch | Metaswitch/calico-nova | 7 | python | def get_deploy_patch(self, instance, image_meta, flavor, preserve_ephemeral=None):
'Build a patch to add the required fields to deploy a node.\n\n :param instance: the instance object.\n :param image_meta: the metadata associated with the instance\n image.\n :param flavor: the flavor object.\n :param preserve_ephemeral: preserve_ephemeral status (bool) to be\n specified during rebuild.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
patch = []
patch.append({'path': '/instance_info/image_source', 'op': 'add', 'value': image_meta['id']})
patch.append({'path': '/instance_info/root_gb', 'op': 'add', 'value': str(instance.root_gb)})
patch.append({'path': '/instance_info/swap_mb', 'op': 'add', 'value': str(flavor['swap'])})
if instance.ephemeral_gb:
patch.append({'path': '/instance_info/ephemeral_gb', 'op': 'add', 'value': str(instance.ephemeral_gb)})
if CONF.default_ephemeral_format:
patch.append({'path': '/instance_info/ephemeral_format', 'op': 'add', 'value': CONF.default_ephemeral_format})
if (preserve_ephemeral is not None):
patch.append({'path': '/instance_info/preserve_ephemeral', 'op': 'add', 'value': str(preserve_ephemeral)})
return patch | def get_deploy_patch(self, instance, image_meta, flavor, preserve_ephemeral=None):
'Build a patch to add the required fields to deploy a node.\n\n :param instance: the instance object.\n :param image_meta: the metadata associated with the instance\n image.\n :param flavor: the flavor object.\n :param preserve_ephemeral: preserve_ephemeral status (bool) to be\n specified during rebuild.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
patch = []
patch.append({'path': '/instance_info/image_source', 'op': 'add', 'value': image_meta['id']})
patch.append({'path': '/instance_info/root_gb', 'op': 'add', 'value': str(instance.root_gb)})
patch.append({'path': '/instance_info/swap_mb', 'op': 'add', 'value': str(flavor['swap'])})
if instance.ephemeral_gb:
patch.append({'path': '/instance_info/ephemeral_gb', 'op': 'add', 'value': str(instance.ephemeral_gb)})
if CONF.default_ephemeral_format:
patch.append({'path': '/instance_info/ephemeral_format', 'op': 'add', 'value': CONF.default_ephemeral_format})
if (preserve_ephemeral is not None):
patch.append({'path': '/instance_info/preserve_ephemeral', 'op': 'add', 'value': str(preserve_ephemeral)})
return patch<|docstring|>Build a patch to add the required fields to deploy a node.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that needs to be updated.<|endoftext|> |
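The return value above is an ordinary list of JSON-patch 'add' operations over the node's instance_info. For a hypothetical instance with root_gb=10, swap=512 and no ephemeral disk, the generic patch would look like this (image id invented):

patch = [
    {'path': '/instance_info/image_source', 'op': 'add', 'value': 'a1b2c3d4'},
    {'path': '/instance_info/root_gb', 'op': 'add', 'value': '10'},
    {'path': '/instance_info/swap_mb', 'op': 'add', 'value': '512'},
]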
b453b0de24f68073e03e44d745fe44953779a5fd8ea7ee316005acbf26033166 | def get_cleanup_patch(self, instance, network_info, flavor):
'Build a patch to clean up the fields.\n\n :param instance: the instance object.\n :param network_info: the instance network information.\n :param flavor: the flavor object.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
return [] | Build a patch to clean up the fields.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that needs to be updated. | nova/virt/ironic/patcher.py | get_cleanup_patch | Metaswitch/calico-nova | 7 | python | def get_cleanup_patch(self, instance, network_info, flavor):
'Build a patch to clean up the fields.\n\n :param instance: the instance object.\n :param network_info: the instance network information.\n :param flavor: the flavor object.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
return [] | def get_cleanup_patch(self, instance, network_info, flavor):
'Build a patch to clean up the fields.\n\n :param instance: the instance object.\n :param network_info: the instance network information.\n :param flavor: the flavor object.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
return []<|docstring|>Build a patch to clean up the fields.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that needs to be updated.<|endoftext|> |
e755cc6171efc10dd3e66ada7bad67137ae27df7d91af22f9c91496556faf52e | def _get_kernel_ramdisk_dict(self, flavor):
'Get the deploy ramdisk and kernel IDs from the flavor.\n\n :param flavor: the flavor object.\n :returns: a dict with the pxe options for the deploy ramdisk and\n kernel if the IDs were found in the flavor, otherwise an empty\n dict is returned.\n\n '
extra_specs = flavor['extra_specs']
deploy_kernel = extra_specs.get('baremetal:deploy_kernel_id')
deploy_ramdisk = extra_specs.get('baremetal:deploy_ramdisk_id')
deploy_ids = {}
if (deploy_kernel and deploy_ramdisk):
deploy_ids['pxe_deploy_kernel'] = deploy_kernel
deploy_ids['pxe_deploy_ramdisk'] = deploy_ramdisk
return deploy_ids | Get the deploy ramdisk and kernel IDs from the flavor.
:param flavor: the flavor object.
:returns: a dict with the pxe options for the deploy ramdisk and
kernel if the IDs were found in the flavor, otherwise an empty
dict is returned. | nova/virt/ironic/patcher.py | _get_kernel_ramdisk_dict | Metaswitch/calico-nova | 7 | python | def _get_kernel_ramdisk_dict(self, flavor):
'Get the deploy ramdisk and kernel IDs from the flavor.\n\n :param flavor: the flavor object.\n :returns: a dict with the pxe options for the deploy ramdisk and\n kernel if the IDs were found in the flavor, otherwise an empty\n dict is returned.\n\n '
extra_specs = flavor['extra_specs']
deploy_kernel = extra_specs.get('baremetal:deploy_kernel_id')
deploy_ramdisk = extra_specs.get('baremetal:deploy_ramdisk_id')
deploy_ids = {}
if (deploy_kernel and deploy_ramdisk):
deploy_ids['pxe_deploy_kernel'] = deploy_kernel
deploy_ids['pxe_deploy_ramdisk'] = deploy_ramdisk
return deploy_ids | def _get_kernel_ramdisk_dict(self, flavor):
'Get the deploy ramdisk and kernel IDs from the flavor.\n\n :param flavor: the flavor object.\n :returns: a dict with the pxe options for the deploy ramdisk and\n kernel if the IDs were found in the flavor, otherwise an empty\n dict is returned.\n\n '
extra_specs = flavor['extra_specs']
deploy_kernel = extra_specs.get('baremetal:deploy_kernel_id')
deploy_ramdisk = extra_specs.get('baremetal:deploy_ramdisk_id')
deploy_ids = {}
if (deploy_kernel and deploy_ramdisk):
deploy_ids['pxe_deploy_kernel'] = deploy_kernel
deploy_ids['pxe_deploy_ramdisk'] = deploy_ramdisk
return deploy_ids<|docstring|>Get the deploy ramdisk and kernel IDs from the flavor.
:param flavor: the flavor object.
:returns: a dict with the pxe options for the deploy ramdisk and
kernel if the IDs were found in the flavor, otherwise an empty
dict is returned.<|endoftext|> |
cb9c73af466208f98708d8ab16ad9b06b98261ce40da1981808db5366be4f4fe | def get_deploy_patch(self, instance, image_meta, flavor, preserve_ephemeral=None):
'Build a patch to add the required fields to deploy a node.\n\n Build a json-patch to add the required fields to deploy a node\n using the PXE driver.\n\n :param instance: the instance object.\n :param image_meta: the metadata associated with the instance\n image.\n :param flavor: the flavor object.\n :param preserve_ephemeral: preserve_ephemeral status (bool) to be\n specified during rebuild.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
patch = super(PXEDriverFields, self).get_deploy_patch(instance, image_meta, flavor, preserve_ephemeral)
for (key, value) in self._get_kernel_ramdisk_dict(flavor).items():
patch.append({'path': ('/driver_info/%s' % key), 'op': 'add', 'value': value})
return patch | Build a patch to add the required fields to deploy a node.
Build a json-patch to add the required fields to deploy a node
using the PXE driver.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that needs to be updated. | nova/virt/ironic/patcher.py | get_deploy_patch | Metaswitch/calico-nova | 7 | python | def get_deploy_patch(self, instance, image_meta, flavor, preserve_ephemeral=None):
'Build a patch to add the required fields to deploy a node.\n\n Build a json-patch to add the required fields to deploy a node\n using the PXE driver.\n\n :param instance: the instance object.\n :param image_meta: the metadata associated with the instance\n image.\n :param flavor: the flavor object.\n :param preserve_ephemeral: preserve_ephemeral status (bool) to be\n specified during rebuild.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
patch = super(PXEDriverFields, self).get_deploy_patch(instance, image_meta, flavor, preserve_ephemeral)
for (key, value) in self._get_kernel_ramdisk_dict(flavor).items():
patch.append({'path': ('/driver_info/%s' % key), 'op': 'add', 'value': value})
return patch | def get_deploy_patch(self, instance, image_meta, flavor, preserve_ephemeral=None):
'Build a patch to add the required fields to deploy a node.\n\n Build a json-patch to add the required fields to deploy a node\n using the PXE driver.\n\n :param instance: the instance object.\n :param image_meta: the metadata associated with the instance\n image.\n :param flavor: the flavor object.\n :param preserve_ephemeral: preserve_ephemeral status (bool) to be\n specified during rebuild.\n :returns: a json-patch with the fields that needs to be updated.\n\n '
patch = super(PXEDriverFields, self).get_deploy_patch(instance, image_meta, flavor, preserve_ephemeral)
for (key, value) in self._get_kernel_ramdisk_dict(flavor).items():
patch.append({'path': ('/driver_info/%s' % key), 'op': 'add', 'value': value})
return patch<|docstring|>Build a patch to add the required fields to deploy a node.
Build a json-patch to add the required fields to deploy a node
using the PXE driver.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that needs to be updated.<|endoftext|> |
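On top of the generic operations, the PXE override adds the deploy kernel and ramdisk IDs pulled from the flavor's extra_specs. A sketch of the inputs and the two extra operations, with placeholder Glance IDs:

flavor = {
    'swap': 0,
    'extra_specs': {
        'baremetal:deploy_kernel_id': 'kernel-uuid',
        'baremetal:deploy_ramdisk_id': 'ramdisk-uuid',
    },
}
# _get_kernel_ramdisk_dict(flavor) yields both IDs, so get_deploy_patch also appends:
extra_ops = [
    {'path': '/driver_info/pxe_deploy_kernel', 'op': 'add', 'value': 'kernel-uuid'},
    {'path': '/driver_info/pxe_deploy_ramdisk', 'op': 'add', 'value': 'ramdisk-uuid'},
]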
cacf25792a0d132ad11ff6bdd3fe9b795f160dcff7ac89e3c4684386fc2b16ef | def get_cleanup_patch(self, instance, network_info, flavor):
"Build a patch to clean up the fields.\n\n Build a json-patch to remove the fields used to deploy a node\n using the PXE driver. Note that the fields added to the Node's\n instance_info don't need to be removed because they are purged\n during the Node's tear down.\n\n :param instance: the instance object.\n :param network_info: the instance network information.\n :param flavor: the flavor object.\n :returns: a json-patch with the fields that needs to be updated.\n\n "
patch = super(PXEDriverFields, self).get_cleanup_patch(instance, network_info, flavor)
for key in self._get_kernel_ramdisk_dict(flavor):
if (key in self.node.driver_info):
patch.append({'op': 'remove', 'path': ('/driver_info/%s' % key)})
return patch | Build a patch to clean up the fields.
Build a json-patch to remove the fields used to deploy a node
using the PXE driver. Note that the fields added to the Node's
instance_info don't need to be removed because they are purged
during the Node's tear down.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that needs to be updated. | nova/virt/ironic/patcher.py | get_cleanup_patch | Metaswitch/calico-nova | 7 | python | def get_cleanup_patch(self, instance, network_info, flavor):
"Build a patch to clean up the fields.\n\n Build a json-patch to remove the fields used to deploy a node\n using the PXE driver. Note that the fields added to the Node's\n instance_info don't need to be removed because they are purged\n during the Node's tear down.\n\n :param instance: the instance object.\n :param network_info: the instance network information.\n :param flavor: the flavor object.\n :returns: a json-patch with the fields that needs to be updated.\n\n "
patch = super(PXEDriverFields, self).get_cleanup_patch(instance, network_info, flavor)
for key in self._get_kernel_ramdisk_dict(flavor):
if (key in self.node.driver_info):
patch.append({'op': 'remove', 'path': ('/driver_info/%s' % key)})
return patch | def get_cleanup_patch(self, instance, network_info, flavor):
"Build a patch to clean up the fields.\n\n Build a json-patch to remove the fields used to deploy a node\n using the PXE driver. Note that the fields added to the Node's\n instance_info don't need to be removed because they are purged\n during the Node's tear down.\n\n :param instance: the instance object.\n :param network_info: the instance network information.\n :param flavor: the flavor object.\n :returns: a json-patch with the fields that needs to be updated.\n\n "
patch = super(PXEDriverFields, self).get_cleanup_patch(instance, network_info, flavor)
for key in self._get_kernel_ramdisk_dict(flavor):
if (key in self.node.driver_info):
patch.append({'op': 'remove', 'path': ('/driver_info/%s' % key)})
return patch<|docstring|>Build a patch to clean up the fields.
Build a json-patch to remove the fields used to deploy a node
using the PXE driver. Note that the fields added to the Node's
instance_info don't need to be removed because they are purged
during the Node's tear down.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that needs to be updated.<|endoftext|> |
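Conversely, the cleanup patch only emits 'remove' operations for keys actually present in the node's driver_info, so tearing down a PXE-deployed node would typically produce:

cleanup_ops = [
    {'op': 'remove', 'path': '/driver_info/pxe_deploy_kernel'},
    {'op': 'remove', 'path': '/driver_info/pxe_deploy_ramdisk'},
]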
38f2049c1ed06db9e1538adbe96ca2118567fce29d4e27c9625e511609267069 | def evaluate(self, state):
'Evaluate the fitness of a state vector.\n\n Parameters\n ----------\n state: array\n State array for evaluation.\n\n Returns\n -------\n fitness: float\n Value of fitness function.\n '
fitness = self.fitness_fn(state, **self.kwargs)
return fitness | Evaluate the fitness of a state vector.
Parameters
----------
state: array
State array for evaluation.
Returns
-------
fitness: float
Value of fitness function. | mlrose_hiive/fitness/custom_fitness.py | evaluate | Inquisitive-ME/mlrose | 63 | python | def evaluate(self, state):
'Evaluate the fitness of a state vector.\n\n Parameters\n ----------\n state: array\n State array for evaluation.\n\n Returns\n -------\n fitness: float\n Value of fitness function.\n '
fitness = self.fitness_fn(state, **self.kwargs)
return fitness | def evaluate(self, state):
'Evaluate the fitness of a state vector.\n\n Parameters\n ----------\n state: array\n State array for evaluation.\n\n Returns\n -------\n fitness: float\n Value of fitness function.\n '
fitness = self.fitness_fn(state, **self.kwargs)
return fitness<|docstring|>Evaluate the fitness of a state vector.
Parameters
----------
state: array
State array for evaluation.
Returns
-------
fitness: float
Value of fitness function.<|endoftext|> |
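Typical use is to wrap a user-defined function and let evaluate() forward the stored keyword arguments to it. The constructor signature CustomFitness(fitness_fn, problem_type='either', **kwargs) and the package-level import are assumptions based on the surrounding class, not verified against this exact version:

import numpy as np
from mlrose_hiive import CustomFitness

def weighted_sum(state, weights):
    # user-supplied fitness: dot product of the state vector with fixed weights
    return float(np.dot(state, weights))

fitness = CustomFitness(weighted_sum, problem_type='discrete', weights=[1, 2, 3])
print(fitness.evaluate(np.array([1, 0, 1])))  # -> 4.0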
b358c98222c33f66fb5b47a2466431faaf2fb426e41176a76ba09724f73565ae | def get_prob_type(self):
" Return the problem type.\n\n Returns\n -------\n self.prob_type: string\n Specifies problem type as 'discrete', 'continuous', 'tsp'\n or 'either'.\n "
return self.problem_type | Return the problem type.
Returns
-------
self.prob_type: string
Specifies problem type as 'discrete', 'continuous', 'tsp'
or 'either'. | mlrose_hiive/fitness/custom_fitness.py | get_prob_type | Inquisitive-ME/mlrose | 63 | python | def get_prob_type(self):
" Return the problem type.\n\n Returns\n -------\n self.prob_type: string\n Specifies problem type as 'discrete', 'continuous', 'tsp'\n or 'either'.\n "
return self.problem_type | def get_prob_type(self):
" Return the problem type.\n\n Returns\n -------\n self.prob_type: string\n Specifies problem type as 'discrete', 'continuous', 'tsp'\n or 'either'.\n "
return self.problem_type<|docstring|>Return the problem type.
Returns
-------
self.prob_type: string
Specifies problem type as 'discrete', 'continuous', 'tsp'
or 'either'.<|endoftext|> |
c70dafa0e7fb1e660a286253e12043d8b37c79babaa43bd2aad09d9959de24c2 | def learn(self, experiences, gamma):
"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n "
self.optimizer.zero_grad()
(states, actions, rewards, next_states, dones) = experiences
best_actions = self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)
q_values_target = self.qnetwork_target(next_states).detach()
q_expected = (rewards + ((gamma * q_values_target.gather(1, best_actions)) * (1 - dones)))
q_current = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(q_expected, q_current)
loss.backward()
self.optimizer.step()
self.soft_update(self.qnetwork_local, self.qnetwork_target, dqn_agent.TAU) | Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor | agents/double_dqn_agent.py | learn | itraveribon/banana_dqn_udacity | 0 | python | def learn(self, experiences, gamma):
"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n "
self.optimizer.zero_grad()
(states, actions, rewards, next_states, dones) = experiences
best_actions = self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)
q_values_target = self.qnetwork_target(next_states).detach()
q_expected = (rewards + ((gamma * q_values_target.gather(1, best_actions)) * (1 - dones)))
q_current = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(q_expected, q_current)
loss.backward()
self.optimizer.step()
self.soft_update(self.qnetwork_local, self.qnetwork_target, dqn_agent.TAU) | def learn(self, experiences, gamma):
"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n "
self.optimizer.zero_grad()
(states, actions, rewards, next_states, dones) = experiences
best_actions = self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)
q_values_target = self.qnetwork_target(next_states).detach()
q_expected = (rewards + ((gamma * q_values_target.gather(1, best_actions)) * (1 - dones)))
q_current = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(q_expected, q_current)
loss.backward()
self.optimizer.step()
self.soft_update(self.qnetwork_local, self.qnetwork_target, dqn_agent.TAU)<|docstring|>Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor<|endoftext|> |
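The heart of learn() is the double-DQN target: the online network chooses the greedy next action and the target network scores it. A stripped-down PyTorch sketch with random tensors standing in for a sampled batch (no replay buffer or networks from the agent):

import torch

batch_size, n_actions, gamma = 4, 3, 0.99
rewards = torch.rand(batch_size, 1)
dones = torch.zeros(batch_size, 1)
q_next_online = torch.rand(batch_size, n_actions)   # stands in for qnetwork_local(next_states)
q_next_target = torch.rand(batch_size, n_actions)   # stands in for qnetwork_target(next_states)

best_actions = q_next_online.argmax(1, keepdim=True)  # select action with the online net
q_expected = rewards + gamma * q_next_target.gather(1, best_actions) * (1 - dones)  # evaluate with the target net
print(q_expected.shape)  # torch.Size([4, 1])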
fd152d0aa17cf22d2257ab3c4cb1b46af1475be9d4028f9a7c64c0fc9526e740 | def create_reverse_many_to_one_manager(superclass, rel):
'\n Create a manager for the reverse side of a many-to-one relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-one relations.\n '
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
db = (self._db or router.db_for_read(self.model, instance=self.instance))
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if ((val is None) or ((val == '') and empty_strings_as_null)):
return queryset.none()
if self.field.many_to_one:
try:
target_field = self.field.target_field
except FieldError:
rel_obj_id = tuple([getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[(- 1)].target_fields])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.remote_field.get_cache_name())
except (AttributeError, KeyError):
pass
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.remote_field.get_cache_name()]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if (queryset is None):
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using((queryset._db or self._db))
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {('%s__in' % self.field.name): instances}
queryset = queryset.filter(**query)
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return (queryset, rel_obj_attr, instance_attr, False, cache_name, False)
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if (not isinstance(obj, self.model)):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if (obj._state.adding or (obj._state.db != db)):
raise ValueError(("%r instance isn't saved. Use bulk=False or save the object first." % obj))
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{self.field.name: self.instance})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
if rel.field.null:
def remove(self, *objs, bulk=True):
if (not objs):
return
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
if (not isinstance(obj, self.model)):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
if (self.field.get_local_related_value(obj) == val):
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(('%r is not related to %r.' % (obj, self.instance)))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if (obj in old_objs):
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager | Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations. | django/db/models/fields/related_descriptors.py | create_reverse_many_to_one_manager | cangSDARM/django | 61,676 | python | def create_reverse_many_to_one_manager(superclass, rel):
'\n Create a manager for the reverse side of a many-to-one relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-one relations.\n '
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
db = (self._db or router.db_for_read(self.model, instance=self.instance))
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if ((val is None) or ((val == '') and empty_strings_as_null)):
return queryset.none()
if self.field.many_to_one:
try:
target_field = self.field.target_field
except FieldError:
rel_obj_id = tuple([getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[(- 1)].target_fields])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.remote_field.get_cache_name())
except (AttributeError, KeyError):
pass
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.remote_field.get_cache_name()]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if (queryset is None):
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using((queryset._db or self._db))
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {('%s__in' % self.field.name): instances}
queryset = queryset.filter(**query)
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return (queryset, rel_obj_attr, instance_attr, False, cache_name, False)
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if (not isinstance(obj, self.model)):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if (obj._state.adding or (obj._state.db != db)):
raise ValueError(("%r instance isn't saved. Use bulk=False or save the object first." % obj))
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{self.field.name: self.instance})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
if rel.field.null:
def remove(self, *objs, bulk=True):
if (not objs):
return
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
if (not isinstance(obj, self.model)):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
if (self.field.get_local_related_value(obj) == val):
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(('%r is not related to %r.' % (obj, self.instance)))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if (obj in old_objs):
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager | def create_reverse_many_to_one_manager(superclass, rel):
'\n Create a manager for the reverse side of a many-to-one relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-one relations.\n '
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
db = (self._db or router.db_for_read(self.model, instance=self.instance))
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
                if ((val is None) or ((val == '') and empty_strings_as_null)):
return queryset.none()
if self.field.many_to_one:
try:
target_field = self.field.target_field
except FieldError:
rel_obj_id = tuple([getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[(- 1)].target_fields])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.remote_field.get_cache_name())
except (AttributeError, KeyError):
pass
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.remote_field.get_cache_name()]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if (queryset is None):
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using((queryset._db or self._db))
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {('%s__in' % self.field.name): instances}
queryset = queryset.filter(**query)
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return (queryset, rel_obj_attr, instance_attr, False, cache_name, False)
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if (not isinstance(obj, self.model)):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if (obj._state.adding or (obj._state.db != db)):
raise ValueError(("%r instance isn't saved. Use bulk=False or save the object first." % obj))
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{self.field.name: self.instance})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
if rel.field.null:
def remove(self, *objs, bulk=True):
if (not objs):
return
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
if (not isinstance(obj, self.model)):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
if (self.field.get_local_related_value(obj) == val):
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(('%r is not related to %r.' % (obj, self.instance)))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if (obj in old_objs):
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager<|docstring|>Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.<|endoftext|> |
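For orientation, the sketch below shows how the reverse many-to-one manager built above is normally used. It is illustrative only: the Parent/Child models and the demo function are hypothetical names, not part of this module, and a configured Django project with an installed app is assumed.

from django.db import models

class Parent(models.Model):
    name = models.CharField(max_length=50)

class Child(models.Model):
    # related_name="children" is the attribute on Parent that exposes the
    # manager produced by create_reverse_many_to_one_manager().
    parent = models.ForeignKey(
        Parent, null=True, related_name="children", on_delete=models.CASCADE
    )

def reverse_manager_demo(parent, orphan):
    # With bulk=True (the default) add() issues a single UPDATE of the FK
    # column, so the objects passed in must already be saved.
    parent.children.add(orphan)
    parent.children.create()        # INSERT with the FK preset to parent
    parent.children.set([orphan])   # diffs against current children, then remove()/add()
    parent.children.clear()         # remove()/clear() exist only because the FK is null=True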
326247d1d2c37ac814f379d4ade21909a171346d0a293b11731c747b4f8547d7 | def create_forward_many_to_many_manager(superclass, rel, reverse):
'\n Create a manager for the either side of a many-to-many relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-many relations.\n '
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if (not reverse):
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for (lh_field, rh_field) in self.source_field.related_fields:
core_filter_key = ('%s__%s' % (self.query_field_name, rh_field.name))
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if (None in self.related_val):
raise ValueError(('"%r" needs to have a value for field "%s" before this many-to-many relationship can be used.' % (instance, self.pk_field_names[self.source_field_name])))
if (instance.pk is None):
raise ValueError(('%r instance needs to have a primary key value before a many-to-many relationship can be used.' % instance.__class__.__name__))
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q((self.source_field_name, self.related_val))
removed_vals_filters = ((not isinstance(removed_vals, QuerySet)) or removed_vals._has_filters())
if removed_vals_filters:
filters &= Q((f'{self.target_field_name}__in', removed_vals))
if self.symmetrical:
symmetrical_filters = Q((self.target_field_name, self.related_val))
if removed_vals_filters:
symmetrical_filters &= Q((f'{self.source_field_name}__in', removed_vals))
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if (queryset is None):
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using((queryset._db or self._db))
query = {('%s__in' % self.query_field_name): instances}
queryset = queryset._next_is_sticky().filter(**query)
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={('_prefetch_related_val_%s' % f.attname): ('%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields})
return (queryset, (lambda result: tuple((getattr(result, ('_prefetch_related_val_%s' % f.attname)) for f in fk.local_related_fields))), (lambda inst: tuple((f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields))), False, self.prefetch_cache_name, False)
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults)
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action='pre_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action='post_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj))
if (fk_val in old_ids):
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
(obj, created) = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
if created:
self.add(obj, through_defaults=through_defaults)
return (obj, created)
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
(obj, created) = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
if created:
self.add(obj, through_defaults=through_defaults)
return (obj, created)
update_or_create.alters_data = True
def _get_target_ids(self, target_field_name, objs):
'\n Return the set of ids of `objs` that the target field references.\n '
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if (not router.allow_relation(obj, self.instance)):
raise ValueError(('Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db)))
target_id = target_field.get_foreign_related_value(obj)[0]
if (target_id is None):
raise ValueError(('Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name)))
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids
def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"\n Return the subset of ids of `objs` that aren't already assigned to\n this relationship.\n "
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], ('%s__in' % target_field_name): target_ids})
return target_ids.difference(vals)
def _get_add_plan(self, db, source_field_name):
'\n Return a boolean triple of the way the add should be performed.\n\n The first element is whether or not bulk_create(ignore_conflicts)\n can be used, the second whether or not signals must be sent, and\n the third element is whether or not the immediate bulk insertion\n with conflicts ignored can be performed.\n '
can_ignore_conflicts = ((self.through._meta.auto_created is not False) and connections[db].features.supports_ignore_conflicts)
must_send_signals = ((self.reverse or (source_field_name == self.source_field_name)) and signals.m2m_changed.has_listeners(self.through))
return (can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and (not must_send_signals)))
def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):
if (not objs):
return
through_defaults = dict(resolve_callables((through_defaults or {})))
target_ids = self._get_target_ids(target_field_name, objs)
db = router.db_for_write(self.through, instance=self.instance)
(can_ignore_conflicts, must_send_signals, can_fast_add) = self._get_add_plan(db, source_field_name)
if can_fast_add:
self.through._default_manager.using(db).bulk_create([self.through(**{('%s_id' % source_field_name): self.related_val[0], ('%s_id' % target_field_name): target_id}) for target_id in target_ids], ignore_conflicts=True)
return
missing_target_ids = self._get_missing_target_ids(source_field_name, target_field_name, db, target_ids)
with transaction.atomic(using=db, savepoint=False):
if must_send_signals:
signals.m2m_changed.send(sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db)
self.through._default_manager.using(db).bulk_create([self.through(**through_defaults, **{('%s_id' % source_field_name): self.related_val[0], ('%s_id' % target_field_name): target_id}) for target_id in missing_target_ids], ignore_conflicts=can_ignore_conflicts)
if must_send_signals:
signals.m2m_changed.send(sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
if (not objs):
return
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action='pre_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{('%s__in' % self.target_field.target_field.attname): old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action='post_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)
    return ManyRelatedManager | Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations. | django/db/models/fields/related_descriptors.py | create_forward_many_to_many_manager | cangSDARM/django | 61,676 | python | def create_forward_many_to_many_manager(superclass, rel, reverse):
'\n Create a manager for the either side of a many-to-many relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-many relations.\n '
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if (not reverse):
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for (lh_field, rh_field) in self.source_field.related_fields:
core_filter_key = ('%s__%s' % (self.query_field_name, rh_field.name))
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if (None in self.related_val):
raise ValueError(('"%r" needs to have a value for field "%s" before this many-to-many relationship can be used.' % (instance, self.pk_field_names[self.source_field_name])))
if (instance.pk is None):
raise ValueError(('%r instance needs to have a primary key value before a many-to-many relationship can be used.' % instance.__class__.__name__))
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q((self.source_field_name, self.related_val))
removed_vals_filters = ((not isinstance(removed_vals, QuerySet)) or removed_vals._has_filters())
if removed_vals_filters:
filters &= Q((f'{self.target_field_name}__in', removed_vals))
if self.symmetrical:
symmetrical_filters = Q((self.target_field_name, self.related_val))
if removed_vals_filters:
symmetrical_filters &= Q((f'{self.source_field_name}__in', removed_vals))
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if (queryset is None):
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using((queryset._db or self._db))
query = {('%s__in' % self.query_field_name): instances}
queryset = queryset._next_is_sticky().filter(**query)
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={('_prefetch_related_val_%s' % f.attname): ('%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields})
return (queryset, (lambda result: tuple((getattr(result, ('_prefetch_related_val_%s' % f.attname)) for f in fk.local_related_fields))), (lambda inst: tuple((f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields))), False, self.prefetch_cache_name, False)
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults)
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action='pre_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action='post_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj))
if (fk_val in old_ids):
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
(obj, created) = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
if created:
self.add(obj, through_defaults=through_defaults)
return (obj, created)
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
(obj, created) = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
if created:
self.add(obj, through_defaults=through_defaults)
return (obj, created)
update_or_create.alters_data = True
def _get_target_ids(self, target_field_name, objs):
'\n Return the set of ids of `objs` that the target field references.\n '
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if (not router.allow_relation(obj, self.instance)):
raise ValueError(('Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db)))
target_id = target_field.get_foreign_related_value(obj)[0]
if (target_id is None):
raise ValueError(('Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name)))
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids
def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"\n Return the subset of ids of `objs` that aren't already assigned to\n this relationship.\n "
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], ('%s__in' % target_field_name): target_ids})
return target_ids.difference(vals)
def _get_add_plan(self, db, source_field_name):
'\n Return a boolean triple of the way the add should be performed.\n\n The first element is whether or not bulk_create(ignore_conflicts)\n can be used, the second whether or not signals must be sent, and\n the third element is whether or not the immediate bulk insertion\n with conflicts ignored can be performed.\n '
can_ignore_conflicts = ((self.through._meta.auto_created is not False) and connections[db].features.supports_ignore_conflicts)
must_send_signals = ((self.reverse or (source_field_name == self.source_field_name)) and signals.m2m_changed.has_listeners(self.through))
return (can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and (not must_send_signals)))
def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):
if (not objs):
return
through_defaults = dict(resolve_callables((through_defaults or {})))
target_ids = self._get_target_ids(target_field_name, objs)
db = router.db_for_write(self.through, instance=self.instance)
(can_ignore_conflicts, must_send_signals, can_fast_add) = self._get_add_plan(db, source_field_name)
if can_fast_add:
self.through._default_manager.using(db).bulk_create([self.through(**{('%s_id' % source_field_name): self.related_val[0], ('%s_id' % target_field_name): target_id}) for target_id in target_ids], ignore_conflicts=True)
return
missing_target_ids = self._get_missing_target_ids(source_field_name, target_field_name, db, target_ids)
with transaction.atomic(using=db, savepoint=False):
if must_send_signals:
signals.m2m_changed.send(sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db)
self.through._default_manager.using(db).bulk_create([self.through(**through_defaults, **{('%s_id' % source_field_name): self.related_val[0], ('%s_id' % target_field_name): target_id}) for target_id in missing_target_ids], ignore_conflicts=can_ignore_conflicts)
if must_send_signals:
signals.m2m_changed.send(sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
if (not objs):
return
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action='pre_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{('%s__in' % self.target_field.target_field.attname): old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action='post_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager | def create_forward_many_to_many_manager(superclass, rel, reverse):
'\n Create a manager for the either side of a many-to-many relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-many relations.\n '
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if (not reverse):
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for (lh_field, rh_field) in self.source_field.related_fields:
core_filter_key = ('%s__%s' % (self.query_field_name, rh_field.name))
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if (None in self.related_val):
raise ValueError(('"%r" needs to have a value for field "%s" before this many-to-many relationship can be used.' % (instance, self.pk_field_names[self.source_field_name])))
if (instance.pk is None):
raise ValueError(('%r instance needs to have a primary key value before a many-to-many relationship can be used.' % instance.__class__.__name__))
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q((self.source_field_name, self.related_val))
removed_vals_filters = ((not isinstance(removed_vals, QuerySet)) or removed_vals._has_filters())
if removed_vals_filters:
filters &= Q((f'{self.target_field_name}__in', removed_vals))
if self.symmetrical:
symmetrical_filters = Q((self.target_field_name, self.related_val))
if removed_vals_filters:
symmetrical_filters &= Q((f'{self.source_field_name}__in', removed_vals))
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if (queryset is None):
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using((queryset._db or self._db))
query = {('%s__in' % self.query_field_name): instances}
queryset = queryset._next_is_sticky().filter(**query)
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={('_prefetch_related_val_%s' % f.attname): ('%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields})
return (queryset, (lambda result: tuple((getattr(result, ('_prefetch_related_val_%s' % f.attname)) for f in fk.local_related_fields))), (lambda inst: tuple((f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields))), False, self.prefetch_cache_name, False)
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults)
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action='pre_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action='post_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj))
if (fk_val in old_ids):
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
(obj, created) = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
if created:
self.add(obj, through_defaults=through_defaults)
return (obj, created)
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
(obj, created) = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
if created:
self.add(obj, through_defaults=through_defaults)
return (obj, created)
update_or_create.alters_data = True
def _get_target_ids(self, target_field_name, objs):
'\n Return the set of ids of `objs` that the target field references.\n '
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if (not router.allow_relation(obj, self.instance)):
raise ValueError(('Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db)))
target_id = target_field.get_foreign_related_value(obj)[0]
if (target_id is None):
raise ValueError(('Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name)))
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids
def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"\n Return the subset of ids of `objs` that aren't already assigned to\n this relationship.\n "
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], ('%s__in' % target_field_name): target_ids})
return target_ids.difference(vals)
def _get_add_plan(self, db, source_field_name):
'\n Return a boolean triple of the way the add should be performed.\n\n The first element is whether or not bulk_create(ignore_conflicts)\n can be used, the second whether or not signals must be sent, and\n the third element is whether or not the immediate bulk insertion\n with conflicts ignored can be performed.\n '
can_ignore_conflicts = ((self.through._meta.auto_created is not False) and connections[db].features.supports_ignore_conflicts)
must_send_signals = ((self.reverse or (source_field_name == self.source_field_name)) and signals.m2m_changed.has_listeners(self.through))
return (can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and (not must_send_signals)))
def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):
if (not objs):
return
through_defaults = dict(resolve_callables((through_defaults or {})))
target_ids = self._get_target_ids(target_field_name, objs)
db = router.db_for_write(self.through, instance=self.instance)
(can_ignore_conflicts, must_send_signals, can_fast_add) = self._get_add_plan(db, source_field_name)
if can_fast_add:
self.through._default_manager.using(db).bulk_create([self.through(**{('%s_id' % source_field_name): self.related_val[0], ('%s_id' % target_field_name): target_id}) for target_id in target_ids], ignore_conflicts=True)
return
missing_target_ids = self._get_missing_target_ids(source_field_name, target_field_name, db, target_ids)
with transaction.atomic(using=db, savepoint=False):
if must_send_signals:
signals.m2m_changed.send(sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db)
self.through._default_manager.using(db).bulk_create([self.through(**through_defaults, **{('%s_id' % source_field_name): self.related_val[0], ('%s_id' % target_field_name): target_id}) for target_id in missing_target_ids], ignore_conflicts=can_ignore_conflicts)
if must_send_signals:
signals.m2m_changed.send(sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
if (not objs):
return
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action='pre_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{('%s__in' % self.target_field.target_field.attname): old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action='post_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)
    return ManyRelatedManager<|docstring|>Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.<|endoftext|> |
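A companion sketch for the many-to-many manager above. The Pizza/Topping models and the explicit through table are hypothetical; they only serve to show the add()/set()/remove()/clear() surface and the through_defaults hook visible in the code. A configured Django project is assumed.

from django.db import models

class Topping(models.Model):
    name = models.CharField(max_length=30)

class Pizza(models.Model):
    name = models.CharField(max_length=30)
    toppings = models.ManyToManyField(
        Topping, through="PizzaTopping", related_name="pizzas"
    )

class PizzaTopping(models.Model):
    pizza = models.ForeignKey(Pizza, on_delete=models.CASCADE)
    topping = models.ForeignKey(Topping, on_delete=models.CASCADE)
    extra_helping = models.BooleanField(default=False)

def m2m_manager_demo(pizza, mushroom, onion):
    # through_defaults feeds the extra columns of rows created on the through table.
    pizza.toppings.add(mushroom, through_defaults={"extra_helping": True})
    pizza.toppings.set([mushroom, onion])   # computes old vs. new ids, then remove()/add()
    pizza.toppings.remove(onion)            # deletes through rows; m2m_changed signals fire
    pizza.toppings.clear()                  # pre_clear/post_clear are sent with pk_set=None
    onion.pizzas.add(pizza)                 # the reverse side uses the same manager class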
d136ef9c88edcd82efd4cfb45788c6ff0fe555965affd1020f4a28726d7b864c | def __get__(self, instance, cls=None):
"\n Get the related instance through the forward relation.\n\n With the example above, when getting ``child.parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``cls`` is the ``Child`` class (we don't need it)\n "
if (instance is None):
return self
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = (None not in self.field.get_local_related_value(instance))
ancestor_link = (instance._meta.get_ancestor_link(self.field.model) if has_value else None)
if (ancestor_link and ancestor_link.is_cached(instance)):
ancestor = ancestor_link.get_cached_value(instance)
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if ((rel_obj is None) and has_value):
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
if (not remote_field.multiple):
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if ((rel_obj is None) and (not self.field.null)):
raise self.RelatedObjectDoesNotExist(('%s has no %s.' % (self.field.model.__name__, self.field.name)))
else:
return rel_obj | Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it) | django/db/models/fields/related_descriptors.py | __get__ | cangSDARM/django | 61,676 | python | def __get__(self, instance, cls=None):
"\n Get the related instance through the forward relation.\n\n With the example above, when getting ``child.parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``cls`` is the ``Child`` class (we don't need it)\n "
if (instance is None):
return self
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = (None not in self.field.get_local_related_value(instance))
ancestor_link = (instance._meta.get_ancestor_link(self.field.model) if has_value else None)
if (ancestor_link and ancestor_link.is_cached(instance)):
ancestor = ancestor_link.get_cached_value(instance)
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if ((rel_obj is None) and has_value):
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
if (not remote_field.multiple):
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if ((rel_obj is None) and (not self.field.null)):
raise self.RelatedObjectDoesNotExist(('%s has no %s.' % (self.field.model.__name__, self.field.name)))
else:
return rel_obj | def __get__(self, instance, cls=None):
"\n Get the related instance through the forward relation.\n\n With the example above, when getting ``child.parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``cls`` is the ``Child`` class (we don't need it)\n "
if (instance is None):
return self
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = (None not in self.field.get_local_related_value(instance))
ancestor_link = (instance._meta.get_ancestor_link(self.field.model) if has_value else None)
if (ancestor_link and ancestor_link.is_cached(instance)):
ancestor = ancestor_link.get_cached_value(instance)
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if ((rel_obj is None) and has_value):
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
if (not remote_field.multiple):
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if ((rel_obj is None) and (not self.field.null)):
raise self.RelatedObjectDoesNotExist(('%s has no %s.' % (self.field.model.__name__, self.field.name)))
else:
return rel_obj<|docstring|>Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)<|endoftext|> |
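A small sketch of the caching contract implemented by this __get__, re-using the hypothetical Parent/Child models from the earlier sketch (child.parent mirrors the docstring's example).

def forward_get_demo(child_pk):
    child = Child.objects.get(pk=child_pk)
    first = child.parent             # one query, result cached on the instance
    second = child.parent            # no query: served from the field's cache
    assert first is second
    # select_related() pre-fills the same cache, so access stays query-free.
    prefetched = Child.objects.select_related("parent").get(pk=child_pk)
    prefetched.parent
    # With null=True a missing relation yields None; with null=False the
    # descriptor would raise RelatedObjectDoesNotExist instead.
    assert Child(parent=None).parent is None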
2928fbb49c164fa2c7ab66f992cf435e4b7232fb741ae09857d6764079b86b4b | def __set__(self, instance, value):
'\n Set the related instance through the forward relation.\n\n With the example above, when setting ``child.parent = parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``value`` is the ``parent`` instance on the right of the equal sign\n '
if ((value is not None) and (not isinstance(value, self.field.remote_field.model._meta.concrete_model))):
raise ValueError(('Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name)))
elif (value is not None):
if (instance._state.db is None):
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if (value._state.db is None):
value._state.db = router.db_for_write(value.__class__, instance=instance)
if (not router.allow_relation(value, instance)):
raise ValueError(('Cannot assign "%r": the current database router prevents this relation.' % value))
remote_field = self.field.remote_field
if (value is None):
related = self.field.get_cached_value(instance, default=None)
if (related is not None):
remote_field.set_cached_value(related, None)
for (lh_field, rh_field) in self.field.related_fields:
setattr(instance, lh_field.attname, None)
else:
for (lh_field, rh_field) in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
self.field.set_cached_value(instance, value)
if ((value is not None) and (not remote_field.multiple)):
remote_field.set_cached_value(value, instance) | Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign | django/db/models/fields/related_descriptors.py | __set__ | cangSDARM/django | 61,676 | python | def __set__(self, instance, value):
'\n Set the related instance through the forward relation.\n\n With the example above, when setting ``child.parent = parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``value`` is the ``parent`` instance on the right of the equal sign\n '
if ((value is not None) and (not isinstance(value, self.field.remote_field.model._meta.concrete_model))):
raise ValueError(('Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name)))
elif (value is not None):
if (instance._state.db is None):
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if (value._state.db is None):
value._state.db = router.db_for_write(value.__class__, instance=instance)
if (not router.allow_relation(value, instance)):
raise ValueError(('Cannot assign "%r": the current database router prevents this relation.' % value))
remote_field = self.field.remote_field
if (value is None):
related = self.field.get_cached_value(instance, default=None)
if (related is not None):
remote_field.set_cached_value(related, None)
for (lh_field, rh_field) in self.field.related_fields:
setattr(instance, lh_field.attname, None)
else:
for (lh_field, rh_field) in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
self.field.set_cached_value(instance, value)
if ((value is not None) and (not remote_field.multiple)):
remote_field.set_cached_value(value, instance) | def __set__(self, instance, value):
'\n Set the related instance through the forward relation.\n\n With the example above, when setting ``child.parent = parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``value`` is the ``parent`` instance on the right of the equal sign\n '
if ((value is not None) and (not isinstance(value, self.field.remote_field.model._meta.concrete_model))):
raise ValueError(('Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name)))
elif (value is not None):
if (instance._state.db is None):
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if (value._state.db is None):
value._state.db = router.db_for_write(value.__class__, instance=instance)
if (not router.allow_relation(value, instance)):
raise ValueError(('Cannot assign "%r": the current database router prevents this relation.' % value))
remote_field = self.field.remote_field
if (value is None):
related = self.field.get_cached_value(instance, default=None)
if (related is not None):
remote_field.set_cached_value(related, None)
for (lh_field, rh_field) in self.field.related_fields:
setattr(instance, lh_field.attname, None)
else:
for (lh_field, rh_field) in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
self.field.set_cached_value(instance, value)
if ((value is not None) and (not remote_field.multiple)):
remote_field.set_cached_value(value, instance)<|docstring|>Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign<|endoftext|> |
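And the matching sketch for assignment through this __set__, again with the hypothetical Parent/Child models; the comments note what happens in memory versus at save time.

def forward_set_demo(child, parent):
    child.parent = parent        # copies parent.pk into child.parent_id and caches parent on child
    assert child.parent_id == parent.pk
    child.save()                 # __set__ only mutates attributes; the UPDATE happens here
    child.parent = None          # clears both the cached object and the FK attribute
    assert child.parent_id is None
    try:
        child.parent = object()  # anything other than a Parent instance or None is rejected
    except ValueError:
        pass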
8edd1cca65d873d65c417a438245c0c81a591c57ae1dba30e9541e0f8b8ba528 | def __reduce__(self):
'\n Pickling should return the instance attached by self.field on the\n model, not a new copy of that descriptor. Use getattr() to retrieve\n the instance directly from the model.\n '
return (getattr, (self.field.model, self.field.name)) | Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model. | django/db/models/fields/related_descriptors.py | __reduce__ | cangSDARM/django | 61,676 | python | def __reduce__(self):
'\n Pickling should return the instance attached by self.field on the\n model, not a new copy of that descriptor. Use getattr() to retrieve\n the instance directly from the model.\n '
return (getattr, (self.field.model, self.field.name)) | def __reduce__(self):
'\n Pickling should return the instance attached by self.field on the\n model, not a new copy of that descriptor. Use getattr() to retrieve\n the instance directly from the model.\n '
return (getattr, (self.field.model, self.field.name))<|docstring|>Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.<|endoftext|> |
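The pickling guarantee described above can be checked with a couple of lines; Child is still the hypothetical model from the earlier sketches and must be importable for pickle to work.

import pickle

def pickle_descriptor_demo():
    descriptor = Child.__dict__["parent"]             # the ForwardManyToOneDescriptor itself
    restored = pickle.loads(pickle.dumps(descriptor))
    # __reduce__ points the unpickler at getattr(Child, "parent"), so the
    # original descriptor attached to the model class comes back, not a copy.
    assert restored is Child.__dict__["parent"]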
80cdb0ce0d0ba93a1ecdd13352f2c0ee0451e0942e32073a386261c3faf470dc | def __get__(self, instance, cls=None):
'\n Get the related instance through the reverse relation.\n\n With the example above, when getting ``place.restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``cls`` is the ``Place`` class (unused)\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n '
if (instance is None):
return self
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if (related_pk is None):
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if (rel_obj is None):
raise self.RelatedObjectDoesNotExist(('%s has no %s.' % (instance.__class__.__name__, self.related.get_accessor_name())))
else:
return rel_obj | Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. | django/db/models/fields/related_descriptors.py | __get__ | cangSDARM/django | 61,676 | python | def __get__(self, instance, cls=None):
'\n Get the related instance through the reverse relation.\n\n With the example above, when getting ``place.restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``cls`` is the ``Place`` class (unused)\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n '
if (instance is None):
return self
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if (related_pk is None):
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if (rel_obj is None):
raise self.RelatedObjectDoesNotExist(('%s has no %s.' % (instance.__class__.__name__, self.related.get_accessor_name())))
else:
return rel_obj | def __get__(self, instance, cls=None):
'\n Get the related instance through the reverse relation.\n\n With the example above, when getting ``place.restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``cls`` is the ``Place`` class (unused)\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n '
if (instance is None):
return self
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if (related_pk is None):
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if (rel_obj is None):
raise self.RelatedObjectDoesNotExist(('%s has no %s.' % (instance.__class__.__name__, self.related.get_accessor_name())))
else:
return rel_obj<|docstring|>Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.<|endoftext|> |
c1f895473fdbcffa9438437de8f9af0f0f371ded8c6e5944c1b3fc041f1aebd9 | def __set__(self, instance, value):
'\n Set the related instance through the reverse relation.\n\n With the example above, when setting ``place.restaurant = restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``value`` is the ``restaurant`` instance on the right of the equal sign\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n '
if (value is None):
rel_obj = self.related.get_cached_value(instance, default=None)
if (rel_obj is not None):
self.related.delete_cached_value(instance)
setattr(rel_obj, self.related.field.name, None)
elif (not isinstance(value, self.related.related_model)):
raise ValueError(('Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name)))
else:
if (instance._state.db is None):
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if (value._state.db is None):
value._state.db = router.db_for_write(value.__class__, instance=instance)
if (not router.allow_relation(value, instance)):
raise ValueError(('Cannot assign "%r": the current database router prevents this relation.' % value))
related_pk = tuple((getattr(instance, field.attname) for field in self.related.field.foreign_related_fields))
for (index, field) in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
self.related.set_cached_value(instance, value)
self.related.field.set_cached_value(value, instance) | Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. | django/db/models/fields/related_descriptors.py | __set__ | cangSDARM/django | 61,676 | python | def __set__(self, instance, value):
'\n Set the related instance through the reverse relation.\n\n With the example above, when setting ``place.restaurant = restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``value`` is the ``restaurant`` instance on the right of the equal sign\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n '
if (value is None):
rel_obj = self.related.get_cached_value(instance, default=None)
if (rel_obj is not None):
self.related.delete_cached_value(instance)
setattr(rel_obj, self.related.field.name, None)
elif (not isinstance(value, self.related.related_model)):
raise ValueError(('Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name)))
else:
if (instance._state.db is None):
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if (value._state.db is None):
value._state.db = router.db_for_write(value.__class__, instance=instance)
if (not router.allow_relation(value, instance)):
raise ValueError(('Cannot assign "%r": the current database router prevents this relation.' % value))
related_pk = tuple((getattr(instance, field.attname) for field in self.related.field.foreign_related_fields))
for (index, field) in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
self.related.set_cached_value(instance, value)
self.related.field.set_cached_value(value, instance) | def __set__(self, instance, value):
'\n Set the related instance through the reverse relation.\n\n With the example above, when setting ``place.restaurant = restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``value`` is the ``restaurant`` instance on the right of the equal sign\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n '
if (value is None):
rel_obj = self.related.get_cached_value(instance, default=None)
if (rel_obj is not None):
self.related.delete_cached_value(instance)
setattr(rel_obj, self.related.field.name, None)
elif (not isinstance(value, self.related.related_model)):
raise ValueError(('Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name)))
else:
if (instance._state.db is None):
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if (value._state.db is None):
value._state.db = router.db_for_write(value.__class__, instance=instance)
if (not router.allow_relation(value, instance)):
raise ValueError(('Cannot assign "%r": the current database router prevents this relation.' % value))
related_pk = tuple((getattr(instance, field.attname) for field in self.related.field.foreign_related_fields))
for (index, field) in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
self.related.set_cached_value(instance, value)
self.related.field.set_cached_value(value, instance)<|docstring|>Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.<|endoftext|> |
827222b5ba39c92fc6367abb0da37a6678b82de1d54f9743a8d4fd70758fe196 | def __get__(self, instance, cls=None):
'\n Get the related objects through the reverse relation.\n\n With the example above, when getting ``parent.children``:\n\n - ``self`` is the descriptor managing the ``children`` attribute\n - ``instance`` is the ``parent`` instance\n - ``cls`` is the ``Parent`` class (unused)\n '
if (instance is None):
return self
key = self.related_manager_cache_key
instance_cache = instance._state.related_managers_cache
if (key not in instance_cache):
instance_cache[key] = self.related_manager_cls(instance)
return instance_cache[key] | Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused) | django/db/models/fields/related_descriptors.py | __get__ | cangSDARM/django | 61,676 | python | def __get__(self, instance, cls=None):
'\n Get the related objects through the reverse relation.\n\n With the example above, when getting ``parent.children``:\n\n - ``self`` is the descriptor managing the ``children`` attribute\n - ``instance`` is the ``parent`` instance\n - ``cls`` is the ``Parent`` class (unused)\n '
if (instance is None):
return self
key = self.related_manager_cache_key
instance_cache = instance._state.related_managers_cache
if (key not in instance_cache):
instance_cache[key] = self.related_manager_cls(instance)
return instance_cache[key] | def __get__(self, instance, cls=None):
'\n Get the related objects through the reverse relation.\n\n With the example above, when getting ``parent.children``:\n\n - ``self`` is the descriptor managing the ``children`` attribute\n - ``instance`` is the ``parent`` instance\n - ``cls`` is the ``Parent`` class (unused)\n '
if (instance is None):
return self
key = self.related_manager_cache_key
instance_cache = instance._state.related_managers_cache
if (key not in instance_cache):
instance_cache[key] = self.related_manager_cls(instance)
return instance_cache[key]<|docstring|>Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)<|endoftext|> |
690164ed1c57b5b8fe67c4d9f13bafb1f1388b255017a8bb3abada40a0c9800d | def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
db = (self._db or router.db_for_read(self.model, instance=self.instance))
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if ((val is None) or ((val == '') and empty_strings_as_null)):
return queryset.none()
if self.field.many_to_one:
try:
target_field = self.field.target_field
except FieldError:
rel_obj_id = tuple([getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[(- 1)].target_fields])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset | Filter the queryset for the instance this manager is bound to. | django/db/models/fields/related_descriptors.py | _apply_rel_filters | cangSDARM/django | 61,676 | python | def _apply_rel_filters(self, queryset):
'\n \n '
db = (self._db or router.db_for_read(self.model, instance=self.instance))
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if ((val is None) or ((val == '') and empty_strings_as_null)):
return queryset.none()
if self.field.many_to_one:
try:
target_field = self.field.target_field
except FieldError:
rel_obj_id = tuple([getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[(- 1)].target_fields])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset | def _apply_rel_filters(self, queryset):
'\n \n '
db = (self._db or router.db_for_read(self.model, instance=self.instance))
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if ((val is None) or ((val == '') and empty_strings_as_null)):
return queryset.none()
if self.field.many_to_one:
try:
target_field = self.field.target_field
except FieldError:
rel_obj_id = tuple([getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[(- 1)].target_fields])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset<|docstring|>Filter the queryset for the instance this manager is bound to.<|endoftext|> |
28ae2cf22d0b7257827e4d7b989185edacf613269336db9f1d9a80c802eca8fd | def _apply_rel_filters(self, queryset):
'\n Filter the queryset for the instance this manager is bound to.\n '
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters) | Filter the queryset for the instance this manager is bound to. | django/db/models/fields/related_descriptors.py | _apply_rel_filters | cangSDARM/django | 61,676 | python | def _apply_rel_filters(self, queryset):
'\n \n '
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters) | def _apply_rel_filters(self, queryset):
'\n \n '
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters)<|docstring|>Filter the queryset for the instance this manager is bound to.<|endoftext|> |
1f23aed951f377dab52d37065255f074cc7f2023a1058771d46b929e799e8f9f | def _get_target_ids(self, target_field_name, objs):
'\n Return the set of ids of `objs` that the target field references.\n '
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if (not router.allow_relation(obj, self.instance)):
raise ValueError(('Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db)))
target_id = target_field.get_foreign_related_value(obj)[0]
if (target_id is None):
raise ValueError(('Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name)))
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids | Return the set of ids of `objs` that the target field references. | django/db/models/fields/related_descriptors.py | _get_target_ids | cangSDARM/django | 61,676 | python | def _get_target_ids(self, target_field_name, objs):
'\n \n '
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if (not router.allow_relation(obj, self.instance)):
raise ValueError(('Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db)))
target_id = target_field.get_foreign_related_value(obj)[0]
if (target_id is None):
raise ValueError(('Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name)))
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids | def _get_target_ids(self, target_field_name, objs):
'\n \n '
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if (not router.allow_relation(obj, self.instance)):
raise ValueError(('Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db)))
target_id = target_field.get_foreign_related_value(obj)[0]
if (target_id is None):
raise ValueError(('Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name)))
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)))
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids<|docstring|>Return the set of ids of `objs` that the target field references.<|endoftext|> |
79c27c3e7e771cafaddf01336bf6c38a3ce3b3ac81d13ebdd964655850c68cde | def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"\n Return the subset of ids of `objs` that aren't already assigned to\n this relationship.\n "
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], ('%s__in' % target_field_name): target_ids})
return target_ids.difference(vals) | Return the subset of ids of `objs` that aren't already assigned to
this relationship. | django/db/models/fields/related_descriptors.py | _get_missing_target_ids | cangSDARM/django | 61,676 | python | def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"\n Return the subset of ids of `objs` that aren't already assigned to\n this relationship.\n "
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], ('%s__in' % target_field_name): target_ids})
return target_ids.difference(vals) | def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"\n Return the subset of ids of `objs` that aren't already assigned to\n this relationship.\n "
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], ('%s__in' % target_field_name): target_ids})
return target_ids.difference(vals)<|docstring|>Return the subset of ids of `objs` that aren't already assigned to
this relationship.<|endoftext|> |
00b269b5e10d6b7f8c54852c231b6c7ec0734410160963c482373f20dc09f277 | def _get_add_plan(self, db, source_field_name):
'\n Return a boolean triple of the way the add should be performed.\n\n The first element is whether or not bulk_create(ignore_conflicts)\n can be used, the second whether or not signals must be sent, and\n the third element is whether or not the immediate bulk insertion\n with conflicts ignored can be performed.\n '
can_ignore_conflicts = ((self.through._meta.auto_created is not False) and connections[db].features.supports_ignore_conflicts)
must_send_signals = ((self.reverse or (source_field_name == self.source_field_name)) and signals.m2m_changed.has_listeners(self.through))
return (can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and (not must_send_signals))) | Return a boolean triple of the way the add should be performed.
The first element is whether or not bulk_create(ignore_conflicts)
can be used, the second whether or not signals must be sent, and
the third element is whether or not the immediate bulk insertion
with conflicts ignored can be performed. | django/db/models/fields/related_descriptors.py | _get_add_plan | cangSDARM/django | 61,676 | python | def _get_add_plan(self, db, source_field_name):
'\n Return a boolean triple of the way the add should be performed.\n\n The first element is whether or not bulk_create(ignore_conflicts)\n can be used, the second whether or not signals must be sent, and\n the third element is whether or not the immediate bulk insertion\n with conflicts ignored can be performed.\n '
can_ignore_conflicts = ((self.through._meta.auto_created is not False) and connections[db].features.supports_ignore_conflicts)
must_send_signals = ((self.reverse or (source_field_name == self.source_field_name)) and signals.m2m_changed.has_listeners(self.through))
return (can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and (not must_send_signals))) | def _get_add_plan(self, db, source_field_name):
'\n Return a boolean triple of the way the add should be performed.\n\n The first element is whether or not bulk_create(ignore_conflicts)\n can be used, the second whether or not signals must be sent, and\n the third element is whether or not the immediate bulk insertion\n with conflicts ignored can be performed.\n '
can_ignore_conflicts = ((self.through._meta.auto_created is not False) and connections[db].features.supports_ignore_conflicts)
must_send_signals = ((self.reverse or (source_field_name == self.source_field_name)) and signals.m2m_changed.has_listeners(self.through))
return (can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and (not must_send_signals)))<|docstring|>Return a boolean triple of the way the add should be performed.
The first element is whether or not bulk_create(ignore_conflicts)
can be used, the second whether or not signals must be sent, and
the third element is whether or not the immediate bulk insertion
with conflicts ignored can be performed.<|endoftext|> |
b7e5fb957d3eb0589aabeb73c9465b3fdf3ca99f8c63b2997fe6ee6d021076da | @staticmethod
def dispatch(f: Callable) -> Callable:
'\n This method allows the use of singledispatch on methods, \n a feature that will be implemented in functools in Python 3.8.x+ in the future.\n\n :param f: The decorated method.\n :type f: Callable\n\n :returns: Decorator method which takes the type of the second parameter instead of the first, \n as the first is self in a method, and passes this type on to the dispatcher.\n :rtype: Callable\n '
dispatcher = singledispatch(f)
@wrapt.decorator
def wrapper(wrapped, instance=None, args=(), kwargs={}):
return dispatcher.dispatch(args[0].__class__)(instance, *args, **kwargs)
out = wrapper(f)
out.register = dispatcher.register
return out | This method allows the use of singledispatch on methods,
a feature that will be implemented in functools in Python 3.8.x+ in the future.
:param f: The decorated method.
:type f: Callable
:returns: Decorator method which takes the type of the second parameter instead of the first,
as the first is self in a method, and passes this type on to the dispatcher.
:rtype: Callable | TheNounProjectAPI/call.py | dispatch | CubieDev/TheNounProjectAPI | 8 | python | @staticmethod
def dispatch(f: Callable) -> Callable:
'\n This method allows the use of singledispatch on methods, \n a feature that will be implemented in functools in Python 3.8.x+ in the future.\n\n :param f: The decorated method.\n :type f: Callable\n\n :returns: Decorator method which takes the type of the second parameter instead of the first, \n as the first is self in a method, and passes this type on to the dispatcher.\n :rtype: Callable\n '
dispatcher = singledispatch(f)
@wrapt.decorator
def wrapper(wrapped, instance=None, args=(), kwargs={}):
return dispatcher.dispatch(args[0].__class__)(instance, *args, **kwargs)
out = wrapper(f)
out.register = dispatcher.register
return out | @staticmethod
def dispatch(f: Callable) -> Callable:
'\n This method allows the use of singledispatch on methods, \n a feature that will be implemented in functools in Python 3.8.x+ in the future.\n\n :param f: The decorated method.\n :type f: Callable\n\n :returns: Decorator method which takes the type of the second parameter instead of the first, \n as the first is self in a method, and passes this type on to the dispatcher.\n :rtype: Callable\n '
dispatcher = singledispatch(f)
@wrapt.decorator
def wrapper(wrapped, instance=None, args=(), kwargs={}):
return dispatcher.dispatch(args[0].__class__)(instance, *args, **kwargs)
out = wrapper(f)
out.register = dispatcher.register
return out<|docstring|>This method allows the use of singledispatch on methods,
a feature that will be implemented in functools in Python 3.8.x+ in the future.
:param f: The decorated method.
:type f: Callable
:returns: Decorator method which takes the type of the second parameter instead of the first,
as the first is self in a method, and passes this type on to the dispatcher.
:rtype: Callable<|endoftext|> |
e132044d7e8f31e749ce83d1691db0652363c2dcd301b05e33af8682e9fded3f | @staticmethod
def _get_endpoint(model_class: Union[(Type[Model], Type[ModelList])], method: str) -> Callable:
'\n Returns wrapper which receives a requests.PreparedRequests, \n sends this request, checks for exceptions, and returns the json parsed through the correct model.\n\n :param model_class: The class of the model to use for the output data.\n :type model_class: Union[Type[Model], Type[ModelList]]\n :param method: String form of which method to use. Either "GET" or "POST".\n :type method: str\n\n :returns: Decorator function.\n :rtype: Callable\n '
@wrapt.decorator
def wrapper(wrapped, instance=None, args=(), kwargs={}) -> Union[(Model, List[Model])]:
instance._method = method
prepared_request = wrapped(*args, **kwargs)
if instance._testing:
return prepared_request
response = instance._send(prepared_request)
if (response.status_code in STATUS_CODE_SUCCESS):
json_data = response.json()
model = model_class()
return model.parse(json_data, response)
elif (response.status_code in STATUS_CODE_EXCEPTIONS):
raise STATUS_CODE_EXCEPTIONS[response.status_code](response)
else:
raise UnknownStatusCode(response)
return wrapper | Returns wrapper which receives a requests.PreparedRequests,
sends this request, checks for exceptions, and returns the json parsed through the correct model.
:param model_class: The class of the model to use for the output data.
:type model_class: Union[Type[Model], Type[ModelList]]
:param method: String form of which method to use. Either "GET" or "POST".
:type method: str
:returns: Decorator function.
:rtype: Callable | TheNounProjectAPI/call.py | _get_endpoint | CubieDev/TheNounProjectAPI | 8 | python | @staticmethod
def _get_endpoint(model_class: Union[(Type[Model], Type[ModelList])], method: str) -> Callable:
'\n Returns wrapper which receives a requests.PreparedRequests, \n sends this request, checks for exceptions, and returns the json parsed through the correct model.\n\n :param model_class: The class of the model to use for the output data.\n :type model_class: Union[Type[Model], Type[ModelList]]\n :param method: String form of which method to use. Either "GET" or "POST".\n :type method: str\n\n :returns: Decorator function.\n :rtype: Callable\n '
@wrapt.decorator
def wrapper(wrapped, instance=None, args=(), kwargs={}) -> Union[(Model, List[Model])]:
instance._method = method
prepared_request = wrapped(*args, **kwargs)
if instance._testing:
return prepared_request
response = instance._send(prepared_request)
if (response.status_code in STATUS_CODE_SUCCESS):
json_data = response.json()
model = model_class()
return model.parse(json_data, response)
elif (response.status_code in STATUS_CODE_EXCEPTIONS):
raise STATUS_CODE_EXCEPTIONS[response.status_code](response)
else:
raise UnknownStatusCode(response)
return wrapper | @staticmethod
def _get_endpoint(model_class: Union[(Type[Model], Type[ModelList])], method: str) -> Callable:
'\n Returns wrapper which receives a requests.PreparedRequests, \n sends this request, checks for exceptions, and returns the json parsed through the correct model.\n\n :param model_class: The class of the model to use for the output data.\n :type model_class: Union[Type[Model], Type[ModelList]]\n :param method: String form of which method to use. Either "GET" or "POST".\n :type method: str\n\n :returns: Decorator function.\n :rtype: Callable\n '
@wrapt.decorator
def wrapper(wrapped, instance=None, args=(), kwargs={}) -> Union[(Model, List[Model])]:
instance._method = method
prepared_request = wrapped(*args, **kwargs)
if instance._testing:
return prepared_request
response = instance._send(prepared_request)
if (response.status_code in STATUS_CODE_SUCCESS):
json_data = response.json()
model = model_class()
return model.parse(json_data, response)
elif (response.status_code in STATUS_CODE_EXCEPTIONS):
raise STATUS_CODE_EXCEPTIONS[response.status_code](response)
else:
raise UnknownStatusCode(response)
return wrapper<|docstring|>Returns wrapper which receives a requests.PreparedRequests,
sends this request, checks for exceptions, and returns the json parsed through the correct model.
:param model_class: The class of the model to use for the output data.
:type model_class: Union[Type[Model], Type[ModelList]]
:param method: String form of which method to use. Either "GET" or "POST".
:type method: str
:returns: Decorator function.
:rtype: Callable<|endoftext|> |
af029fbddc7428c9b180565c4988d7e9076dea70e8e8209f3eb4f8c749900233 | def error(bot, update, error):
'Log Errors caused by Updates.'
logger.warning('Update "%s" caused error "%s"', update, error) | Log Errors caused by Updates. | src/bot.py | error | ViniciusBernardo/exercicio-estagio | 0 | python | def error(bot, update, error):
logger.warning('Update "%s" caused error "%s"', update, error) | def error(bot, update, error):
logger.warning('Update "%s" caused error "%s"', update, error)<|docstring|>Log Errors caused by Updates.<|endoftext|> |
68d09f0aadf248f34b51823deffe7b9277b0f1e66194d06c02a84601f953ac7a | def testExpandUsersHomeDirectoryPathSegments(self):
'Tests the _ExpandUsersHomeDirectoryPathSegments function.'
user_account_artifact1 = artifacts.UserAccountArtifact(user_directory='/home/Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(user_directory='/Users/Test2', username='Test2')
user_account_artifact3 = artifacts.UserAccountArtifact(username='Test3')
user_accounts = [user_account_artifact1, user_account_artifact2, user_account_artifact3]
path_segments = ['%%users.homedir%%', '.bashrc']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '/', user_accounts)
expected_expanded_paths = ['/home/Test1/.bashrc', '/Users/Test2/.bashrc']
self.assertEqual(expanded_paths, expected_expanded_paths)
user_account_artifact1 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.userprofile%%', 'Profile']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\Profile', '\\Users\\Test2\\Profile']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Temp']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp', '%%users.userprofile%%']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Temp\\%%users.userprofile%%']
self.assertEqual(expanded_paths, expected_expanded_paths) | Tests the _ExpandUsersHomeDirectoryPathSegments function. | tests/engine/path_helper.py | testExpandUsersHomeDirectoryPathSegments | roshanmaskey/plaso | 1,253 | python | def testExpandUsersHomeDirectoryPathSegments(self):
user_account_artifact1 = artifacts.UserAccountArtifact(user_directory='/home/Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(user_directory='/Users/Test2', username='Test2')
user_account_artifact3 = artifacts.UserAccountArtifact(username='Test3')
user_accounts = [user_account_artifact1, user_account_artifact2, user_account_artifact3]
path_segments = ['%%users.homedir%%', '.bashrc']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '/', user_accounts)
expected_expanded_paths = ['/home/Test1/.bashrc', '/Users/Test2/.bashrc']
self.assertEqual(expanded_paths, expected_expanded_paths)
user_account_artifact1 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.userprofile%%', 'Profile']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\Profile', '\\Users\\Test2\\Profile']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Temp']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp', '%%users.userprofile%%']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Temp\\%%users.userprofile%%']
self.assertEqual(expanded_paths, expected_expanded_paths) | def testExpandUsersHomeDirectoryPathSegments(self):
user_account_artifact1 = artifacts.UserAccountArtifact(user_directory='/home/Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(user_directory='/Users/Test2', username='Test2')
user_account_artifact3 = artifacts.UserAccountArtifact(username='Test3')
user_accounts = [user_account_artifact1, user_account_artifact2, user_account_artifact3]
path_segments = ['%%users.homedir%%', '.bashrc']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '/', user_accounts)
expected_expanded_paths = ['/home/Test1/.bashrc', '/Users/Test2/.bashrc']
self.assertEqual(expanded_paths, expected_expanded_paths)
user_account_artifact1 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.userprofile%%', 'Profile']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\Profile', '\\Users\\Test2\\Profile']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Temp']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp', '%%users.userprofile%%']
expanded_paths = path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Temp\\%%users.userprofile%%']
self.assertEqual(expanded_paths, expected_expanded_paths)<|docstring|>Tests the _ExpandUsersHomeDirectoryPathSegments function.<|endoftext|> |
daf0f897cd79ea5007b5f2628c6f16c18d6a3f9140bb0e6325046f78565e146a | def testExpandUsersVariablePathSegments(self):
'Tests the _ExpandUsersVariablePathSegments function.'
user_account_artifact1 = artifacts.UserAccountArtifact(identifier='1000', path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(identifier='1001', path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.appdata%%', 'Microsoft', 'Windows', 'Recent']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)
path_segments = ['C:', 'Windows']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Windows']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths) | Tests the _ExpandUsersVariablePathSegments function. | tests/engine/path_helper.py | testExpandUsersVariablePathSegments | roshanmaskey/plaso | 1,253 | python | def testExpandUsersVariablePathSegments(self):
user_account_artifact1 = artifacts.UserAccountArtifact(identifier='1000', path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(identifier='1001', path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.appdata%%', 'Microsoft', 'Windows', 'Recent']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)
path_segments = ['C:', 'Windows']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Windows']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths) | def testExpandUsersVariablePathSegments(self):
user_account_artifact1 = artifacts.UserAccountArtifact(identifier='1000', path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(identifier='1001', path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.appdata%%', 'Microsoft', 'Windows', 'Recent']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)
path_segments = ['C:', 'Windows']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Windows']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)<|docstring|>Tests the _ExpandUsersVariablePathSegments function.<|endoftext|> |
0f885e5358a2ce6feba199abd96308d5add75912b5f058f5524f5dc33c033dba | def testIsWindowsDrivePathSegment(self):
'Tests the _IsWindowsDrivePathSegment function.'
result = path_helper.PathHelper._IsWindowsDrivePathSegment('C:')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('%SystemDrive%')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('%%environ_systemdrive%%')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('Windows')
self.assertFalse(result) | Tests the _IsWindowsDrivePathSegment function. | tests/engine/path_helper.py | testIsWindowsDrivePathSegment | roshanmaskey/plaso | 1,253 | python | def testIsWindowsDrivePathSegment(self):
result = path_helper.PathHelper._IsWindowsDrivePathSegment('C:')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('%SystemDrive%')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('%%environ_systemdrive%%')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('Windows')
self.assertFalse(result) | def testIsWindowsDrivePathSegment(self):
result = path_helper.PathHelper._IsWindowsDrivePathSegment('C:')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('%SystemDrive%')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('%%environ_systemdrive%%')
self.assertTrue(result)
result = path_helper.PathHelper._IsWindowsDrivePathSegment('Windows')
self.assertFalse(result)<|docstring|>Tests the _IsWindowsDrivePathSegment function.<|endoftext|> |
480946e3f1df20a7620bb903db8d93b996bdf179162c91cd716fd3b8f9246d6b | def testExpandGlobStars(self):
'Tests the ExpandGlobStars function.'
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**', '/')
self.assertEqual(len(paths), 10)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*', '/etc/sysconfig/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**4', '/')
self.assertEqual(len(paths), 4)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**99', '/')
self.assertEqual(len(paths), 10)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*', '/etc/sysconfig/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/my**', '/')
self.assertEqual(len(paths), 1)
self.assertEqual(paths, ['/etc/sysconfig/my**'])
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**.exe', '/')
self.assertEqual(len(paths), 1)
self.assertEqual(paths, ['/etc/sysconfig/**.exe']) | Tests the ExpandGlobStars function. | tests/engine/path_helper.py | testExpandGlobStars | roshanmaskey/plaso | 1,253 | python | def testExpandGlobStars(self):
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**', '/')
self.assertEqual(len(paths), 10)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*', '/etc/sysconfig/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**4', '/')
self.assertEqual(len(paths), 4)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**99', '/')
self.assertEqual(len(paths), 10)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*', '/etc/sysconfig/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/my**', '/')
self.assertEqual(len(paths), 1)
self.assertEqual(paths, ['/etc/sysconfig/my**'])
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**.exe', '/')
self.assertEqual(len(paths), 1)
self.assertEqual(paths, ['/etc/sysconfig/**.exe']) | def testExpandGlobStars(self):
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**', '/')
self.assertEqual(len(paths), 10)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*', '/etc/sysconfig/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**4', '/')
self.assertEqual(len(paths), 4)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**99', '/')
self.assertEqual(len(paths), 10)
expected_paths = sorted(['/etc/sysconfig/*', '/etc/sysconfig/*/*', '/etc/sysconfig/*/*/*', '/etc/sysconfig/*/*/*/*', '/etc/sysconfig/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*', '/etc/sysconfig/*/*/*/*/*/*/*/*/*/*'])
self.assertEqual(sorted(paths), expected_paths)
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/my**', '/')
self.assertEqual(len(paths), 1)
self.assertEqual(paths, ['/etc/sysconfig/my**'])
paths = path_helper.PathHelper.ExpandGlobStars('/etc/sysconfig/**.exe', '/')
self.assertEqual(len(paths), 1)
self.assertEqual(paths, ['/etc/sysconfig/**.exe'])<|docstring|>Tests the ExpandGlobStars function.<|endoftext|> |