Dataset column schema (reconstructed from the flattened viewer header):

| column | type | min | max |
|---|---|---|---|
| body_hash | string length | 64 | 64 |
| body | string length | 23 | 109k |
| docstring | string length | 1 | 57k |
| path | string length | 4 | 198 |
| name | string length | 1 | 115 |
| repository_name | string length | 7 | 111 |
| repository_stars | float64 | 0 | 191k |
| lang | string (1 class: `python`) | n/a | n/a |
| body_without_docstring | string length | 14 | 108k |
| unified | string length | 45 | 133k |

Each record below is listed as `path | name | repository_name | repository_stars | lang`, followed by the function body with its docstring.
deeppavlov/core/common/registry.py | cls_from_str | Graygood/DeepPavlov | 5,893 | python

```python
def cls_from_str(name: str) -> type:
    """Returns a class object with the name given as a string."""
    try:
        module_name, cls_name = name.split(':')
    except ValueError:
        raise ConfigError('Expected class description in a `module.submodules:ClassName` form, but got `{}`'.format(name))
    return getattr(importlib.import_module(module_name), cls_name)
```
deeppavlov/core/common/registry.py | register | Graygood/DeepPavlov | 5,893 | python

```python
def register(name: str = None) -> type:
    """
    Register classes that could be initialized from JSON configuration file.
    If name is not passed, the class name is converted to snake-case.
    """
    def decorate(model_cls: type, reg_name: str = None) -> type:
        model_name = reg_name or short_name(model_cls)
        global _REGISTRY
        cls_name = model_cls.__module__ + ':' + model_cls.__name__
        if model_name in _REGISTRY and _REGISTRY[model_name] != cls_name:
            logger.warning('Registry name "{}" has been already registered and will be overwritten.'.format(model_name))
        _REGISTRY[model_name] = cls_name
        return model_cls
    return lambda model_cls_name: decorate(model_cls_name, name)
```
deeppavlov/core/common/registry.py | short_name | Graygood/DeepPavlov | 5,893 | python

```python
def short_name(cls: type) -> str:
    """Returns just a class name (without package and module specification)."""
    return cls.__name__.split('.')[-1]
```
deeppavlov/core/common/registry.py | get_model | Graygood/DeepPavlov | 5,893 | python

```python
def get_model(name: str) -> type:
    """Returns a registered class object with the name given in the string."""
    if name not in _REGISTRY:
        if ':' not in name:
            raise ConfigError('Model {} is not registered.'.format(name))
        return cls_from_str(name)
    return cls_from_str(_REGISTRY[name])
```
deeppavlov/core/common/registry.py | list_models | Graygood/DeepPavlov | 5,893 | python

```python
def list_models() -> list:
    """Returns a list of names of registered classes."""
    return list(_REGISTRY)
```
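Taken together, these helpers implement a string-keyed class registry. A minimal usage sketch follows; the `_REGISTRY`, `logger`, and `ConfigError` stand-ins are hypothetical placeholders for the module-level names the functions above rely on, so the example runs outside DeepPavlov:

```python
import importlib
import logging

logger = logging.getLogger(__name__)
_REGISTRY = {}  # stand-in for the module-level registry dict


class ConfigError(Exception):
    """Stand-in for DeepPavlov's ConfigError."""


# ... definitions of cls_from_str / register / short_name / get_model / list_models ...

@register('my_component')
class MyComponent:
    pass

assert get_model('my_component') is MyComponent           # registered lookup
assert get_model('collections:OrderedDict') is not None   # 'module:Class' fallback
print(list_models())                                      # -> ['my_component']
```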
src/oci/artifacts/models/update_container_configuration_details.py | __init__ | ezequielramos/oci-python-sdk | 249 | python

```python
def __init__(self, **kwargs):
    """
    Initializes a new UpdateContainerConfigurationDetails object with values from keyword arguments.
    The following keyword arguments are supported (corresponding to the getters/setters of this class):

    :param is_repository_created_on_first_push:
        The value to assign to the is_repository_created_on_first_push property of this UpdateContainerConfigurationDetails.
    :type is_repository_created_on_first_push: bool
    """
    self.swagger_types = {'is_repository_created_on_first_push': 'bool'}
    self.attribute_map = {'is_repository_created_on_first_push': 'isRepositoryCreatedOnFirstPush'}
    self._is_repository_created_on_first_push = None
```
src/oci/artifacts/models/update_container_configuration_details.py | is_repository_created_on_first_push | ezequielramos/oci-python-sdk | 249 | python

```python
@property
def is_repository_created_on_first_push(self):
    """
    Gets the is_repository_created_on_first_push of this UpdateContainerConfigurationDetails.
    Whether to create a new container repository when a container is pushed to a new repository path.
    Repositories created in this way belong to the root compartment.

    :return: The is_repository_created_on_first_push of this UpdateContainerConfigurationDetails.
    :rtype: bool
    """
    return self._is_repository_created_on_first_push
```
src/oci/artifacts/models/update_container_configuration_details.py | is_repository_created_on_first_push | ezequielramos/oci-python-sdk | 249 | python

```python
@is_repository_created_on_first_push.setter
def is_repository_created_on_first_push(self, is_repository_created_on_first_push):
    """
    Sets the is_repository_created_on_first_push of this UpdateContainerConfigurationDetails.
    Whether to create a new container repository when a container is pushed to a new repository path.
    Repositories created in this way belong to the root compartment.

    :param is_repository_created_on_first_push: The is_repository_created_on_first_push of this UpdateContainerConfigurationDetails.
    :type: bool
    """
    self._is_repository_created_on_first_push = is_repository_created_on_first_push
```
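A brief usage sketch, assuming the `oci` Python SDK is installed and the class is importable from `oci.artifacts.models` as the record's path suggests. Note that the `__init__` shown above documents the keyword argument but only initializes the backing field to `None`, so the value goes through the property setter:

```python
from oci.artifacts.models import UpdateContainerConfigurationDetails

details = UpdateContainerConfigurationDetails()
details.is_repository_created_on_first_push = True  # uses the setter above
assert details.is_repository_created_on_first_push is True
assert details.attribute_map['is_repository_created_on_first_push'] == 'isRepositoryCreatedOnFirstPush'
```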
myprojectenv/lib/python3.5/site-packages/ansible/modules/packaging/language/pip.py | _get_packages | lancerenteria/doFlask | 0 | python

```python
def _get_packages(module, pip, chdir):
    """Return results of pip command to get packages."""
    command = '%s list' % pip
    lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
    if rc != 0:
        # Fall back to 'pip freeze' for pip versions without 'list'.
        command = '%s freeze' % pip
        rc, out, err = module.run_command(command, cwd=chdir)
        if rc != 0:
            _fail(module, command, out, err)
    return command, out, err
```
myprojectenv/lib/python3.5/site-packages/ansible/modules/packaging/language/pip.py | _is_present | lancerenteria/doFlask | 0 | python

```python
def _is_present(name, version, installed_pkgs, pkg_command):
    """Return whether or not package is installed."""
    for pkg in installed_pkgs:
        if 'list' in pkg_command:
            pkg = pkg.replace('(', '').replace(')', '')
            if ',' in pkg:
                pkg_name, pkg_version, _ = pkg.replace(',', '').split(' ')
            else:
                pkg_name, pkg_version = pkg.split(' ')
        elif 'freeze' in pkg_command:
            if '==' in pkg:
                pkg_name, pkg_version = pkg.split('==')
            else:
                continue
        else:
            continue
        if pkg_name == name and (version is None or version == pkg_version):
            return True
    return False
```
myprojectenv/lib/python3.5/site-packages/ansible/modules/packaging/language/pip.py | _get_package_info | lancerenteria/doFlask | 0 | python

```python
def _get_package_info(module, package, env=None):
    """This is only needed for special packages which do not show up in pip freeze

    pip and setuptools fall into this category.

    :returns: a string containing the version number if the package is
        installed. None if the package is not installed.
    """
    if env:
        opt_dirs = ['%s/bin' % env]
    else:
        opt_dirs = []
    python_bin = module.get_bin_path('python', False, opt_dirs)
    if python_bin is None:
        formatted_dep = None
    else:
        rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
        if rc:
            formatted_dep = None
        else:
            formatted_dep = '%s==%s' % (package, out.strip())
    return formatted_dep
```
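These helpers expect an Ansible `module` object, but `_is_present` only needs the raw package lines, so its parsing logic can be checked in isolation. A small sketch with `pip freeze`-style input (the package names and versions are arbitrary):

```python
installed = [
    'requests==2.31.0',
    'flask==3.0.0',
    '-e git+https://example.invalid/pkg.git#egg=pkg',  # placeholder editable install
]

assert _is_present('requests', None, installed, 'pip freeze') is True
assert _is_present('requests', '2.31.0', installed, 'pip freeze') is True
assert _is_present('requests', '1.0.0', installed, 'pip freeze') is False
assert _is_present('django', None, installed, 'pip freeze') is False
# Lines without '==' (e.g. editable installs) are skipped by the parser above.
```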
cogs/general.py | serverinfo | 0xdia/BrainyBot | 29 | python

```python
@commands.command(name='serverinfo')
async def serverinfo(self, context):
    """
    Get some useful (or not) information about the server.
    """
    server = context.message.guild
    roles = [x.name for x in server.roles]
    role_length = len(roles)
    if role_length > 50:
        roles = roles[:50]
        roles.append(f'>>>> Displaying[50/{len(roles)}] Roles')
    roles = ', '.join(roles)
    channels = len(server.channels)
    time = str(server.created_at)
    time = time.split(' ')
    time = time[0]
    embed = discord.Embed(title='**Server Name:**', description=f'{server}', color=int(config.EMBED_COLOR, 16))
    embed.set_thumbnail(url=server.icon_url)
    embed.add_field(name='Owner', value=f'{server.owner}\n{server.owner.id}')
    embed.add_field(name='Server ID', value=server.id)
    embed.add_field(name='Member Count', value=server.member_count)
    embed.add_field(name='Text/Voice Channels', value=f'{channels}')
    embed.add_field(name=f'Roles ({role_length})', value=roles)
    embed.set_footer(text=f'Created at: {time}')
    await context.send(embed=embed)
```
cogs/general.py | ping | 0xdia/BrainyBot | 29 | python

```python
@commands.command(name='ping')
async def ping(self, context):
    """
    Check if the bot is alive.
    """
    embed = discord.Embed(color=int(config.EMBED_COLOR, 16))
    embed.add_field(name='Pong!', value=':ping_pong:', inline=True)
    embed.set_footer(text=f"🏓 Pong Don't Catch it if you can!{context.message.author}")
    await context.send(embed=embed)
```
cogs/general.py | server | 0xdia/BrainyBot | 29 | python

```python
@commands.command(name='server')
async def server(self, context):
    """
    Get the invite link of the discord server of the bot for some support.
    """
    await context.send('I sent you a private message!')
    await context.author.send('Join my discord server by clicking here: https://www.gdgalgiers.com/discord')
```
cogs/general.py | poll | 0xdia/BrainyBot | 29 | python

```python
@commands.command(name='poll')
async def poll(self, context, *args):
    """
    Create a poll where members can vote.
    """
    poll_title = ' '.join(args)
    embed = discord.Embed(title='A new poll has been created!', description=f'{poll_title}', color=int(config.EMBED_COLOR, 16))
    embed.set_footer(text=f'Poll created by: {context.message.author} • React to vote!')
    embed_message = await context.send(embed=embed)
    await embed_message.add_reaction('👍')
    await embed_message.add_reaction('👎')
    await embed_message.add_reaction('🤷')
```
cogs/general.py | isSpotOpen | 0xdia/BrainyBot | 29 | python

```python
@commands.dm_only()
@commands.command(name='isSpotOpen')
async def isSpotOpen(self, context):
    """
    check if the GDG Algiers spot is open or not
    """
    if loads(open('config.json', 'r').read().strip())['spot']:
        sit = 'Open'
    else:
        sit = 'Close'
    await send_embed(context, '', f'Currently, the spot is {sit}.')
```
cogs/general.py | spot | 0xdia/BrainyBot | 29 | python

```python
@commands.dm_only()
@commands.command(name='spot')
async def spot(self, context):
    """
    open the spot if its closed or close it if opened
    """
    if context.message.author.id not in config.COMANAGERS_IDs:
        raise AuthorizationError()
    else:
        dict = loads(open('config.json', 'r').read().strip())
        with open('config.json', 'w+') as f:
            if dict['spot']:
                dict['spot'] = False
                new_value = 'Closed'
                f.write(dumps(dict))
            else:
                dict['spot'] = True
                new_value = 'Open'
                f.write(dumps(dict))
        await send_embed(context, '', f'Now, the spot became {new_value}.')
```
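The commands above are methods of a discord.py Cog. A minimal wiring sketch (discord.py 1.x style; the class and extension names here are hypothetical) showing how such a cog is attached to a bot:

```python
from discord.ext import commands


class General(commands.Cog, name='general'):
    def __init__(self, bot):
        self.bot = bot

    # ... the @commands.command methods shown above go here ...


def setup(bot):
    # Invoked by bot.load_extension('cogs.general') in discord.py 1.x.
    bot.add_cog(General(bot))
```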
liquid/parser.py | visit | pemontto/liquidpy | 0 | python

```python
def visit(self, tag):
    """Visit the tag

    Args:
        tag: The tag
    """
    if tag.name == 'LITERAL':
        tag.open_compact = self._prev_tag and self._prev_tag.close_compact
    elif self._prev_tag and self._prev_tag.name == 'LITERAL':
        self._prev_tag.close_compact = tag.open_compact
    self._prev_tag = tag
    if tag.name.startswith('end'):
        self._end_tag(tag)
    else:
        self._start_tag(tag)
        tag.parse()
        if tag.name == 'block':
            self.blocks[tag.parsed] = tag
```
liquid/parser.py | _start_tag | pemontto/liquidpy | 0 | python

```python
def _start_tag(self, tag):
    """Encounter a start tag, try to solve the structure"""
    if not self.stack:
        if tag.parent_required:
            raise LiquidSyntaxError(f'One of the parent tags is required: {tag.PARENT_TAGS}', tag.context, tag.parser)
        if tag.elder_required:
            raise LiquidSyntaxError(f'One of the elder tags is required: {tag.ELDER_TAGS}', tag.context, tag.parser)
        self.root.children.append(tag)
        tag.parent = self.root
    else:
        if tag.is_elder(self.stack[-1]):
            prev_tag = self.stack.pop()
            prev_tag.next = tag
            tag.prev = prev_tag
            tag.context.level = prev_tag.context.level
        if self.stack:
            self.stack[-1].children.append(tag)
            tag.parent = self.stack[-1]
            tag.context.level = tag.parent.context.level + 1
            tag.parsing_self = tag.parent.parsing_children
            tag.parsing_children = tag.parent.parsing_children
        if not tag.check_parents():
            raise LiquidSyntaxError(f'Tag {tag.name!r} expects parents: {tag.PARENT_TAGS}', tag.context, tag.parser)
        if not tag.check_elders():
            raise LiquidSyntaxError(f'Tag {tag.name!r} expects elder tags: {tag.ELDER_TAGS}', tag.context, tag.parser)
    if not tag.VOID:
        self.stack.append(tag)
```
liquid/parser.py | _end_tag | pemontto/liquidpy | 0 | python

```python
def _end_tag(self, tag):
    """Handle tag relationships when closing a tag."""
    tagname = tag.name[3:]  # strip the 'end' prefix
    if not self.stack:
        raise LiquidSyntaxError(f'Unexpected endtag: {tag!r}', tag.context, tag.parser)
    last_tag = self.stack[-1]
    last_eldest = last_tag.eldest or last_tag
    while last_tag:
        if last_eldest.name == tagname:
            self.stack.pop()
            break
        if not last_eldest.parent_required:
            raise LiquidSyntaxError(f'Tag unclosed: {last_eldest!r}', last_eldest.context, last_eldest.parser)
        self.stack.pop()
        last_tag = self.stack[-1] if self.stack else None
        # Guard on last_tag (not last_eldest) before the attribute access,
        # so an emptied stack cannot raise AttributeError on None.
        last_eldest = (last_tag.eldest if last_tag else None) or last_tag
```
liquid/parser.py | parse | pemontto/liquidpy | 0 | python

```python
def parse(self):
    """Parse the template for later rendering.

    Returns:
        The root tag for later rendering
    """
    logger.debug('%s- PARSING %r ...', self.context.level * LIQUID_LOG_INDENT, self.context.name)
    while True:
        scanned = self.nodescanner.consume(self.context.stream)
        if scanned is False:
            self.visitor.root.parse()
            logger.debug('%s END PARSING.', self.context.level * LIQUID_LOG_INDENT)
            break
        if scanned is True:
            continue
        tag = scanned.tag
        if not tag.SECURE and self.config.strict:
            raise LiquidSyntaxError(f'Tag not allowed in strict mode: {tag!r}', tag.context, self)
        self.visitor.visit(tag)
    return self.visitor.root
```
explorerscript/ssb_converting/ssb_special_ops.py | process_op_for_jump | End45/ExplorerScript | 11 | python

```python
def process_op_for_jump(op: SsbOperation, known_labels: Dict[int, SsbLabel], routine_id: int) -> SsbOperation:
    """
    Processes the operation.
    If it doesn't contain a jump to a memory offset, op is simply returned.

    Otherwise, a label for the jump location is searched in known_labels.
    - If found: Returns a OperationSubtree with a copy of op as root, and the label op as subtree.
      The param with the jump offset is removed from the op copy.
    - If not found: A new label with an auto-incremented id is generated and added to the known_labels.
      Then: see above for "if found".
    """
    if op.op_code.name in OPS_WITH_JUMP_TO_MEM_OFFSET.keys():
        param_list = op.params if isinstance(op.params, list) else list(op.params.values())
        jump_param_idx = OPS_WITH_JUMP_TO_MEM_OFFSET[op.op_code.name]
        if len(param_list) < jump_param_idx:
            raise ValueError(f'The parameters for the OpCode {op.op_code.name} must contain a jump address at index {jump_param_idx}.')
        old_offset = param_list[jump_param_idx]
        if old_offset in known_labels:
            label = known_labels[old_offset]
            if routine_id != label.routine_id:
                label.referenced_from_other_routine = True
        else:
            if len(known_labels) == 0:
                next_label_id = 0
            else:
                next_label_id = max(label.id for label in known_labels.values()) + 1
            label = SsbLabel(next_label_id, routine_id)
            known_labels[old_offset] = label
        new_params = param_list.copy()
        del new_params[jump_param_idx]
        jmp = SsbLabelJump(SsbOperation(op.offset, op.op_code, new_params), label)
        if op.op_code.name == OP_CALL:
            jmp.markers.append(CallJump())
        return jmp
    return op
```
explorerscript/ssb_converting/ssb_special_ops.py | add_if | End45/ExplorerScript | 11 | python

```python
def add_if(self, ssb_if: SsbOperation):
    """Add the ORIGINAL opcodes (NOT SsbLabelJump, but their ROOT) to this list of ifs."""
    self.original_ssb_ifs_ops.append(ssb_if)
```
explorerscript/ssb_converting/ssb_special_ops.py | needs_to_be_printed | End45/ExplorerScript | 11 | python

```python
def needs_to_be_printed(self, my_vertex_index: int, number_in_vs: int, graph: Graph):
    """If the number of incoming vertices is bigger than max_in_vs, then we need to print this label"""
    # NOTE: this early return short-circuits the method; everything below it
    # is currently unreachable (apparently kept for reference).
    return not any(isinstance(m, SwitchFalltrough) for m in self.markers)
    if self.force_write or my_vertex_index == 0 or self.referenced_from_other_routine:
        return True
    max_in_vs = 1
    for m in self.markers:
        if isinstance(m, SwitchFalltrough):
            return False
        if isinstance(m, IfEnd):
            max_in_vs += 1
        if isinstance(m, SwitchEnd):
            start: Vertex = self._find_switch_start_vertex(graph, m.switch_id)
            if not start:
                raise ValueError(f'Start for switch {m.switch_id} not found.')
            max_in_vs += len(start.out_edges()) - 1
    return number_in_vs > max_in_vs
```
explorerscript/ssb_converting/ssb_special_ops.py | remove_marker | End45/ExplorerScript | 11 | python

```python
def remove_marker(self):
    """Remove the first (and only) marker if exists."""
    if len(self.markers) > 0:
        del self.markers[0]
```
explorerscript/ssb_converting/ssb_special_ops.py | get_marker | End45/ExplorerScript | 11 | python

```python
def get_marker(self):
    """Returns the first (and only) marker if exists, otherwise None."""
    if len(self.markers) > 0:
        return self.markers[0]
    return None
```
graphs/perception/perception_2nodes/launch/analyse_rectify_resize.launch.py | get_change | dirksavage88/acceleration_examples | 0 | python

```python
def get_change(first, second):
    """
    Get change in percentage between two values
    """
    if first == second:
        return 0
    try:
        return (abs(first - second) / second) * 100.0
    except ZeroDivisionError:
        return float('inf')
```
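A few quick checks of `get_change`'s semantics: the result is the absolute change expressed as a percentage of the second argument, with a guarded divide-by-zero:

```python
assert get_change(100, 100) == 0
assert get_change(110, 100) == 10.0
assert get_change(90, 100) == 10.0       # direction is discarded by abs()
assert get_change(5, 0) == float('inf')  # ZeroDivisionError is caught
```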
graphs/perception/perception_2nodes/launch/analyse_rectify_resize.launch.py | barchart_data | dirksavage88/acceleration_examples | 0 | python

```python
def barchart_data(image_pipeline_msg_sets):
    """Converts a tracing message list into its corresponding
    relative (to the previous tracepoint) latency list in
    millisecond units.

    Args:
        image_pipeline_msg_sets ([type]): [description]

    Returns:
        list: list of relative latencies, in ms
    """
    image_pipeline_msg_sets_ns = []
    for set_index in range(len(image_pipeline_msg_sets)):
        aux_set = []
        target_chain_ns = []
        for msg_index in range(len(image_pipeline_msg_sets[set_index])):
            target_chain_ns.append(image_pipeline_msg_sets[set_index][msg_index].default_clock_snapshot.ns_from_origin)
        for msg_index in range(len(image_pipeline_msg_sets[set_index])):
            if msg_index == 0:
                previous = target_chain_ns[0]
            else:
                previous = target_chain_ns[msg_index - 1]
            aux_set.append((target_chain_ns[msg_index] - previous) / 1000000.0)
        image_pipeline_msg_sets_ns.append(aux_set)
    return image_pipeline_msg_sets_ns
```
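`barchart_data` only touches `default_clock_snapshot.ns_from_origin` on each message, so its arithmetic can be exercised with stand-in objects that mimic babeltrace2 messages:

```python
from types import SimpleNamespace

def fake_msg(ns_from_origin):
    # Stand-in exposing only the attribute barchart_data reads.
    return SimpleNamespace(default_clock_snapshot=SimpleNamespace(ns_from_origin=ns_from_origin))

sets = [[fake_msg(0), fake_msg(2_000_000), fake_msg(5_000_000)]]
print(barchart_data(sets))  # -> [[0.0, 2.0, 3.0]]  (per-tracepoint latency, ms)
```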
8e3ac01fda876ea0f61edbbdb1a8887fb6e63cfd023dfc3f5054437d04d2b440 | def rms_sets(image_pipeline_msg_sets, indices=None):
'\n Root-Mean-Square (RMS) (in the units provided) for a\n given number of time trace sets.\n\n NOTE: last value of the lists should not include the total\n\n :param: image_pipeline_msg_sets, list of lists, each containing the time traces\n :param: indices, list of indices to consider on each set which will be summed\n for rms. By default, sum of all values on each set.\n '
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return rms(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return rms(total_in_sets) | Root-Mean-Square (RMS) (in the units provided) for a
given number of time trace sets.
NOTE: last value of the lists should not include the total
:param: image_pipeline_msg_sets, list of lists, each containing the time traces
:param: indices, list of indices to consider on each set which will be summed
for rms. By default, sum of all values on each set. | graphs/perception/perception_2nodes/launch/analyse_rectify_resize.launch.py | rms_sets | dirksavage88/acceleration_examples | 0 | python | def rms_sets(image_pipeline_msg_sets, indices=None):
'\n Root-Mean-Square (RMS) (in the units provided) for a\n given number of time trace sets.\n\n NOTE: last value of the lists should not include the total\n\n :param: image_pipeline_msg_sets, list of lists, each containing the time traces\n :param: indices, list of indices to consider on each set which will be summed\n for rms. By default, sum of all values on each set.\n '
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return rms(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return rms(total_in_sets) | def rms_sets(image_pipeline_msg_sets, indices=None):
'\n Root-Mean-Square (RMS) (in the units provided) for a\n given number of time trace sets.\n\n NOTE: last value of the lists should not include the total\n\n :param: image_pipeline_msg_sets, list of lists, each containing the time traces\n :param: indices, list of indices to consider on each set which will be summed\n for rms. By default, sum of all values on each set.\n '
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return rms(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return rms(total_in_sets)<|docstring|>Root-Mean-Square (RMS) (in the units provided) for a
given number of time trace sets.
NOTE: last value of the lists should not include the total
:param: image_pipeline_msg_sets, list of lists, each containing the time traces
:param: indices, list of indices to consider on each set which will be summed
for rms. By default, sum of all values on each set.<|endoftext|> |
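A usage sketch for rms_sets. The rms helper it calls is not shown in this record, so the definition below is an assumption (a plain root-mean-square over a 1-D list):

import math

def rms(values):  # assumed helper: sqrt of the mean of squared values
    return math.sqrt(sum(v * v for v in values) / len(values))

sets = [[1.0, 2.0, 3.0], [2.0, 2.0, 2.0]]
print(rms_sets(sets))                  # rms of per-set totals: rms([6.0, 6.0]) -> 6.0
print(rms_sets(sets, indices=[0, 2]))  # rms of sums over indices 0 and 2: rms([4.0, 4.0]) -> 4.0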
a430bbb3e06cba6f4c415ebb526dd1b0494645d2eb18eabc71f77c4a89dfd732 | def print_timeline_average(image_pipeline_msg_sets):
'\n Doing averages may lead to negative numbers while subtracting the previous average.\n This is only useful to get an intuition of the totals.\n '
global target_chain
global target_chain_colors_fg
image_pipeline_msg_sets_ns = []
for msg_set in image_pipeline_msg_sets:
if (len(msg_set) != len(target_chain)):
print(color(('Not a complete set: ' + str([x.event.name for x in msg_set])), fg='red'))
pass
else:
target_chain_ns = []
final_target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
fixed_target_chain_ns = ([init_ns] + target_chain_ns)
for msg_index in range(len(msg_set)):
final_target_chain_ns.append((fixed_target_chain_ns[(msg_index + 1)] - fixed_target_chain_ns[msg_index]))
final_target_chain_ns.append((fixed_target_chain_ns[(- 1)] - fixed_target_chain_ns[0]))
image_pipeline_msg_sets_ns.append(final_target_chain_ns)
image_pipeline_msg_ns_average = [(sum(x) / len(x)) for x in zip(*image_pipeline_msg_sets_ns)]
stringout = color('raw image ')
for msg_index in range(len(image_pipeline_msg_ns_average[:(- 1)])):
stringout += (' → ' + color((image_pipeline_msg_sets[0][msg_index].event.name + ' ({} ms) '.format(((image_pipeline_msg_ns_average[(msg_index + 1)] - image_pipeline_msg_ns_average[msg_index]) / 1000000.0))), fg=target_chain_colors_fg[msg_index], bg='black'))
stringout += color(('total ' + ' ({} ms) '.format(((image_pipeline_msg_ns_average[(- 1)] - image_pipeline_msg_ns_average[0]) / 1000000.0))), fg='black', bg='white')
print(stringout) | Doing averages may lead to negative numbers while subtracting the previous average.
This is only useful to get an intuition of the totals. | graphs/perception/perception_2nodes/launch/analyse_rectify_resize.launch.py | print_timeline_average | dirksavage88/acceleration_examples | 0 | python | def print_timeline_average(image_pipeline_msg_sets):
'\n Doing averages may lead to negative numbers while subtracting the previous average.\n This is only useful to get an intuition of the totals.\n '
global target_chain
global target_chain_colors_fg
image_pipeline_msg_sets_ns = []
for msg_set in image_pipeline_msg_sets:
if (len(msg_set) != len(target_chain)):
print(color(('Not a complete set: ' + str([x.event.name for x in msg_set])), fg='red'))
pass
else:
target_chain_ns = []
final_target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
fixed_target_chain_ns = ([init_ns] + target_chain_ns)
for msg_index in range(len(msg_set)):
final_target_chain_ns.append((fixed_target_chain_ns[(msg_index + 1)] - fixed_target_chain_ns[msg_index]))
final_target_chain_ns.append((fixed_target_chain_ns[(- 1)] - fixed_target_chain_ns[0]))
image_pipeline_msg_sets_ns.append(final_target_chain_ns)
image_pipeline_msg_ns_average = [(sum(x) / len(x)) for x in zip(*image_pipeline_msg_sets_ns)]
stringout = color('raw image ')
for msg_index in range(len(image_pipeline_msg_ns_average[:(- 1)])):
stringout += (' → ' + color((image_pipeline_msg_sets[0][msg_index].event.name + ' ({} ms) '.format(((image_pipeline_msg_ns_average[(msg_index + 1)] - image_pipeline_msg_ns_average[msg_index]) / 1000000.0))), fg=target_chain_colors_fg[msg_index], bg='black'))
stringout += color(('total ' + ' ({} ms) '.format(((image_pipeline_msg_ns_average[(- 1)] - image_pipeline_msg_ns_average[0]) / 1000000.0))), fg='black', bg='white')
print(stringout) | def print_timeline_average(image_pipeline_msg_sets):
'\n Doing averages may lead to negative numbers while subtracting the previous average.\n This is only useful to get an intuition of the totals.\n '
global target_chain
global target_chain_colors_fg
image_pipeline_msg_sets_ns = []
for msg_set in image_pipeline_msg_sets:
if (len(msg_set) != len(target_chain)):
print(color(('Not a complete set: ' + str([x.event.name for x in msg_set])), fg='red'))
pass
else:
target_chain_ns = []
final_target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
fixed_target_chain_ns = ([init_ns] + target_chain_ns)
for msg_index in range(len(msg_set)):
final_target_chain_ns.append((fixed_target_chain_ns[(msg_index + 1)] - fixed_target_chain_ns[msg_index]))
final_target_chain_ns.append((fixed_target_chain_ns[(- 1)] - fixed_target_chain_ns[0]))
image_pipeline_msg_sets_ns.append(final_target_chain_ns)
image_pipeline_msg_ns_average = [(sum(x) / len(x)) for x in zip(*image_pipeline_msg_sets_ns)]
stringout = color('raw image ')
for msg_index in range(len(image_pipeline_msg_ns_average[:(- 1)])):
stringout += (' → ' + color((image_pipeline_msg_sets[0][msg_index].event.name + ' ({} ms) '.format(((image_pipeline_msg_ns_average[(msg_index + 1)] - image_pipeline_msg_ns_average[msg_index]) / 1000000.0))), fg=target_chain_colors_fg[msg_index], bg='black'))
stringout += color(('total ' + ' ({} ms) '.format(((image_pipeline_msg_ns_average[(- 1)] - image_pipeline_msg_ns_average[0]) / 1000000.0))), fg='black', bg='white')
print(stringout)<|docstring|>Doing averages may lead to negative numbers while subtracting the previous average.
This is only useful to get an intuition of the totals.<|endoftext|> |
295ecb614fa78e19d44624091bb55bb57b4a0a78f71220d0465d7f0791b3cd5a | def table(list_sets, list_sets_names):
'\n Creates a markdown table from a list of sets\n\n NOTE: assumes base is always the first set in list_sets, which\n is then used to calculate % of change.\n '
list_statistics = []
for sets in list_sets:
list_statistics.append(statistics(sets))
for stat_list_index in range(len(list_statistics)):
list_statistics[stat_list_index].insert(0, list_sets_names[stat_list_index])
list_statistics.insert(0, ['---', '---', '---', '---', '---', '---', '---', '---', '---'])
list_statistics.insert(0, [' ', 'Accel. Mean', 'Accel. RMS', 'Accel. Max ', 'Accel. Min', 'Mean', 'RMS', 'Max', 'Min'])
baseline = list_statistics[2]
length_list = [len(row) for row in list_statistics]
column_width = max(length_list)
count = 0
for row in list_statistics:
row_str = ' | '
if (count == 2):
for element_index in range(len(row)):
if (type(row[element_index]) != str):
if (row[element_index] > baseline[element_index]):
row_str += ((('**{:.2f}** ms'.format(row[element_index]) + ' (:small_red_triangle_down: `') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += ((('**{:.2f}** ms'.format(row[element_index]) + ' (`') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += (row[element_index] + ' | ')
else:
for element_index in range(len(row)):
if (type(row[element_index]) != str):
if (row[element_index] > baseline[element_index]):
row_str += ((('{:.2f} ms'.format(row[element_index]) + ' (:small_red_triangle_down: `') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += ((('{:.2f} ms'.format(row[element_index]) + ' (`') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += (row[element_index] + ' | ')
count += 1
print(row_str) | Creates a markdown table from a list of sets
NOTE: assumes base is always the first set in list_sets, which
is then used to calculate % of change. | graphs/perception/perception_2nodes/launch/analyse_rectify_resize.launch.py | table | dirksavage88/acceleration_examples | 0 | python | def table(list_sets, list_sets_names):
'\n Creates a markdown table from a list of sets\n\n NOTE: assumes base is always the first set in list_sets, which\n is then used to calculate % of change.\n '
list_statistics = []
for sets in list_sets:
list_statistics.append(statistics(sets))
for stat_list_index in range(len(list_statistics)):
list_statistics[stat_list_index].insert(0, list_sets_names[stat_list_index])
list_statistics.insert(0, ['---', '---', '---', '---', '---', '---', '---', '---', '---'])
list_statistics.insert(0, [' ', 'Accel. Mean', 'Accel. RMS', 'Accel. Max ', 'Accel. Min', 'Mean', 'RMS', 'Max', 'Min'])
baseline = list_statistics[2]
length_list = [len(row) for row in list_statistics]
column_width = max(length_list)
count = 0
for row in list_statistics:
row_str = ' | '
if (count == 2):
for element_index in range(len(row)):
if (type(row[element_index]) != str):
if (row[element_index] > baseline[element_index]):
row_str += ((('**{:.2f}** ms'.format(row[element_index]) + ' (:small_red_triangle_down: `') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += ((('**{:.2f}** ms'.format(row[element_index]) + ' (`') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += (row[element_index] + ' | ')
else:
for element_index in range(len(row)):
if (type(row[element_index]) != str):
if (row[element_index] > baseline[element_index]):
row_str += ((('{:.2f} ms'.format(row[element_index]) + ' (:small_red_triangle_down: `') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += ((('{:.2f} ms'.format(row[element_index]) + ' (`') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += (row[element_index] + ' | ')
count += 1
print(row_str) | def table(list_sets, list_sets_names):
'\n Creates a markdown table from a list of sets\n\n NOTE: assumes base is always the first set in list_sets, which\n is then used to calculate % of change.\n '
list_statistics = []
for sets in list_sets:
list_statistics.append(statistics(sets))
for stat_list_index in range(len(list_statistics)):
list_statistics[stat_list_index].insert(0, list_sets_names[stat_list_index])
list_statistics.insert(0, ['---', '---', '---', '---', '---', '---', '---', '---', '---'])
list_statistics.insert(0, [' ', 'Accel. Mean', 'Accel. RMS', 'Accel. Max ', 'Accel. Min', 'Mean', 'RMS', 'Max', 'Min'])
baseline = list_statistics[2]
length_list = [len(row) for row in list_statistics]
column_width = max(length_list)
count = 0
for row in list_statistics:
row_str = ' | '
if (count == 2):
for element_index in range(len(row)):
if (type(row[element_index]) != str):
if (row[element_index] > baseline[element_index]):
row_str += ((('**{:.2f}** ms'.format(row[element_index]) + ' (:small_red_triangle_down: `') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += ((('**{:.2f}** ms'.format(row[element_index]) + ' (`') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += (row[element_index] + ' | ')
else:
for element_index in range(len(row)):
if (type(row[element_index]) != str):
if (row[element_index] > baseline[element_index]):
row_str += ((('{:.2f} ms'.format(row[element_index]) + ' (:small_red_triangle_down: `') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += ((('{:.2f} ms'.format(row[element_index]) + ' (`') + '{:.2f}'.format(get_change(row[element_index], baseline[element_index]))) + '`%) | ')
else:
row_str += (row[element_index] + ' | ')
count += 1
print(row_str)<|docstring|>Creates a markdown table from a list of sets
NOTE: assumes base is always the first set in list_sets, which
is then used to calculate % of change.<|endoftext|> |
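table() relies on a get_change helper that does not appear in this record. A plausible definition, consistent with how the percentages are formatted above (this is an assumption, not the repository's actual code):

def get_change(current, baseline):  # assumed helper: percent difference vs. baseline
    if baseline == 0:
        return 0.0
    return abs(current - baseline) / baseline * 100.0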
bf99d90b0e3e10a665df03b3d1e1fe6e8533b73f433d321d90452a27446b8821 | def tabular(client, datasets, *, columns=None):
'Format datasets with a tabular output.'
if (not columns):
columns = 'id,created,short_name,creators,tags,version'
return tabulate(collection=datasets, columns=columns, columns_mapping=DATASETS_COLUMNS) | Format datasets with a tabular output. | renku/core/commands/format/datasets.py | tabular | lorenzo-cavazzi/renku-python | 0 | python | def tabular(client, datasets, *, columns=None):
if (not columns):
columns = 'id,created,short_name,creators,tags,version'
return tabulate(collection=datasets, columns=columns, columns_mapping=DATASETS_COLUMNS) | def tabular(client, datasets, *, columns=None):
if (not columns):
columns = 'id,created,short_name,creators,tags,version'
return tabulate(collection=datasets, columns=columns, columns_mapping=DATASETS_COLUMNS)<|docstring|>Format datasets with a tabular output.<|endoftext|> |
df09198e21ce9a59be6755873c3a3da027c797b13f05b89eb9ea990bff5a9452 | def jsonld(client, datasets, **kwargs):
'Format datasets as JSON-LD.'
data = [asjsonld(dataset, basedir=os.path.relpath('.', start=str(dataset.__reference__.parent))) for dataset in datasets]
return dumps(data, indent=2) | Format datasets as JSON-LD. | renku/core/commands/format/datasets.py | jsonld | lorenzo-cavazzi/renku-python | 0 | python | def jsonld(client, datasets, **kwargs):
data = [asjsonld(dataset, basedir=os.path.relpath('.', start=str(dataset.__reference__.parent))) for dataset in datasets]
return dumps(data, indent=2) | def jsonld(client, datasets, **kwargs):
data = [asjsonld(dataset, basedir=os.path.relpath('.', start=str(dataset.__reference__.parent))) for dataset in datasets]
return dumps(data, indent=2)<|docstring|>Format datasets as JSON-LD.<|endoftext|> |
9bfb5e3b7c566565a8f5a8ef976af899799b0f28c9a007237ae6a38e541caabe | def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module:
'\n In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and\n setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root`\n '
batch_norm_without_running_stats(root)
for obj in root.modules():
batch_norm_without_running_stats(obj)
return root | In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and
setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root` | functorch/experimental/batch_norm_replacement.py | replace_all_batch_norm_modules_ | bryant1410/functorch | 423 | python | def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module:
'\n In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and\n setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root`\n '
batch_norm_without_running_stats(root)
for obj in root.modules():
batch_norm_without_running_stats(obj)
return root | def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module:
'\n In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and\n setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root`\n '
batch_norm_without_running_stats(root)
for obj in root.modules():
batch_norm_without_running_stats(obj)
return root<|docstring|>In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and
setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root`<|endoftext|> |
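A minimal usage sketch for replace_all_batch_norm_modules_, assuming the helper behaves as the docstring describes (the network below is illustrative):

import torch.nn as nn
from functorch.experimental import replace_all_batch_norm_modules_

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
replace_all_batch_norm_modules_(net)  # updates the module tree in place
bn = net[1]
print(bn.running_mean, bn.running_var, bn.track_running_stats)  # None None False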
05faef4732e3638d36b4976b426e4be1450f7958ea14c20e8eee7eae34fa56d6 | def compileShaders(self, vertShader, indexed=False):
'Compiles the vertex/fragment shader programs (by creating a\n :class:`.GLSLShader` instance).\n\n If the :attr:`.VectorOpts.colourImage` property is set, the ``glvolume``\n fragment shader is used. Otherwise, the ``glvector`` fragment shader\n is used.\n '
if (self.shader is not None):
self.shader.destroy()
opts = self.opts
useVolumeFragShader = (opts.colourImage is not None)
self.useVolumeFragShader = useVolumeFragShader
if useVolumeFragShader:
fragShader = 'glvolume'
else:
fragShader = 'glvector'
vertSrc = shaders.getVertexShader(vertShader)
fragSrc = shaders.getFragmentShader(fragShader)
return shaders.GLSLShader(vertSrc, fragSrc, indexed) | Compiles the vertex/fragment shader programs (by creating a
:class:`.GLSLShader` instance).
If the :attr:`.VectorOpts.colourImage` property is set, the ``glvolume``
fragment shader is used. Otherwise, the ``glvector`` fragment shader
is used. | fsleyes/gl/gl21/glvector_funcs.py | compileShaders | pauldmccarthy/fsleyes | 12 | python | def compileShaders(self, vertShader, indexed=False):
'Compiles the vertex/fragment shader programs (by creating a\n :class:`.GLSLShader` instance).\n\n If the :attr:`.VectorOpts.colourImage` property is set, the ``glvolume``\n fragment shader is used. Otherwise, the ``glvector`` fragment shader\n is used.\n '
if (self.shader is not None):
self.shader.destroy()
opts = self.opts
useVolumeFragShader = (opts.colourImage is not None)
self.useVolumeFragShader = useVolumeFragShader
if useVolumeFragShader:
fragShader = 'glvolume'
else:
fragShader = 'glvector'
vertSrc = shaders.getVertexShader(vertShader)
fragSrc = shaders.getFragmentShader(fragShader)
return shaders.GLSLShader(vertSrc, fragSrc, indexed) | def compileShaders(self, vertShader, indexed=False):
'Compiles the vertex/fragment shader programs (by creating a\n :class:`.GLSLShader` instance).\n\n If the :attr:`.VectorOpts.colourImage` property is set, the ``glvolume``\n fragment shader is used. Otherwise, the ``glvector`` fragment shader\n is used.\n '
if (self.shader is not None):
self.shader.destroy()
opts = self.opts
useVolumeFragShader = (opts.colourImage is not None)
self.useVolumeFragShader = useVolumeFragShader
if useVolumeFragShader:
fragShader = 'glvolume'
else:
fragShader = 'glvector'
vertSrc = shaders.getVertexShader(vertShader)
fragSrc = shaders.getFragmentShader(fragShader)
return shaders.GLSLShader(vertSrc, fragSrc, indexed)<|docstring|>Compiles the vertex/fragment shader programs (by creating a
:class:`.GLSLShader` instance).
If the :attr:`.VectorOpts.colourImage` property is set, the ``glvolume``
fragment shader is used. Otherwise, the ``glvector`` fragment shader
is used.<|endoftext|> |
ffd884c3cd187c541bac96edd8997ea62a6e8bfc571a710657233adb92512dcf | def updateShaderState(self, useSpline=False):
'Updates the state of the vector vertex fragment shader. The fragment\n shader may be either the ``glvolume`` or the ``glvector`` shader.\n '
changed = False
opts = self.opts
shader = self.shader
imageShape = self.vectorImage.shape[:3]
(modLow, modHigh) = self.getModulateRange()
(clipLow, clipHigh) = self.getClippingRange()
if (opts.modulateImage is None):
modShape = [1, 1, 1]
else:
modShape = opts.modulateImage.shape[:3]
if (opts.clipImage is None):
clipShape = [1, 1, 1]
else:
clipShape = opts.clipImage.shape[:3]
modMode = {'brightness': 0, 'alpha': 1}[opts.modulateMode]
clipXform = self.getAuxTextureXform('clip')
colourXform = self.getAuxTextureXform('colour')
modXform = self.getAuxTextureXform('modulate')
changed |= self.shader.set('clipCoordXform', clipXform)
changed |= self.shader.set('colourCoordXform', colourXform)
changed |= self.shader.set('modCoordXform', modXform)
if self.useVolumeFragShader:
voxValXform = self.colourTexture.voxValXform
img2CmapXform = affine.concat(self.cmapTexture.getCoordinateTransform(), voxValXform)
changed |= shader.set('clipTexture', 1)
changed |= shader.set('imageTexture', 2)
changed |= shader.set('colourTexture', 3)
changed |= shader.set('negColourTexture', 3)
changed |= shader.set('img2CmapXform', img2CmapXform)
changed |= shader.set('imageShape', imageShape)
changed |= shader.set('imageIsClip', False)
changed |= shader.set('useNegCmap', False)
changed |= shader.set('useSpline', useSpline)
changed |= shader.set('clipLow', clipLow)
changed |= shader.set('clipHigh', clipHigh)
changed |= shader.set('invertClip', False)
else:
if (self.vectorImage.niftiDataType == constants.NIFTI_DT_RGB24):
voxValXform = affine.scaleOffsetXform(2, (- 1))
else:
voxValXform = self.imageTexture.voxValXform
(colours, colourXform) = self.getVectorColours()
changed |= shader.set('modulateTexture', 0)
changed |= shader.set('clipTexture', 1)
changed |= shader.set('vectorTexture', 4)
changed |= shader.set('xColour', colours[0])
changed |= shader.set('yColour', colours[1])
changed |= shader.set('zColour', colours[2])
changed |= shader.set('colourXform', colourXform)
changed |= shader.set('voxValXform', voxValXform)
changed |= shader.set('imageShape', imageShape)
changed |= shader.set('modImageShape', modShape)
changed |= shader.set('clipImageShape', clipShape)
changed |= shader.set('clipLow', clipLow)
changed |= shader.set('clipHigh', clipHigh)
changed |= shader.set('modLow', modLow)
changed |= shader.set('modHigh', modHigh)
changed |= shader.set('useSpline', useSpline)
changed |= shader.set('modulateMode', modMode)
return changed | Updates the state of the vector vertex fragment shader. The fragment
shader may be either the ``glvolume`` or the ``glvector`` shader. | fsleyes/gl/gl21/glvector_funcs.py | updateShaderState | pauldmccarthy/fsleyes | 12 | python | def updateShaderState(self, useSpline=False):
'Updates the state of the vector vertex fragment shader. The fragment\n shader may be either the ``glvolume`` or the ``glvector`` shader.\n '
changed = False
opts = self.opts
shader = self.shader
imageShape = self.vectorImage.shape[:3]
(modLow, modHigh) = self.getModulateRange()
(clipLow, clipHigh) = self.getClippingRange()
if (opts.modulateImage is None):
modShape = [1, 1, 1]
else:
modShape = opts.modulateImage.shape[:3]
if (opts.clipImage is None):
clipShape = [1, 1, 1]
else:
clipShape = opts.clipImage.shape[:3]
modMode = {'brightness': 0, 'alpha': 1}[opts.modulateMode]
clipXform = self.getAuxTextureXform('clip')
colourXform = self.getAuxTextureXform('colour')
modXform = self.getAuxTextureXform('modulate')
changed |= self.shader.set('clipCoordXform', clipXform)
changed |= self.shader.set('colourCoordXform', colourXform)
changed |= self.shader.set('modCoordXform', modXform)
if self.useVolumeFragShader:
voxValXform = self.colourTexture.voxValXform
img2CmapXform = affine.concat(self.cmapTexture.getCoordinateTransform(), voxValXform)
changed |= shader.set('clipTexture', 1)
changed |= shader.set('imageTexture', 2)
changed |= shader.set('colourTexture', 3)
changed |= shader.set('negColourTexture', 3)
changed |= shader.set('img2CmapXform', img2CmapXform)
changed |= shader.set('imageShape', imageShape)
changed |= shader.set('imageIsClip', False)
changed |= shader.set('useNegCmap', False)
changed |= shader.set('useSpline', useSpline)
changed |= shader.set('clipLow', clipLow)
changed |= shader.set('clipHigh', clipHigh)
changed |= shader.set('invertClip', False)
else:
if (self.vectorImage.niftiDataType == constants.NIFTI_DT_RGB24):
voxValXform = affine.scaleOffsetXform(2, (- 1))
else:
voxValXform = self.imageTexture.voxValXform
(colours, colourXform) = self.getVectorColours()
changed |= shader.set('modulateTexture', 0)
changed |= shader.set('clipTexture', 1)
changed |= shader.set('vectorTexture', 4)
changed |= shader.set('xColour', colours[0])
changed |= shader.set('yColour', colours[1])
changed |= shader.set('zColour', colours[2])
changed |= shader.set('colourXform', colourXform)
changed |= shader.set('voxValXform', voxValXform)
changed |= shader.set('imageShape', imageShape)
changed |= shader.set('modImageShape', modShape)
changed |= shader.set('clipImageShape', clipShape)
changed |= shader.set('clipLow', clipLow)
changed |= shader.set('clipHigh', clipHigh)
changed |= shader.set('modLow', modLow)
changed |= shader.set('modHigh', modHigh)
changed |= shader.set('useSpline', useSpline)
changed |= shader.set('modulateMode', modMode)
return changed | def updateShaderState(self, useSpline=False):
'Updates the state of the vector vertex fragment shader. The fragment\n shader may be either the ``glvolume`` or the ``glvector`` shader.\n '
changed = False
opts = self.opts
shader = self.shader
imageShape = self.vectorImage.shape[:3]
(modLow, modHigh) = self.getModulateRange()
(clipLow, clipHigh) = self.getClippingRange()
if (opts.modulateImage is None):
modShape = [1, 1, 1]
else:
modShape = opts.modulateImage.shape[:3]
if (opts.clipImage is None):
clipShape = [1, 1, 1]
else:
clipShape = opts.clipImage.shape[:3]
modMode = {'brightness': 0, 'alpha': 1}[opts.modulateMode]
clipXform = self.getAuxTextureXform('clip')
colourXform = self.getAuxTextureXform('colour')
modXform = self.getAuxTextureXform('modulate')
changed |= self.shader.set('clipCoordXform', clipXform)
changed |= self.shader.set('colourCoordXform', colourXform)
changed |= self.shader.set('modCoordXform', modXform)
if self.useVolumeFragShader:
voxValXform = self.colourTexture.voxValXform
img2CmapXform = affine.concat(self.cmapTexture.getCoordinateTransform(), voxValXform)
changed |= shader.set('clipTexture', 1)
changed |= shader.set('imageTexture', 2)
changed |= shader.set('colourTexture', 3)
changed |= shader.set('negColourTexture', 3)
changed |= shader.set('img2CmapXform', img2CmapXform)
changed |= shader.set('imageShape', imageShape)
changed |= shader.set('imageIsClip', False)
changed |= shader.set('useNegCmap', False)
changed |= shader.set('useSpline', useSpline)
changed |= shader.set('clipLow', clipLow)
changed |= shader.set('clipHigh', clipHigh)
changed |= shader.set('invertClip', False)
else:
if (self.vectorImage.niftiDataType == constants.NIFTI_DT_RGB24):
voxValXform = affine.scaleOffsetXform(2, (- 1))
else:
voxValXform = self.imageTexture.voxValXform
(colours, colourXform) = self.getVectorColours()
changed |= shader.set('modulateTexture', 0)
changed |= shader.set('clipTexture', 1)
changed |= shader.set('vectorTexture', 4)
changed |= shader.set('xColour', colours[0])
changed |= shader.set('yColour', colours[1])
changed |= shader.set('zColour', colours[2])
changed |= shader.set('colourXform', colourXform)
changed |= shader.set('voxValXform', voxValXform)
changed |= shader.set('imageShape', imageShape)
changed |= shader.set('modImageShape', modShape)
changed |= shader.set('clipImageShape', clipShape)
changed |= shader.set('clipLow', clipLow)
changed |= shader.set('clipHigh', clipHigh)
changed |= shader.set('modLow', modLow)
changed |= shader.set('modHigh', modHigh)
changed |= shader.set('useSpline', useSpline)
changed |= shader.set('modulateMode', modMode)
return changed<|docstring|>Updates the state of the vector vertex fragment shader. The fragment
shader may be either the ``glvolume`` or the ``glvector`` shader.<|endoftext|> |
2f2f24e3de47baf608a7ae73784dea17f66c43d2e06a465e4b1ba6202674f4fe | def corners_to_box(x0, y0, x1, y1):
'convert two corners (x0, y0, x1, y1) to (x, y, width, height)'
(x0, x1) = (min(x0, x1), max(x0, x1))
(y0, y1) = (min(y0, y1), max(y0, y1))
return (x0, y0, ((x1 - x0) + 1), ((y1 - y0) + 1)) | convert two corners (x0, y0, x1, y1) to (x, y, width, height) | termpixels/util.py | corners_to_box | loganzartman/termpixels | 17 | python | def corners_to_box(x0, y0, x1, y1):
(x0, x1) = (min(x0, x1), max(x0, x1))
(y0, y1) = (min(y0, y1), max(y0, y1))
return (x0, y0, ((x1 - x0) + 1), ((y1 - y0) + 1)) | def corners_to_box(x0, y0, x1, y1):
(x0, x1) = (min(x0, x1), max(x0, x1))
(y0, y1) = (min(y0, y1), max(y0, y1))
return (x0, y0, ((x1 - x0) + 1), ((y1 - y0) + 1))<|docstring|>convert two corners (x0, y0, x1, y1) to (x, y, width, height)<|endoftext|> |
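A quick check of corners_to_box; the corners may be given in any order:

print(corners_to_box(5, 7, 2, 3))  # (2, 3, 4, 5): origin (2, 3), width 4, height 5
print(corners_to_box(0, 0, 0, 0))  # (0, 0, 1, 1): a single cell has size 1x1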
2eaafc0176b8d223b0352e41a0f4ce375f969ae4a85a4317a23e642f8e891f2e | def set_ambiguous_is_wide(is_wide):
' set whether ambiguous characters are considered to be wide '
global _ambiguous_is_wide
if (_ambiguous_is_wide != is_wide):
_ambiguous_is_wide = is_wide
terminal_char_len.cache_clear() | set whether ambiguous characters are considered to be wide | termpixels/util.py | set_ambiguous_is_wide | loganzartman/termpixels | 17 | python | def set_ambiguous_is_wide(is_wide):
' '
global _ambiguous_is_wide
if (_ambiguous_is_wide != is_wide):
_ambiguous_is_wide = is_wide
terminal_char_len.cache_clear() | def set_ambiguous_is_wide(is_wide):
' '
global _ambiguous_is_wide
if (_ambiguous_is_wide != is_wide):
_ambiguous_is_wide = is_wide
terminal_char_len.cache_clear()<|docstring|>set whether ambiguous characters are considered to be wide<|endoftext|> |
cbafb8765a33ee94b6f7060c3b2e87d0767bd8d1b9d3a43f945fa5f4ab97e885 | @lru_cache(1024)
def terminal_char_len(ch):
' return the width of a character in terminal cells '
if (ch == '\t'):
return None
if (not terminal_printable(ch)):
return 0
wide = (['F', 'W', 'A'] if _ambiguous_is_wide else ['F', 'W'])
return (2 if (east_asian_width(ch) in wide) else 1) | return the width of a character in terminal cells | termpixels/util.py | terminal_char_len | loganzartman/termpixels | 17 | python | @lru_cache(1024)
def terminal_char_len(ch):
' '
if (ch == '\t'):
return None
if (not terminal_printable(ch)):
return 0
wide = (['F', 'W', 'A'] if _ambiguous_is_wide else ['F', 'W'])
return (2 if (east_asian_width(ch) in wide) else 1) | @lru_cache(1024)
def terminal_char_len(ch):
' '
if (ch == '\t'):
return None
if (not terminal_printable(ch)):
return 0
wide = (['F', 'W', 'A'] if _ambiguous_is_wide else ['F', 'W'])
return (2 if (east_asian_width(ch) in wide) else 1)<|docstring|>return the width of a character in terminal cells<|endoftext|> |
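A few illustrative calls; the exact widths follow unicodedata.east_asian_width, with ambiguous characters controlled by the module flag above:

print(terminal_char_len('A'))     # 1: narrow Latin letter
print(terminal_char_len('你'))    # 2: East Asian wide character
print(terminal_char_len('\x07'))  # 0: control characters are not printable
print(terminal_char_len('\t'))    # None: tabs have no fixed cell width here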
ffaadc74bcebe06e5e3e95f9f11c2b2e94032540b1d4f5065ad6763c43d97937 | def terminal_len(s):
' return the width of a string in terminal cells '
return sum(map(terminal_char_len, s)) | return the width of a string in terminal cells | termpixels/util.py | terminal_len | loganzartman/termpixels | 17 | python | def terminal_len(s):
' '
return sum(map(terminal_char_len, s)) | def terminal_len(s):
' '
return sum(map(terminal_char_len, s))<|docstring|>return the width of a string in terminal cells<|endoftext|> |
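Since terminal_len just sums the per-character widths, mixed-width strings add up as expected:

print(terminal_len('ab你'))  # 1 + 1 + 2 = 4 terminal cells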
f72981b04643fa96ad84c72670fdfcd5a1e4dc9e6a5d5c9f1f466550d785bdcb | def terminal_printable(ch):
' determine if a character is "printable" '
return (not category(ch).startswith('C')) | determine if a character is "printable" | termpixels/util.py | terminal_printable | loganzartman/termpixels | 17 | python | def terminal_printable(ch):
' '
return (not category(ch).startswith('C')) | def terminal_printable(ch):
' '
return (not category(ch).startswith('C'))<|docstring|>determine if a character is "printable"<|endoftext|> |
d2434ec1fbac4212befa5fa68b0d8ac01bc143740e340e03a12c9b6c648e224e | def splitlines_print(s):
' like str.splitlines() but keeps all empty lines '
return _newline_regex.split(s) | like str.splitlines() but keeps all empty lines | termpixels/util.py | splitlines_print | loganzartman/termpixels | 17 | python | def splitlines_print(s):
' '
return _newline_regex.split(s) | def splitlines_print(s):
' '
return _newline_regex.split(s)<|docstring|>like str.splitlines() but keeps all empty lines<|endoftext|> |
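splitlines_print depends on a module-level _newline_regex that is not part of this record; a definition along the following lines (an assumption) reproduces the documented behaviour:

import re
_newline_regex = re.compile('\r\n|\r|\n')  # assumed definition

print(splitlines_print('a\n\nb\n'))  # ['a', '', 'b', ''] -- empty lines survive
print('a\n\nb\n'.splitlines())       # ['a', '', 'b']     -- trailing empty line dropped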
522cfad349f2bfd1059217adc834b364a4802ce2399a18527bb487757fe2ddc4 | def wrap_text(text, line_len, *, tab_size=4, word_sep=re.compile('\\s+|\\W'), break_word=False, hyphen='', newline='\n'):
' returns a terminal-line-wrapped version of text '
text = text.replace('\t', (' ' * tab_size))
hl = terminal_len(hyphen)
buf = []
i = 0
col = 0
while (i < len(text)):
match = word_sep.search(text, i)
word = text[i:]
sep = ''
if match:
word = text[i:match.start()]
sep = match.group(0)
i = match.end()
else:
i = len(text)
wl = terminal_len(word)
while ((col + wl) > line_len):
if ((break_word and (col < (line_len - hl))) or (col == 0)):
while ((col + terminal_char_len(word[0])) <= (line_len - hl)):
buf.append(word[0])
col += terminal_char_len(word[0])
word = word[1:]
buf.append(hyphen)
buf.append(newline)
col = 0
wl = terminal_len(word)
buf.append(word)
col += wl
sl = terminal_len(sep)
if ((col + sl) > line_len):
while ((col + terminal_char_len(sep[0])) <= line_len):
buf.append(sep[0])
col += terminal_char_len(sep[0])
sep = sep[1:]
buf.append(newline)
col = 0
else:
buf.append(sep)
col += sl
return ''.join(buf) | returns a terminal-line-wrapped version of text | termpixels/util.py | wrap_text | loganzartman/termpixels | 17 | python | def wrap_text(text, line_len, *, tab_size=4, word_sep=re.compile('\\s+|\\W'), break_word=False, hyphen='', newline='\n'):
' '
text = text.replace('\t', (' ' * tab_size))
hl = terminal_len(hyphen)
buf = []
i = 0
col = 0
while (i < len(text)):
match = word_sep.search(text, i)
word = text[i:]
sep = ''
if match:
word = text[i:match.start()]
sep = match.group(0)
i = match.end()
else:
i = len(text)
wl = terminal_len(word)
while ((col + wl) > line_len):
if ((break_word and (col < (line_len - hl))) or (col == 0)):
while ((col + terminal_char_len(word[0])) <= (line_len - hl)):
buf.append(word[0])
col += terminal_char_len(word[0])
word = word[1:]
buf.append(hyphen)
buf.append(newline)
col = 0
wl = terminal_len(word)
buf.append(word)
col += wl
sl = terminal_len(sep)
if ((col + sl) > line_len):
while ((col + terminal_char_len(sep[0])) <= line_len):
buf.append(sep[0])
col += terminal_char_len(sep[0])
sep = sep[1:]
buf.append(newline)
col = 0
else:
buf.append(sep)
col += sl
return ''.join(buf) | def wrap_text(text, line_len, *, tab_size=4, word_sep=re.compile('\\s+|\\W'), break_word=False, hyphen='', newline='\n'):
' '
text = text.replace('\t', (' ' * tab_size))
hl = terminal_len(hyphen)
buf = []
i = 0
col = 0
while (i < len(text)):
match = word_sep.search(text, i)
word = text[i:]
sep = ''
if match:
word = text[i:match.start()]
sep = match.group(0)
i = match.end()
else:
i = len(text)
wl = terminal_len(word)
while ((col + wl) > line_len):
if ((break_word and (col < (line_len - hl))) or (col == 0)):
while ((col + terminal_char_len(word[0])) <= (line_len - hl)):
buf.append(word[0])
col += terminal_char_len(word[0])
word = word[1:]
buf.append(hyphen)
buf.append(newline)
col = 0
wl = terminal_len(word)
buf.append(word)
col += wl
sl = terminal_len(sep)
if ((col + sl) > line_len):
while ((col + terminal_char_len(sep[0])) <= line_len):
buf.append(sep[0])
col += terminal_char_len(sep[0])
sep = sep[1:]
buf.append(newline)
col = 0
else:
buf.append(sep)
col += sl
return ''.join(buf)<|docstring|>returns a terminal-line-wrapped version of text<|endoftext|>
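A usage sketch for wrap_text at a width of 10 terminal cells (output reconstructed by hand; wrapped lines may keep a trailing separator):

print(wrap_text('the quick brown fox jumps over the lazy dog', 10))
# the quick
# brown fox
# jumps over
# the lazy
# dog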
54e9d7db9d44f067bbbcac1e8dc6f73ffe41eb6f7b1b94f43f1765265ed758e7 | @property
def is_active(self) -> bool:
'Return whether or not the hook is currently active (i.e., whether a Seagrass event that\n uses the hook is currently executing.)'
return self.__is_active | Return whether or not the hook is currently active (i.e., whether a Seagrass event that
uses the hook is currently executing.) | seagrass/hooks/runtime_audit_hook.py | is_active | kernelmethod/Seagrass | 0 | python | @property
def is_active(self) -> bool:
'Return whether or not the hook is currently active (i.e., whether a Seagrass event that\n uses the hook is currently executing.)'
return self.__is_active | @property
def is_active(self) -> bool:
'Return whether or not the hook is currently active (i.e., whether a Seagrass event that\n uses the hook is currently executing.)'
return self.__is_active<|docstring|>Return whether or not the hook is currently active (i.e., whether a Seagrass event that
uses the hook is currently executing.)<|endoftext|> |
9c4479ebd91965dafb788edd5426d9edc8ceec88d923af0d0b2f240bdb99492a | @property
def current_event(self) -> t.Optional[str]:
"Returns the current Seagrass event being executed that's hooked by this function. If no\n events using this hook are being executed, ``current_event`` is ``None``."
return self.__current_event | Returns the current Seagrass event being executed that's hooked by this function. If no
events using this hook are being executed, ``current_event`` is ``None``. | seagrass/hooks/runtime_audit_hook.py | current_event | kernelmethod/Seagrass | 0 | python | @property
def current_event(self) -> t.Optional[str]:
"Returns the current Seagrass event being executed that's hooked by this function. If no\n events using this hook are being executed, ``current_event`` is ``None``."
return self.__current_event | @property
def current_event(self) -> t.Optional[str]:
"Returns the current Seagrass event being executed that's hooked by this function. If no\n events using this hook are being executed, ``current_event`` is ``None``."
return self.__current_event<|docstring|>Returns the current Seagrass event being executed that's hooked by this function. If no
events using this hook are being executed, ``current_event`` is ``None``.<|endoftext|> |
7e4b8b380f54b91e1af9b9d43375a7572dd8a18680bb9ac41fc8087e1e058b13 | def __update(func: t.Callable[(..., R)]) -> t.Callable[(..., R)]:
'Function decorator that causes functions to reset the current_event and is_active\n properties every time it gets called.'
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.__update_properties()
return wrapper | Function decorator that causes functions to reset the current_event and is_active
properties every time it gets called. | seagrass/hooks/runtime_audit_hook.py | __update | kernelmethod/Seagrass | 0 | python | def __update(func: t.Callable[(..., R)]) -> t.Callable[(..., R)]:
'Function decorator that causes functions to reset the current_event and is_active\n properties every time it gets called.'
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.__update_properties()
return wrapper | def __update(func: t.Callable[(..., R)]) -> t.Callable[(..., R)]:
'Function decorator that causes functions to reset the current_event and is_active\n properties every time it gets called.'
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.__update_properties()
return wrapper<|docstring|>Function decorator that causes functions to reset the current_event and is_active
properties every time it gets called.<|endoftext|> |
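The decorator above is the usual try/finally wrapper pattern: the cleanup runs whether or not the wrapped method raises. A generic standalone sketch of the same idea (names are illustrative):

from functools import wraps

def refresh_after(update):
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            finally:
                update(self)  # runs even on exceptions, like __update_properties above
        return wrapper
    return decorator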
0a3d2a44f57a5f542d09a8bc38622bedd5b9dbff412d0fde415dd20040ff3cc7 | def __create_sys_hook(self) -> t.Callable[([str, t.Tuple[(t.Any, ...)]], None)]:
'Creates wrapper around the sys_hook abstract method that first checks whether the hook\n is currently active before it executes anything. This is the function that actually\n gets added with sys.addaudithook, not sys_hook.'
def __sys_hook(event: str, args: t.Tuple[(t.Any, ...)]) -> None:
if self.is_active:
try:
self.sys_hook(event, args)
except Exception as ex:
if self.propagate_errors:
raise ex
else:
logger = get_audit_logger(None)
if (logger is not None):
with self.__disable_runtime_hook():
logger.error('%s raised in %s.sys_hook: %s', ex.__class__.__name__, self.__class__.__name__, ex)
return __sys_hook | Creates wrapper around the sys_hook abstract method that first checks whether the hook
is currently active before it executes anything. This is the function that actually
gets added with sys.addaudithook, not sys_hook. | seagrass/hooks/runtime_audit_hook.py | __create_sys_hook | kernelmethod/Seagrass | 0 | python | def __create_sys_hook(self) -> t.Callable[([str, t.Tuple[(t.Any, ...)]], None)]:
'Creates wrapper around the sys_hook abstract method that first checks whether the hook\n is currently active before it executes anything. This is the function that actually\n gets added with sys.addaudithook, not sys_hook.'
def __sys_hook(event: str, args: t.Tuple[(t.Any, ...)]) -> None:
if self.is_active:
try:
self.sys_hook(event, args)
except Exception as ex:
if self.propagate_errors:
raise ex
else:
logger = get_audit_logger(None)
if (logger is not None):
with self.__disable_runtime_hook():
logger.error('%s raised in %s.sys_hook: %s', ex.__class__.__name__, self.__class__.__name__, ex)
return __sys_hook | def __create_sys_hook(self) -> t.Callable[([str, t.Tuple[(t.Any, ...)]], None)]:
'Creates wrapper around the sys_hook abstract method that first checks whether the hook\n is currently active before it executes anything. This is the function that actually\n gets added with sys.addaudithook, not sys_hook.'
def __sys_hook(event: str, args: t.Tuple[(t.Any, ...)]) -> None:
if self.is_active:
try:
self.sys_hook(event, args)
except Exception as ex:
if self.propagate_errors:
raise ex
else:
logger = get_audit_logger(None)
if (logger is not None):
with self.__disable_runtime_hook():
logger.error('%s raised in %s.sys_hook: %s', ex.__class__.__name__, self.__class__.__name__, ex)
return __sys_hook<|docstring|>Creates wrapper around the sys_hook abstract method that first checks whether the hook
is currently active before it executes anything. This is the function that actually
gets added with sys.addaudithook, not sys_hook.<|endoftext|> |
c2289bee767a4a94a755659f11d506ba3405b4bab37220cdf91f035f1e981c74 | @contextmanager
def __disable_runtime_hook(self) -> t.Iterator[None]:
'Temporarily disable the runtime hook.'
is_active = self.__is_active
self.__is_active = False
try:
(yield None)
finally:
self.__is_active = is_active | Temporarily disable the runtime hook. | seagrass/hooks/runtime_audit_hook.py | __disable_runtime_hook | kernelmethod/Seagrass | 0 | python | @contextmanager
def __disable_runtime_hook(self) -> t.Iterator[None]:
is_active = self.__is_active
self.__is_active = False
try:
(yield None)
finally:
self.__is_active = is_active | @contextmanager
def __disable_runtime_hook(self) -> t.Iterator[None]:
is_active = self.__is_active
self.__is_active = False
try:
(yield None)
finally:
self.__is_active = is_active<|docstring|>Temporarily disable the runtime hook.<|endoftext|>
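The context manager above is a plain save/restore guard. A generic standalone version of the same idea (illustrative names):

from contextlib import contextmanager

@contextmanager
def temporarily(obj, attr, value):  # set obj.attr to value, restore on exit
    saved = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, saved)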
0632701844918ba11ed64bb76d620b52bd7e5429759b112dd97ca4cbadc6586e | def test_choice_2(self):
"dict_values instances in py3 weren't identified as sequences"
d = {'foo': 1, 'bar': 2}
r = testdata.choice(d.values())
self.assertTrue((r in set(d.values()))) | dict_values instances in py3 weren't identified as sequences | tests/testdata_test.py | test_choice_2 | Jaymon/testdata | 8 | python | def test_choice_2(self):
d = {'foo': 1, 'bar': 2}
r = testdata.choice(d.values())
self.assertTrue((r in set(d.values()))) | def test_choice_2(self):
d = {'foo': 1, 'bar': 2}
r = testdata.choice(d.values())
self.assertTrue((r in set(d.values())))<|docstring|>dict_values instances in py3 weren't identified as sequences<|endoftext|> |
fb52aa21055454524d30e922ebffe3881c9761992745fb78aab790a868e307c1 | def test_get_between_datetime_same_microseconds(self):
'noticed a problem when using the same now'
now = datetime.datetime.utcnow()
start_dt = testdata.get_past_datetime(now)
stop_dt = testdata.get_between_datetime(start_dt, now)
self.assertGreater(stop_dt, start_dt) | noticed a problem when using the same now | tests/testdata_test.py | test_get_between_datetime_same_microseconds | Jaymon/testdata | 8 | python | def test_get_between_datetime_same_microseconds(self):
now = datetime.datetime.utcnow()
start_dt = testdata.get_past_datetime(now)
stop_dt = testdata.get_between_datetime(start_dt, now)
self.assertGreater(stop_dt, start_dt) | def test_get_between_datetime_same_microseconds(self):
now = datetime.datetime.utcnow()
start_dt = testdata.get_past_datetime(now)
stop_dt = testdata.get_between_datetime(start_dt, now)
self.assertGreater(stop_dt, start_dt)<|docstring|>noticed a problem when using the same now<|endoftext|> |
4d0bef05ecf92425887ec209a3db6d012f175c243a14152ea948eebeee7fa580 | def evaluate_reg_param(inputs, targets, folds, centres, scale, test_error_linear, reg_params=None):
'\n Evaluate, then plot the performance of different regularisation parameters.\n '
feature_mapping = construct_rbf_feature_mapping(centres, scale)
design_matrix = feature_mapping(inputs)
if (reg_params is None):
reg_params = np.logspace((- 15), 5, 30)
num_values = reg_params.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
print('Calculating means and standard deviations of train and test errors...')
for (r, reg_param) in enumerate(reg_params):
(train_errors, test_errors) = cv_evaluation_linear_model(design_matrix, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_st_dev_error = np.std(train_errors)
test_st_dev_error = np.std(test_errors)
train_mean_errors[r] = train_mean_error
test_mean_errors[r] = test_mean_error
train_st_dev_errors[r] = train_st_dev_error
test_st_dev_errors[r] = test_st_dev_error
(fig, ax) = plot_train_test_errors('$\\lambda$', reg_params, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(reg_params, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(reg_params, lower, upper, alpha=0.2, color='r')
ax.set_xscale('log')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Reg. Param. with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_reg_params_cross_validation.png', fmt='png')
plt.show() | Evaluate, then plot the performance of different regularisation parameters. | wine-quality-prediction/code/regression_rbf_cross_validation.py | evaluate_reg_param | f-z/machine-learning-regression-project | 4 | python | def evaluate_reg_param(inputs, targets, folds, centres, scale, test_error_linear, reg_params=None):
'\n \n '
feature_mapping = construct_rbf_feature_mapping(centres, scale)
design_matrix = feature_mapping(inputs)
if (reg_params is None):
reg_params = np.logspace((- 15), 5, 30)
num_values = reg_params.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
print('Calculating means and standard deviations of train and test errors...')
for (r, reg_param) in enumerate(reg_params):
(train_errors, test_errors) = cv_evaluation_linear_model(design_matrix, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_st_dev_error = np.std(train_errors)
test_st_dev_error = np.std(test_errors)
train_mean_errors[r] = train_mean_error
test_mean_errors[r] = test_mean_error
train_st_dev_errors[r] = train_st_dev_error
test_st_dev_errors[r] = test_st_dev_error
(fig, ax) = plot_train_test_errors('$\\lambda$', reg_params, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(reg_params, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(reg_params, lower, upper, alpha=0.2, color='r')
ax.set_xscale('log')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Reg. Param. with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_reg_params_cross_validation.png', fmt='png')
plt.show() | def evaluate_reg_param(inputs, targets, folds, centres, scale, test_error_linear, reg_params=None):
'\n \n '
feature_mapping = construct_rbf_feature_mapping(centres, scale)
design_matrix = feature_mapping(inputs)
if (reg_params is None):
reg_params = np.logspace((- 15), 5, 30)
num_values = reg_params.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
print('Calculating means and standard deviations of train and test errors...')
for (r, reg_param) in enumerate(reg_params):
(train_errors, test_errors) = cv_evaluation_linear_model(design_matrix, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_st_dev_error = np.std(train_errors)
test_st_dev_error = np.std(test_errors)
train_mean_errors[r] = train_mean_error
test_mean_errors[r] = test_mean_error
train_st_dev_errors[r] = train_st_dev_error
test_st_dev_errors[r] = test_st_dev_error
(fig, ax) = plot_train_test_errors('$\\lambda$', reg_params, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(reg_params, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(reg_params, lower, upper, alpha=0.2, color='r')
ax.set_xscale('log')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Reg. Param. with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_reg_params_cross_validation.png', fmt='png')
plt.show()<|docstring|>Evaluate, then plot the performance of different regularisation parameters.<|endoftext|> |
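cv_evaluation_linear_model expects pre-built folds; their exact format is not shown in these records. One common choice (an assumption) is a list of boolean train/test mask pairs:

import numpy as np

def make_folds(n, num_folds, seed=0):  # assumed fold format: (train_mask, test_mask) pairs
    rng = np.random.default_rng(seed)
    assignment = rng.integers(0, num_folds, size=n)
    return [(assignment != f, assignment == f) for f in range(num_folds)]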
ffec4da174f3f03387c5a746b8c255b76398f486555881f780bbfc441439a3a0 | def evaluate_scale(inputs, targets, folds, centres, reg_param, test_error_linear, scales=None):
'\n Evaluate, then plot the performance of different basis function scales.\n '
if (scales is None):
scales = np.logspace(0, 8, 30)
num_values = scales.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
for (s, scale) in enumerate(scales):
feature_mapping = construct_rbf_feature_mapping(centres, scale)
design_matrix = feature_mapping(inputs)
(train_errors, test_errors) = cv_evaluation_linear_model(design_matrix, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_st_dev_error = np.std(train_errors)
test_st_dev_error = np.std(test_errors)
train_mean_errors[s] = train_mean_error
test_mean_errors[s] = test_mean_error
train_st_dev_errors[s] = train_st_dev_error
test_st_dev_errors[s] = test_st_dev_error
(fig, ax) = plot_train_test_errors('scale', scales, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(scales, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(scales, lower, upper, alpha=0.2, color='r')
ax.set_xscale('log')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Scales with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_scales_cross_validation.png', fmt='png')
plt.show() | Evaluate, then plot the performance of different basis function scales. | wine-quality-prediction/code/regression_rbf_cross_validation.py | evaluate_scale | f-z/machine-learning-regression-project | 4 | python | def evaluate_scale(inputs, targets, folds, centres, reg_param, test_error_linear, scales=None):
'\n \n '
if (scales is None):
scales = np.logspace(0, 8, 30)
num_values = scales.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
for (s, scale) in enumerate(scales):
feature_mapping = construct_rbf_feature_mapping(centres, scale)
design_matrix = feature_mapping(inputs)
(train_errors, test_errors) = cv_evaluation_linear_model(design_matrix, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_st_dev_error = np.std(train_errors)
test_st_dev_error = np.std(test_errors)
train_mean_errors[s] = train_mean_error
test_mean_errors[s] = test_mean_error
train_st_dev_errors[s] = train_st_dev_error
test_st_dev_errors[s] = test_st_dev_error
(fig, ax) = plot_train_test_errors('scale', scales, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(scales, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(scales, lower, upper, alpha=0.2, color='r')
ax.set_xscale('log')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Scales with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_scales_cross_validation.png', fmt='png')
plt.show() | def evaluate_scale(inputs, targets, folds, centres, reg_param, test_error_linear, scales=None):
'\n \n '
if (scales is None):
scales = np.logspace(0, 8, 30)
num_values = scales.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
for (s, scale) in enumerate(scales):
feature_mapping = construct_rbf_feature_mapping(centres, scale)
design_matrix = feature_mapping(inputs)
(train_errors, test_errors) = cv_evaluation_linear_model(design_matrix, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_st_dev_error = np.std(train_errors)
test_st_dev_error = np.std(test_errors)
train_mean_errors[s] = train_mean_error
test_mean_errors[s] = test_mean_error
train_st_dev_errors[s] = train_st_dev_error
test_st_dev_errors[s] = test_st_dev_error
(fig, ax) = plot_train_test_errors('scale', scales, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(scales, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(scales, lower, upper, alpha=0.2, color='r')
ax.set_xscale('log')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Scales with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_scales_cross_validation.png', fmt='png')
plt.show()<|docstring|>Evaluate, then plot the performance of different basis function scales.<|endoftext|> |
0148770cb920a3d645af779e0bcfaec51cd5a6e9e05103a7666dce01a2f69c24 | def evaluate_num_centres(inputs, targets, folds, scale, reg_param, test_error_linear, num_centres_sequence=None):
'\n Evaluate, then plot the performance of different numbers of basis\n function centres.\n '
if (num_centres_sequence is None):
num_centres_sequence = np.linspace(start=0.01, stop=1, num=20)
num_values = num_centres_sequence.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
n = inputs.shape[0]
for (c, centre_percentage) in enumerate(num_centres_sequence):
sample_fraction = centre_percentage
p = ((1 - sample_fraction), sample_fraction)
centres = inputs[(np.random.choice([False, True], size=n, p=p), :)]
feature_mapping = construct_rbf_feature_mapping(centres, scale)
designmtx = feature_mapping(inputs)
(train_errors, test_errors) = cv_evaluation_linear_model(designmtx, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_stdev_error = np.std(train_errors)
test_stdev_error = np.std(test_errors)
train_mean_errors[c] = train_mean_error
test_mean_errors[c] = test_mean_error
train_st_dev_errors[c] = train_stdev_error
test_st_dev_errors[c] = test_stdev_error
(fig, ax) = plot_train_test_errors('% of inputs as centres * 100', num_centres_sequence, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='r')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Centre Proportion with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_number_centres_cross_validation.png', fmt='png')
plt.show() | Evaluate, then plot the performance of different numbers of basis
function centres. | wine-quality-prediction/code/regression_rbf_cross_validation.py | evaluate_num_centres | f-z/machine-learning-regression-project | 4 | python | def evaluate_num_centres(inputs, targets, folds, scale, reg_param, test_error_linear, num_centres_sequence=None):
'\n Evaluate, then plot the performance of different numbers of basis\n function centres.\n '
if (num_centres_sequence is None):
num_centres_sequence = np.linspace(start=0.01, stop=1, num=20)
num_values = num_centres_sequence.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
n = inputs.shape[0]
for (c, centre_percentage) in enumerate(num_centres_sequence):
sample_fraction = centre_percentage
p = ((1 - sample_fraction), sample_fraction)
centres = inputs[(np.random.choice([False, True], size=n, p=p), :)]
feature_mapping = construct_rbf_feature_mapping(centres, scale)
designmtx = feature_mapping(inputs)
(train_errors, test_errors) = cv_evaluation_linear_model(designmtx, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_stdev_error = np.std(train_errors)
test_stdev_error = np.std(test_errors)
train_mean_errors[c] = train_mean_error
test_mean_errors[c] = test_mean_error
train_st_dev_errors[c] = train_stdev_error
test_st_dev_errors[c] = test_stdev_error
(fig, ax) = plot_train_test_errors('% of inputs as centres * 100', num_centres_sequence, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='r')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Centre Proportion with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_number_centres_cross_validation.png', format='png')
plt.show() | def evaluate_num_centres(inputs, targets, folds, scale, reg_param, test_error_linear, num_centres_sequence=None):
'\n Evaluate, then plot the performance of different numbers of basis\n function centres.\n '
if (num_centres_sequence is None):
num_centres_sequence = np.linspace(start=0.01, stop=1, num=20)
num_values = num_centres_sequence.size
num_folds = len(folds)
train_mean_errors = np.zeros(num_values)
test_mean_errors = np.zeros(num_values)
train_st_dev_errors = np.zeros(num_values)
test_st_dev_errors = np.zeros(num_values)
n = inputs.shape[0]
for (c, centre_percentage) in enumerate(num_centres_sequence):
sample_fraction = centre_percentage
p = ((1 - sample_fraction), sample_fraction)
centres = inputs[(np.random.choice([False, True], size=n, p=p), :)]
feature_mapping = construct_rbf_feature_mapping(centres, scale)
designmtx = feature_mapping(inputs)
(train_errors, test_errors) = cv_evaluation_linear_model(designmtx, targets, folds, reg_param=reg_param)
train_mean_error = np.mean(train_errors)
test_mean_error = np.mean(test_errors)
train_stdev_error = np.std(train_errors)
test_stdev_error = np.std(test_errors)
train_mean_errors[c] = train_mean_error
test_mean_errors[c] = test_mean_error
train_st_dev_errors[c] = train_stdev_error
test_st_dev_errors[c] = test_stdev_error
(fig, ax) = plot_train_test_errors('% of inputs as centres * 100', num_centres_sequence, train_mean_errors, test_mean_errors, test_error_linear)
lower = (train_mean_errors - (train_st_dev_errors / np.sqrt(num_folds)))
upper = (train_mean_errors + (train_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='b')
lower = (test_mean_errors - (test_st_dev_errors / np.sqrt(num_folds)))
upper = (test_mean_errors + (test_st_dev_errors / np.sqrt(num_folds)))
ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='r')
ax.set_ylim([0, 1])
ax.set_title('Train vs Test Error across Centre Proportion with Cross-validation')
fig.savefig('../plots/rbf/rbf_searching_number_centres_cross_validation.png', format='png')
plt.show()<|docstring|>Evaluate, then plot the performance of different numbers of basis
function centres.<|endoftext|> |
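A standalone sketch of the centre-subsampling step in the record above; the repo helpers (construct_rbf_feature_mapping, cv_evaluation_linear_model) are not reproduced, only the boolean-mask row selection is shown, on assumed toy data:
import numpy as np

rng = np.random.RandomState(30)
inputs = rng.randn(100, 4)  # toy data: 100 points, 4 features
sample_fraction = 0.2
mask = rng.choice([False, True], size=inputs.shape[0], p=(1 - sample_fraction, sample_fraction))
centres = inputs[mask, :]  # roughly 20 rows become RBF centres
print(centres.shape)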
01c48f8d3e3474ede2636b153a6cd7d554d4bd2a89c9106cdcac325fb426765c | def main(inputs, targets, test_error_linear, best_scale=None, best_reg_param=None, best_no_centres=None):
'\n This function contains example code that demonstrates how to use the \n functions defined in poly_fit_base for fitting polynomial curves to data.\n '
np.random.seed(30)
if (best_scale is None):
best_scale = 6.7
if (best_reg_param is None):
best_reg_param = 9.2e-08
print('\nPerforming cross-validation...')
num_folds = 5
folds = create_cv_folds(inputs.shape[0], num_folds)
std_inputs = standardise(inputs)
centres = std_inputs[(np.random.choice([False, True], size=std_inputs.shape[0], p=[(1 - best_no_centres), best_no_centres]), :)]
print('Evaluating reg. parameters...')
evaluate_reg_param(std_inputs, targets, folds, centres, best_scale, test_error_linear)
print('\nEvaluating scales...')
evaluate_scale(std_inputs, targets, folds, centres, best_reg_param, test_error_linear)
print('\nEvaluating proportion of centres...')
evaluate_num_centres(std_inputs, targets, folds, best_scale, best_reg_param, test_error_linear) | This function contains example code that demonstrates how to use the
functions defined in poly_fit_base for fitting polynomial curves to data. | wine-quality-prediction/code/regression_rbf_cross_validation.py | main | f-z/machine-learning-regression-project | 4 | python | def main(inputs, targets, test_error_linear, best_scale=None, best_reg_param=None, best_no_centres=None):
'\n This function contains example code that demonstrates how to use the \n functions defined in poly_fit_base for fitting polynomial curves to data.\n '
np.random.seed(30)
if (best_scale is None):
best_scale = 6.7
if (best_reg_param is None):
best_reg_param = 9.2e-08
print('\nPerforming cross-validation...')
num_folds = 5
folds = create_cv_folds(inputs.shape[0], num_folds)
std_inputs = standardise(inputs)
centres = std_inputs[(np.random.choice([False, True], size=std_inputs.shape[0], p=[(1 - best_no_centres), best_no_centres]), :)]
print('Evaluating reg. parameters...')
evaluate_reg_param(std_inputs, targets, folds, centres, best_scale, test_error_linear)
print('\nEvaluating scales...')
evaluate_scale(std_inputs, targets, folds, centres, best_reg_param, test_error_linear)
print('\nEvaluating proportion of centres...')
evaluate_num_centres(std_inputs, targets, folds, best_scale, best_reg_param, test_error_linear) | def main(inputs, targets, test_error_linear, best_scale=None, best_reg_param=None, best_no_centres=None):
'\n This function contains example code that demonstrates how to use the \n functions defined in poly_fit_base for fitting polynomial curves to data.\n '
np.random.seed(30)
if (best_scale is None):
best_scale = 6.7
if (best_reg_param is None):
best_reg_param = 9.2e-08
print('\nPerforming cross-validation...')
num_folds = 5
folds = create_cv_folds(inputs.shape[0], num_folds)
std_inputs = standardise(inputs)
centres = std_inputs[(np.random.choice([False, True], size=std_inputs.shape[0], p=[(1 - best_no_centres), best_no_centres]), :)]
print('Evaluating reg. parameters...')
evaluate_reg_param(std_inputs, targets, folds, centres, best_scale, test_error_linear)
print('\nEvaluating scales...')
evaluate_scale(std_inputs, targets, folds, centres, best_reg_param, test_error_linear)
print('\nEvaluating proportion of centres...')
evaluate_num_centres(std_inputs, targets, folds, best_scale, best_reg_param, test_error_linear)<|docstring|>This function contains example code that demonstrates how to use the
functions defined in poly_fit_base for fitting polynomial curves to data.<|endoftext|> |
352527d29b6253962e174e9d716e0c2c8cda23197e0b26ee0f8ee9c252052917 | def decorrelation_length(x, min_autocorrelation):
" decorrelation_length returns the first occurrence lag at which the autocorrelation becomes smaller than min_autocorrelation.\n\t\tInputs:\n\t\t- x [1-dim numpy array of floats]: the time series (the time is supposed to be on a regular grid with a timestep of 1)\n\t\t- min_autocorrelation [float]: the value of the autocorrelation for which we want the corresponding lag.\n\t\tOutputs:\n\t\t- mylength [int]: The first occurrence lag at which the autocorrelation becomes smaller than 'min_autocorrelation'.\n\t\t-----------------------------\n\t\tThis is part of WAVEPAL\n\t\t(C) 2016 G. Lenoir"
n = x.size
mylength = np.nan
lag_min = 0
lag_max = 9
mybreak = False
while (lag_max < n):
r = autocorrelation(x, lag_min, lag_max)
for k in range(10):
if (r[k] < min_autocorrelation):
mylength = (lag_min + k)
mybreak = True
break
if (mybreak == True):
break
lag_min += 10
lag_max += 10
return mylength | decorrelation_length returns the first occurrence lag at which the autocorrelation becomes smaller than min_autocorrelation.
Inputs:
- x [1-dim numpy array of floats]: the time series (the time is supposed to be on a regular grid with a timestep of 1)
- min_autocorrelation [float]: the value of the autocorrelation for which we want the corresponding lag.
Outputs:
- mylength [int]: The first occurrence lag at which the autocorrelation becomes smaller than 'min_autocorrelation'.
-----------------------------
This is part of WAVEPAL
(C) 2016 G. Lenoir | wavepal/decorrelation_length.py | decorrelation_length | metegenez/WAVEPAL | 22 | python | def decorrelation_length(x, min_autocorrelation):
" decorrelation_length returns the first occurrence lag at which the autocorrelation becomes smaller than min_autocorrelation.\n\t\tInputs:\n\t\t- x [1-dim numpy array of floats]: the time series (the time is supposed to be on a regular grid with a timestep of 1)\n\t\t- min_autocorrelation [float]: the value of the autocorrelation for which we want the corresponding lag.\n\t\tOutputs:\n\t\t- mylength [int]: The first occurrence lag at which the autocorrelation becomes smaller than 'min_autocorrelation'.\n\t\t-----------------------------\n\t\tThis is part of WAVEPAL\n\t\t(C) 2016 G. Lenoir"
n = x.size
mylength = np.nan
lag_min = 0
lag_max = 9
mybreak = False
while (lag_max < n):
r = autocorrelation(x, lag_min, lag_max)
for k in range(10):
if (r[k] < min_autocorrelation):
mylength = (lag_min + k)
mybreak = True
break
if (mybreak == True):
break
lag_min += 10
lag_max += 10
return mylength | def decorrelation_length(x, min_autocorrelation):
" decorrelation_length returns the first occurrence lag at which the autocorrelation becomes smaller than min_autocorrelation.\n\t\tInputs:\n\t\t- x [1-dim numpy array of floats]: the time series (the time is supposed to be on a regular grid with a timestep of 1)\n\t\t- min_autocorrelation [float]: the value of the autocorrelation for which we want the corresponding lag.\n\t\tOutputs:\n\t\t- mylength [int]: The first occurrence lag at which the autocorrelation becomes smaller than 'min_autocorrelation'.\n\t\t-----------------------------\n\t\tThis is part of WAVEPAL\n\t\t(C) 2016 G. Lenoir"
n = x.size
mylength = np.nan
lag_min = 0
lag_max = 9
mybreak = False
while (lag_max < n):
r = autocorrelation(x, lag_min, lag_max)
for k in range(10):
if (r[k] < min_autocorrelation):
mylength = (lag_min + k)
mybreak = True
break
if (mybreak == True):
break
lag_min += 10
lag_max += 10
return mylength<|docstring|>decorrelation_length returns the first occurrence lag at which the autocorrelation becomes smaller than min_autocorrelation.
Inputs:
- x [1-dim numpy array of floats]: the time series (the time is supposed to be on a regular grid with a timestep of 1)
- min_autocorrelation [float]: the value of the autocorrelation for which we want the corresponding lag.
Outputs:
- mylength [int]: The first occurrence lag at which the autocorrelation becomes smaller than 'min_autocorrelation'.
-----------------------------
This is part of WAVEPAL
(C) 2016 G. Lenoir<|endoftext|> |
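A hedged reimplementation sketch of the same idea: WAVEPAL's autocorrelation helper is not shown in the record, so this version estimates each lag directly with np.corrcoef, on an assumed AR(1) test series:
import numpy as np

def decorrelation_length_simple(x, min_autocorrelation):
    n = x.size
    for lag in range(n - 1):
        r = 1.0 if lag == 0 else np.corrcoef(x[:-lag], x[lag:])[0, 1]
        if r < min_autocorrelation:
            return lag  # first lag whose autocorrelation drops below the threshold
    return np.nan

rng = np.random.RandomState(0)
ar1 = np.zeros(2000)
for t in range(1, ar1.size):  # AR(1) series with coefficient 0.9
    ar1[t] = 0.9 * ar1[t - 1] + rng.randn()
print(decorrelation_length_simple(ar1, 1 / np.e))  # about 10 for this process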
4f9e04b8a7d290bc8dd7692e40c0abff1c11f1f3b5185f848eb359b340896636 | def test_update_player():
'Check that the character position changes on the game field'
(playa, x, y) = generate_level(['.....', '...@...', '....'])
first_pos_y = playa.rect.y
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_DOWN)
player_group.update(e1, playa)
assert ((first_pos_y - playa.rect.y) == (- 10))
first_pos_y = playa.rect.y
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_UP)
player_group.update(e1, playa)
assert ((first_pos_y - playa.rect.y) == 10)
first_pos_x = playa.rect.x
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_RIGHT)
player_group.update(e1, playa)
assert ((first_pos_x - playa.rect.x) == (- 10))
first_pos_x = playa.rect.x
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_LEFT)
player_group.update(e1, playa)
assert ((first_pos_x - playa.rect.x) == 10) | Check that the character position changes on the game field | test_main_game.py | test_update_player | lotofmyself/Death-stranding_game | 0 | python | def test_update_player():
(playa, x, y) = generate_level(['.....', '...@...', '....'])
first_pos_y = playa.rect.y
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_DOWN)
player_group.update(e1, playa)
assert ((first_pos_y - playa.rect.y) == (- 10))
first_pos_y = playa.rect.y
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_UP)
player_group.update(e1, playa)
assert ((first_pos_y - playa.rect.y) == 10)
first_pos_x = playa.rect.x
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_RIGHT)
player_group.update(e1, playa)
assert ((first_pos_x - playa.rect.x) == (- 10))
first_pos_x = playa.rect.x
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_LEFT)
player_group.update(e1, playa)
assert ((first_pos_x - playa.rect.x) == 10) | def test_update_player():
(playa, x, y) = generate_level(['.....', '...@...', '....'])
first_pos_y = playa.rect.y
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_DOWN)
player_group.update(e1, playa)
assert ((first_pos_y - playa.rect.y) == (- 10))
first_pos_y = playa.rect.y
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_UP)
player_group.update(e1, playa)
assert ((first_pos_y - playa.rect.y) == 10)
first_pos_x = playa.rect.x
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_RIGHT)
player_group.update(e1, playa)
assert ((first_pos_x - playa.rect.x) == (- 10))
first_pos_x = playa.rect.x
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_LEFT)
player_group.update(e1, playa)
assert ((first_pos_x - playa.rect.x) == 10)<|docstring|>Check that the character position changes on the game field<|endoftext|>
4f61e7eb0c37171d68584a9fd5d9893dd5cfc2576790db9b6696113888be55b2 | def test_load_level():
'Check that the level is filled correctly'
as_result = ['.....................................................', '.....................................................', '.....................................................', '.....................................................', '...........###############################...........', '...........#@.............#............-##...........', '...........#.....#####....####.........###...........', '...........#..##########...#...###########...........', '...........##.###.......#####.######.....#...........', '...........#......##########...########.##...........', '...........#....######...###...####....###...........', '...........####...%###..................##...........', '...........###..##################.#######...........', '...........#...#.....#.............####..#...........', '...........###.##############......####..#...........', '...........#....##############....#####..#...........', '...........####...##.....######...#####.##...........', '...........###...#...#.....######..#######...........', '...........#.......#######..............##...........', '...........###############################...........', '.....................................................', '.....................................................', '.....................................................', '.....................................................']
assert (load_level('1lvl.txt') == as_result) | Check that the level is filled correctly | test_main_game.py | test_load_level | lotofmyself/Death-stranding_game | 0 | python | def test_load_level():
as_result = ['.....................................................', '.....................................................', '.....................................................', '.....................................................', '...........###############################...........', '...........#@.............#............-##...........', '...........#.....#####....####.........###...........', '...........#..##########...#...###########...........', '...........##.###.......#####.######.....#...........', '...........#......##########...########.##...........', '...........#....######...###...####....###...........', '...........####...%###..................##...........', '...........###..##################.#######...........', '...........#...#.....#.............####..#...........', '...........###.##############......####..#...........', '...........#....##############....#####..#...........', '...........####...##.....######...#####.##...........', '...........###...#...#.....######..#######...........', '...........#.......#######..............##...........', '...........###############################...........', '.....................................................', '.....................................................', '.....................................................', '.....................................................']
assert (load_level('1lvl.txt') == as_result) | def test_load_level():
as_result = ['.....................................................', '.....................................................', '.....................................................', '.....................................................', '...........###############################...........', '...........#@.............#............-##...........', '...........#.....#####....####.........###...........', '...........#..##########...#...###########...........', '...........##.###.......#####.######.....#...........', '...........#......##########...########.##...........', '...........#....######...###...####....###...........', '...........####...%###..................##...........', '...........###..##################.#######...........', '...........#...#.....#.............####..#...........', '...........###.##############......####..#...........', '...........#....##############....#####..#...........', '...........####...##.....######...#####.##...........', '...........###...#...#.....######..#######...........', '...........#.......#######..............##...........', '...........###############################...........', '.....................................................', '.....................................................', '.....................................................', '.....................................................']
assert (load_level('1lvl.txt') == as_result)<|docstring|>Check that the level is filled correctly<|endoftext|>
ae8fdf4b406b70294fac315fc883160e3d04608189ec828340a3a0421322906a | def test_generate_level():
'Check that the player coordinates are determined and dimensions are assigned to it'
(playa, x, y) = generate_level('...@...')
assert ((playa.x == 0) and (playa.y == 3) and (playa.rect == (15, 155, 40, 40)))
(playa, x, y) = generate_level('......')
assert (playa is None) | Check that the player coordinates are determined and dimensions are assigned to it | test_main_game.py | test_generate_level | lotofmyself/Death-stranding_game | 0 | python | def test_generate_level():
(playa, x, y) = generate_level('...@...')
assert ((playa.x == 0) and (playa.y == 3) and (playa.rect == (15, 155, 40, 40)))
(playa, x, y) = generate_level('......')
assert (playa is None) | def test_generate_level():
(playa, x, y) = generate_level('...@...')
assert ((playa.x == 0) and (playa.y == 3) and (playa.rect == (15, 155, 40, 40)))
(playa, x, y) = generate_level('......')
assert (playa is None)<|docstring|>Check that the player coordinates are determined and dimensions are assigned to it<|endoftext|>
398888c0d54896d91f44879876d297dd0686d35882f1d479ad91cb3fd1249530 | def test_update_camera():
'Check that the camera coordinates change'
level = load_level('1lvl.txt')
(playa, x, y) = generate_level(level)
camera = Camera((x, y))
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_DOWN)
player_group.update(e1, playa)
n_pos_x = camera.dx
n_pos_y = camera.dy
camera.update(playa)
assert ((camera.dx != n_pos_x) and (camera.dy != n_pos_y)) | Check that the camera coordinates change | test_main_game.py | test_update_camera | lotofmyself/Death-stranding_game | 0 | python | def test_update_camera():
level = load_level('1lvl.txt')
(playa, x, y) = generate_level(level)
camera = Camera((x, y))
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_DOWN)
player_group.update(e1, playa)
n_pos_x = camera.dx
n_pos_y = camera.dy
camera.update(playa)
assert ((camera.dx != n_pos_x) and (camera.dy != n_pos_y)) | def test_update_camera():
level = load_level('1lvl.txt')
(playa, x, y) = generate_level(level)
camera = Camera((x, y))
e1 = pygame.event.Event(pygame.K_DOWN, key=pygame.K_DOWN)
player_group.update(e1, playa)
n_pos_x = camera.dx
n_pos_y = camera.dy
camera.update(playa)
assert ((camera.dx != n_pos_x) and (camera.dy != n_pos_y))<|docstring|>Check that the camera coordinates change<|endoftext|>
7a244cfd68a4f1d337ffff5cea92ea11f775a36818d8d720a8961c8591d88533 | def download_bin(self, url, file_name, **kw):
'Download a binary file'
if os.path.exists(file_name):
return
if kw.pop('stream', True):
chunk_size = kw.pop('chunk_size', 1024)
res = self.get(url, stream=True, **kw)
with open(file_name, 'wb') as f:
for chunk in res.iter_content(chunk_size=chunk_size):
if (not chunk):
break
f.write(chunk)
else:
res = self.get(url, **kw)
with open(file_name, 'wb') as f:
f.write(res.content) | Download a binary file | utils/crawler.py | download_bin | billchenchina/mooc-dl | 0 | python | def download_bin(self, url, file_name, **kw):
if os.path.exists(file_name):
return
if kw.pop('stream', True):
chunk_size = kw.pop('chunk_size', 1024)
res = self.get(url, stream=True, **kw)
with open(file_name, 'wb') as f:
for chunk in res.iter_content(chunk_size=chunk_size):
if (not chunk):
break
f.write(chunk)
else:
res = self.get(url, **kw)
with open(file_name, 'wb') as f:
f.write(res.content) | def download_bin(self, url, file_name, **kw):
if os.path.exists(file_name):
return
if kw.pop('stream', True):
chunk_size = kw.pop('chunk_size', 1024)
res = self.get(url, stream=True, **kw)
with open(file_name, 'wb') as f:
for chunk in res.iter_content(chunk_size=chunk_size):
if (not chunk):
break
f.write(chunk)
else:
res = self.get(url, **kw)
with open(file_name, 'wb') as f:
f.write(res.content)<|docstring|>Download a binary file<|endoftext|>
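The streamed branch above follows the standard requests pattern; a minimal sketch, assuming a plain requests session in place of the Crawler's self.get:
import requests

def download_binary(url, file_name, chunk_size=1024):
    res = requests.get(url, stream=True)  # body is fetched lazily
    with open(file_name, 'wb') as f:
        for chunk in res.iter_content(chunk_size=chunk_size):
            if not chunk:  # the record above stops on an empty chunk
                break
            f.write(chunk)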
c2db17c42c16a8a19833278932d282fb49b0f6cf7d834ac1e63283c6da479fd4 | def download_text(self, url, file_name, **kw):
'Download text and save the file in UTF-8 encoding'
if os.path.exists(file_name):
return
res = self.get(url, **kw)
res.encoding = res.apparent_encoding
with open(file_name, 'w', encoding='utf_8') as f:
f.write(res.text) | Download text and save the file in UTF-8 encoding | utils/crawler.py | download_text | billchenchina/mooc-dl | 0 | python | def download_text(self, url, file_name, **kw):
if os.path.exists(file_name):
return
res = self.get(url, **kw)
res.encoding = res.apparent_encoding
with open(file_name, 'w', encoding='utf_8') as f:
f.write(res.text) | def download_text(self, url, file_name, **kw):
if os.path.exists(file_name):
return
res = self.get(url, **kw)
res.encoding = res.apparent_encoding
with open(file_name, 'w', encoding='utf_8') as f:
f.write(res.text)<|docstring|>Download text and save the file in UTF-8 encoding<|endoftext|>
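A minimal sketch of the re-encoding trick above (the URL is hypothetical): requests sniffs the charset from the raw bytes via apparent_encoding before the text is decoded and written out as UTF-8:
import requests

res = requests.get('https://example.com')  # hypothetical URL
res.encoding = res.apparent_encoding  # charset sniffed from the raw bytes
with open('page.txt', 'w', encoding='utf_8') as f:
    f.write(res.text)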
1c7d98bf70998e4ef548b126986b3413391b7d848ca892cadd56132667437eb0 | @git_temp_home_func()
def test_operation_with_reset_with_multiprocess_conflict(self):
'\n Create a bunch of processes trying to push to the same repo.\n This sometimes creates a git locking issue and tests the operation push retry code.\n '
r1 = self._make_repo()
r1.write_temp_content(['file foo.txt "_foo" 644'])
r1.add(['foo.txt'])
r1.commit('add foo.txt', ['foo.txt'])
r1.push('origin', 'master')
def worker(n):
worker_tmp_root = self.make_temp_dir(suffix='worker-{}'.format(n))
worker_repo = git_repo(worker_tmp_root, address=r1.address)
worker_repo.clone_or_pull()
worker_repo.checkout('master')
def _op(repo):
old_content = repo.read_file('foo.txt', codec='utf8')
new_content = '{}\nworker {}'.format(old_content, n)
fp = repo.file_path('foo.txt')
file_util.save(fp, content=new_content, codec='utf8', mode=420)
worker_repo.operation_with_reset(_op, 'from worker {}'.format(n))
num_jobs = 9
jobs = []
for i in range(num_jobs):
p = multiprocessing.Process(target=worker, args=(i,))
jobs.append(p)
p.start()
for job in jobs:
job.join()
r2 = r1.make_temp_cloned_repo()
self.assertEqual(['_foo', 'worker 0', 'worker 1', 'worker 2', 'worker 3', 'worker 4', 'worker 5', 'worker 6', 'worker 7', 'worker 8'], sorted(r2.read_file('foo.txt', codec='utf8').split('\n'))) | Create a bunch of processes trying to push to the same repo.
This sometimes creates a git locking issue and tests the operation push retry code. | tests/lib/bes/git/test_git_repo.py | test_operation_with_reset_with_multiprocess_conflict | reconstruir/bes | 0 | python | @git_temp_home_func()
def test_operation_with_reset_with_multiprocess_conflict(self):
'\n Create a bunch of processes trying to push to the same repo.\n This sometimes creates a git locking issue and tests the operation push retry code.\n '
r1 = self._make_repo()
r1.write_temp_content(['file foo.txt "_foo" 644'])
r1.add(['foo.txt'])
r1.commit('add foo.txt', ['foo.txt'])
r1.push('origin', 'master')
def worker(n):
worker_tmp_root = self.make_temp_dir(suffix='worker-{}'.format(n))
worker_repo = git_repo(worker_tmp_root, address=r1.address)
worker_repo.clone_or_pull()
worker_repo.checkout('master')
def _op(repo):
old_content = repo.read_file('foo.txt', codec='utf8')
new_content = '{}\nworker {}'.format(old_content, n)
fp = repo.file_path('foo.txt')
file_util.save(fp, content=new_content, codec='utf8', mode=420)
worker_repo.operation_with_reset(_op, 'from worker {}'.format(n))
num_jobs = 9
jobs = []
for i in range(num_jobs):
p = multiprocessing.Process(target=worker, args=(i,))
jobs.append(p)
p.start()
for job in jobs:
job.join()
r2 = r1.make_temp_cloned_repo()
self.assertEqual(['_foo', 'worker 0', 'worker 1', 'worker 2', 'worker 3', 'worker 4', 'worker 5', 'worker 6', 'worker 7', 'worker 8'], sorted(r2.read_file('foo.txt', codec='utf8').split('\n'))) | @git_temp_home_func()
def test_operation_with_reset_with_multiprocess_conflict(self):
'\n Create a bunch of processes trying to push to the same repo.\n This sometimes creates a git locking issue and tests the operation push retry code.\n '
r1 = self._make_repo()
r1.write_temp_content(['file foo.txt "_foo" 644'])
r1.add(['foo.txt'])
r1.commit('add foo.txt', ['foo.txt'])
r1.push('origin', 'master')
def worker(n):
worker_tmp_root = self.make_temp_dir(suffix='worker-{}'.format(n))
worker_repo = git_repo(worker_tmp_root, address=r1.address)
worker_repo.clone_or_pull()
worker_repo.checkout('master')
def _op(repo):
old_content = repo.read_file('foo.txt', codec='utf8')
new_content = '{}\nworker {}'.format(old_content, n)
fp = repo.file_path('foo.txt')
file_util.save(fp, content=new_content, codec='utf8', mode=420)
worker_repo.operation_with_reset(_op, 'from worker {}'.format(n))
num_jobs = 9
jobs = []
for i in range(num_jobs):
p = multiprocessing.Process(target=worker, args=(i,))
jobs.append(p)
p.start()
for job in jobs:
job.join()
r2 = r1.make_temp_cloned_repo()
self.assertEqual(['_foo', 'worker 0', 'worker 1', 'worker 2', 'worker 3', 'worker 4', 'worker 5', 'worker 6', 'worker 7', 'worker 8'], sorted(r2.read_file('foo.txt', codec='utf8').split('\n')))<|docstring|>Create a bunch of processes trying to push to the same repo.
This sometimes creates a git locking issue and tests the operation push retry code.<|endoftext|> |
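The fan-out/join pattern the test exercises, reduced to a standalone sketch without the git fixtures:
import multiprocessing

def worker(n):
    print('worker {} done'.format(n))

if __name__ == '__main__':
    jobs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(9)]
    for p in jobs:
        p.start()  # all nine workers race against each other
    for p in jobs:
        p.join()   # parent blocks until every worker has finished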
c52f4cf08f21d009ce3a2787576292b131a645a27d7f54c637a88cffb82d98bf | @git_temp_home_func()
def test_head_info_empty_repo(self):
'Test head_info() works on an empty just created repo.'
tmp_dir = self.make_temp_dir()
git.init(tmp_dir)
r = git_repo(tmp_dir)
self.assertEqual(('nothing', None, None, None, None, None), r.head_info()) | Test head_info() works on an empty just created repo. | tests/lib/bes/git/test_git_repo.py | test_head_info_empty_repo | reconstruir/bes | 0 | python | @git_temp_home_func()
def test_head_info_empty_repo(self):
tmp_dir = self.make_temp_dir()
git.init(tmp_dir)
r = git_repo(tmp_dir)
self.assertEqual(('nothing', None, None, None, None, None), r.head_info()) | @git_temp_home_func()
def test_head_info_empty_repo(self):
tmp_dir = self.make_temp_dir()
git.init(tmp_dir)
r = git_repo(tmp_dir)
self.assertEqual(('nothing', None, None, None, None, None), r.head_info())<|docstring|>Test head_info() works on an empty just created repo.<|endoftext|> |
dff11404496c477d5de801b7a8a59966f332c6df98afbb003c9ae02c0ff3676b | def load_tests(loader, tests, pattern):
'Create test suite'
suite = unittest.TestSuite()
for test_suite in suites:
suite.addTest(test_suite)
return suite | Create test suite | test.py | load_tests | bogdan-kulynych/snowballing | 37 | python | def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
for test_suite in suites:
suite.addTest(test_suite)
return suite | def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
for test_suite in suites:
suite.addTest(test_suite)
return suite<|docstring|>Create test suite<|endoftext|> |
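The load_tests protocol in the record, shown in a minimal self-contained form; the module-level suites list is an assumption standing in for the project's own suites:
import unittest

class ExampleTest(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)

suites = [unittest.TestLoader().loadTestsFromTestCase(ExampleTest)]

def load_tests(loader, tests, pattern):
    suite = unittest.TestSuite()  # unittest uses this suite instead of discovery
    for test_suite in suites:
        suite.addTest(test_suite)
    return suite

if __name__ == '__main__':
    unittest.main()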
485b6b999ef02489b32c998b1f22d687db1a4a461cba409ae1c2edaec65f29c5 | def _set_random_states(estimator, random_state=None):
"Sets fixed random_state parameters for an estimator. Internal use only.\n Modified from sklearn/base.py\n\n Finds all parameters ending ``random_state`` and sets them to integers\n derived from ``random_state``.\n\n Parameters\n ----------\n estimator : estimator supporting get/set_params\n Estimator with potential randomness managed by random_state\n parameters.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Notes\n -----\n This does not necessarily set *all* ``random_state`` attributes that\n control an estimator's randomness, only those accessible through\n ``estimator.get_params()``. ``random_state``s not controlled include\n those belonging to:\n\n * cross-validation splitters\n * ``scipy.stats`` rvs\n "
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if ((key == 'random_state') or key.endswith('__random_state')):
to_set[key] = random_state.randint(MAX_INT)
if to_set:
estimator.set_params(**to_set) | Sets fixed random_state parameters for an estimator. Internal use only.
Modified from sklearn/base.py
Finds all parameters ending ``random_state`` and sets them to integers
derived from ``random_state``.
Parameters
----------
estimator : estimator supporting get/set_params
Estimator with potential randomness managed by random_state
parameters.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
This does not necessarily set *all* ``random_state`` attributes that
control an estimator's randomness, only those accessible through
``estimator.get_params()``. ``random_state``s not controlled include
those belonging to:
* cross-validation splitters
* ``scipy.stats`` rvs | pyod/models/feature_bagging.py | _set_random_states | vishalbelsare/pyod | 5,126 | python | def _set_random_states(estimator, random_state=None):
"Sets fixed random_state parameters for an estimator. Internal use only.\n Modified from sklearn/base.py\n\n Finds all parameters ending ``random_state`` and sets them to integers\n derived from ``random_state``.\n\n Parameters\n ----------\n estimator : estimator supporting get/set_params\n Estimator with potential randomness managed by random_state\n parameters.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Notes\n -----\n This does not necessarily set *all* ``random_state`` attributes that\n control an estimator's randomness, only those accessible through\n ``estimator.get_params()``. ``random_state``s not controlled include\n those belonging to:\n\n * cross-validation splitters\n * ``scipy.stats`` rvs\n "
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if ((key == 'random_state') or key.endswith('__random_state')):
to_set[key] = random_state.randint(MAX_INT)
if to_set:
estimator.set_params(**to_set) | def _set_random_states(estimator, random_state=None):
"Sets fixed random_state parameters for an estimator. Internal use only.\n Modified from sklearn/base.py\n\n Finds all parameters ending ``random_state`` and sets them to integers\n derived from ``random_state``.\n\n Parameters\n ----------\n estimator : estimator supporting get/set_params\n Estimator with potential randomness managed by random_state\n parameters.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Notes\n -----\n This does not necessarily set *all* ``random_state`` attributes that\n control an estimator's randomness, only those accessible through\n ``estimator.get_params()``. ``random_state``s not controlled include\n those belonging to:\n\n * cross-validation splitters\n * ``scipy.stats`` rvs\n "
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if ((key == 'random_state') or key.endswith('__random_state')):
to_set[key] = random_state.randint(MAX_INT)
if to_set:
estimator.set_params(**to_set)<|docstring|>Sets fixed random_state parameters for an estimator. Internal use only.
Modified from sklearn/base.py
Finds all parameters ending ``random_state`` and sets them to integers
derived from ``random_state``.
Parameters
----------
estimator : estimator supporting get/set_params
Estimator with potential randomness managed by random_state
parameters.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
This does not necessarily set *all* ``random_state`` attributes that
control an estimator's randomness, only those accessible through
``estimator.get_params()``. ``random_state``s not controlled include
those belonging to:
* cross-validation splitters
* ``scipy.stats`` rvs<|endoftext|> |
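How the key scan above behaves, demonstrated on a stub object with the get_params/set_params API rather than a real pyod estimator; MAX_INT is assumed to be np.iinfo(np.int32).max:
import numpy as np

class StubEstimator(object):
    def __init__(self):
        self.params = {'n_estimators': 10, 'random_state': None, 'base__random_state': None}
    def get_params(self, deep=True):
        return dict(self.params)
    def set_params(self, **kw):
        self.params.update(kw)

est = StubEstimator()
rng = np.random.RandomState(42)
to_set = {k: rng.randint(np.iinfo(np.int32).max)  # assumed value of MAX_INT
          for k in sorted(est.get_params(deep=True))
          if k == 'random_state' or k.endswith('__random_state')}
est.set_params(**to_set)
print(est.get_params())  # both *random_state keys now hold fixed integers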
010bc468bb291ec6f7a3c5e829d5bce8f2cfd5d1a555608d43703dcb55a17286 | def fit(self, X, y=None):
'Fit detector. y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n '
random_state = check_random_state(self.random_state)
X = check_array(X)
(self.n_samples_, self.n_features_) = (X.shape[0], X.shape[1])
self._set_n_classes(y)
check_parameter(self.n_features_, low=2, include_left=True, param_name='n_features')
self._validate_estimator(default=LOF(n_jobs=self.n_jobs))
self.min_features_ = int((0.5 * self.n_features_))
if isinstance(self.max_features, (numbers.Integral, np.integer)):
self.max_features_ = self.max_features
else:
self.max_features_ = int((self.max_features * self.n_features_))
check_parameter(self.max_features_, low=self.min_features_, param_name='max_features', high=self.n_features_, include_left=True, include_right=True)
self.estimators_ = []
self.estimators_features_ = []
n_more_estimators = (self.n_estimators - len(self.estimators_))
if (n_more_estimators < 0):
raise ValueError(('n_estimators=%d must be larger or equal to len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_))))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
self._seeds = seeds
for i in range(self.n_estimators):
random_state = np.random.RandomState(seeds[i])
features = generate_bagging_indices(random_state, self.bootstrap_features, self.n_features_, self.min_features_, (self.max_features_ + 1))
estimator = self._make_estimator(append=False, random_state=random_state)
estimator.fit(X[(:, features)])
self.estimators_.append(estimator)
self.estimators_features_.append(features)
all_decision_scores = self._get_decision_scores()
if (self.combination == 'average'):
self.decision_scores_ = average(all_decision_scores)
else:
self.decision_scores_ = maximization(all_decision_scores)
self._process_decision_scores()
return self | Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator. | pyod/models/feature_bagging.py | fit | vishalbelsare/pyod | 5,126 | python | def fit(self, X, y=None):
'Fit detector. y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n '
random_state = check_random_state(self.random_state)
X = check_array(X)
(self.n_samples_, self.n_features_) = (X.shape[0], X.shape[1])
self._set_n_classes(y)
check_parameter(self.n_features_, low=2, include_left=True, param_name='n_features')
self._validate_estimator(default=LOF(n_jobs=self.n_jobs))
self.min_features_ = int((0.5 * self.n_features_))
if isinstance(self.max_features, (numbers.Integral, np.integer)):
self.max_features_ = self.max_features
else:
self.max_features_ = int((self.max_features * self.n_features_))
check_parameter(self.max_features_, low=self.min_features_, param_name='max_features', high=self.n_features_, include_left=True, include_right=True)
self.estimators_ = []
self.estimators_features_ = []
n_more_estimators = (self.n_estimators - len(self.estimators_))
if (n_more_estimators < 0):
raise ValueError(('n_estimators=%d must be larger or equal to len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_))))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
self._seeds = seeds
for i in range(self.n_estimators):
random_state = np.random.RandomState(seeds[i])
features = generate_bagging_indices(random_state, self.bootstrap_features, self.n_features_, self.min_features_, (self.max_features_ + 1))
estimator = self._make_estimator(append=False, random_state=random_state)
estimator.fit(X[(:, features)])
self.estimators_.append(estimator)
self.estimators_features_.append(features)
all_decision_scores = self._get_decision_scores()
if (self.combination == 'average'):
self.decision_scores_ = average(all_decision_scores)
else:
self.decision_scores_ = maximization(all_decision_scores)
self._process_decision_scores()
return self | def fit(self, X, y=None):
'Fit detector. y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n '
random_state = check_random_state(self.random_state)
X = check_array(X)
(self.n_samples_, self.n_features_) = (X.shape[0], X.shape[1])
self._set_n_classes(y)
check_parameter(self.n_features_, low=2, include_left=True, param_name='n_features')
self._validate_estimator(default=LOF(n_jobs=self.n_jobs))
self.min_features_ = int((0.5 * self.n_features_))
if isinstance(self.max_features, (numbers.Integral, np.integer)):
self.max_features_ = self.max_features
else:
self.max_features_ = int((self.max_features * self.n_features_))
check_parameter(self.max_features_, low=self.min_features_, param_name='max_features', high=self.n_features_, include_left=True, include_right=True)
self.estimators_ = []
self.estimators_features_ = []
n_more_estimators = (self.n_estimators - len(self.estimators_))
if (n_more_estimators < 0):
raise ValueError(('n_estimators=%d must be larger or equal to len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_))))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
self._seeds = seeds
for i in range(self.n_estimators):
random_state = np.random.RandomState(seeds[i])
features = generate_bagging_indices(random_state, self.bootstrap_features, self.n_features_, self.min_features_, (self.max_features_ + 1))
estimator = self._make_estimator(append=False, random_state=random_state)
estimator.fit(X[(:, features)])
self.estimators_.append(estimator)
self.estimators_features_.append(features)
all_decision_scores = self._get_decision_scores()
if (self.combination == 'average'):
self.decision_scores_ = average(all_decision_scores)
else:
self.decision_scores_ = maximization(all_decision_scores)
self._process_decision_scores()
return self<|docstring|>Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.<|endoftext|> |
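The two combination rules used in fit, sketched directly in numpy on an assumed (n_samples, n_estimators) score matrix:
import numpy as np

scores = np.array([[0.1, 0.9, 0.4],
                   [0.7, 0.2, 0.6]])  # 2 samples x 3 sub-estimators
print(scores.mean(axis=1))  # 'average' combination
print(scores.max(axis=1))   # 'maximization' combination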
babae5b52d1ecbcb12753d4dcefe9631f23613eca706619bfdb2b9734d471e06 | def decision_function(self, X):
'Predict raw anomaly score of X using the fitted detector.\n\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n '
check_is_fitted(self, ['estimators_', 'estimators_features_', 'decision_scores_', 'threshold_', 'labels_'])
X = check_array(X)
if (self.n_features_ != X.shape[1]):
raise ValueError('Number of features of the model must match the input. Model n_features is {0} and input n_features is {1}.'.format(self.n_features_, X.shape[1]))
all_pred_scores = self._predict_decision_scores(X)
if (self.combination == 'average'):
return average(all_pred_scores)
else:
return maximization(all_pred_scores) | Predict raw anomaly score of X using the fitted detector.
The anomaly score of an input sample is computed based on different
detector algorithms. For consistency, outliers are assigned with
larger anomaly scores.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only
if they are supported by the base estimator.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples. | pyod/models/feature_bagging.py | decision_function | vishalbelsare/pyod | 5,126 | python | def decision_function(self, X):
'Predict raw anomaly score of X using the fitted detector.\n\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n '
check_is_fitted(self, ['estimators_', 'estimators_features_', 'decision_scores_', 'threshold_', 'labels_'])
X = check_array(X)
if (self.n_features_ != X.shape[1]):
raise ValueError('Number of features of the model must match the input. Model n_features is {0} and input n_features is {1}.'.format(self.n_features_, X.shape[1]))
all_pred_scores = self._predict_decision_scores(X)
if (self.combination == 'average'):
return average(all_pred_scores)
else:
return maximization(all_pred_scores) | def decision_function(self, X):
'Predict raw anomaly score of X using the fitted detector.\n\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n '
check_is_fitted(self, ['estimators_', 'estimators_features_', 'decision_scores_', 'threshold_', 'labels_'])
X = check_array(X)
if (self.n_features_ != X.shape[1]):
raise ValueError('Number of features of the model must match the input. Model n_features is {0} and input n_features is {1}.'.format(self.n_features_, X.shape[1]))
all_pred_scores = self._predict_decision_scores(X)
if (self.combination == 'average'):
return average(all_pred_scores)
else:
return maximization(all_pred_scores)<|docstring|>Predict raw anomaly score of X using the fitted detector.
The anomaly score of an input sample is computed based on different
detector algorithms. For consistency, outliers are assigned with
larger anomaly scores.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only
if they are supported by the base estimator.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples.<|endoftext|> |
78d12003b3f0e1dba4eb7324d5afc1ef964f599a034f9be9256153a6c3c4bcc7 | def _validate_estimator(self, default=None):
'Check the estimator and the n_estimator attribute, set the\n `base_estimator_` attribute.'
if (not isinstance(self.n_estimators, (numbers.Integral, np.integer))):
raise ValueError('n_estimators must be an integer, got {0}.'.format(type(self.n_estimators)))
if (self.n_estimators <= 0):
raise ValueError('n_estimators must be greater than zero, got {0}.'.format(self.n_estimators))
if (self.base_estimator is not None):
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if (self.base_estimator_ is None):
raise ValueError('base_estimator cannot be None')
if self.check_detector:
check_detector(self.base_estimator_) | Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute. | pyod/models/feature_bagging.py | _validate_estimator | vishalbelsare/pyod | 5,126 | python | def _validate_estimator(self, default=None):
'Check the estimator and the n_estimator attribute, set the\n `base_estimator_` attribute.'
if (not isinstance(self.n_estimators, (numbers.Integral, np.integer))):
raise ValueError('n_estimators must be an integer, got {0}.'.format(type(self.n_estimators)))
if (self.n_estimators <= 0):
raise ValueError('n_estimators must be greater than zero, got {0}.'.format(self.n_estimators))
if (self.base_estimator is not None):
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if (self.base_estimator_ is None):
raise ValueError('base_estimator cannot be None')
if self.check_detector:
check_detector(self.base_estimator_) | def _validate_estimator(self, default=None):
'Check the estimator and the n_estimator attribute, set the\n `base_estimator_` attribute.'
if (not isinstance(self.n_estimators, (numbers.Integral, np.integer))):
raise ValueError('n_estimators must be an integer, got {0}.'.format(type(self.n_estimators)))
if (self.n_estimators <= 0):
raise ValueError('n_estimators must be greater than zero, got {0}.'.format(self.n_estimators))
if (self.base_estimator is not None):
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if (self.base_estimator_ is None):
raise ValueError('base_estimator cannot be None')
if self.check_detector:
check_detector(self.base_estimator_)<|docstring|>Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute.<|endoftext|> |
1d6146ade98824485b8c3d73571757de19d6c95082e9622c2124af172a1de097 | def _make_estimator(self, append=True, random_state=None):
'Make and configure a copy of the `base_estimator_` attribute.\n\n sklearn/base.py\n\n Warning: This method should be used to properly instantiate new\n sub-estimators.\n '
estimator = clone(self.base_estimator_)
estimator.set_params(**self.estimator_params)
if (random_state is not None):
_set_random_states(estimator, random_state)
if append:
self.estimators_.append(estimator)
return estimator | Make and configure a copy of the `base_estimator_` attribute.
sklearn/base.py
Warning: This method should be used to properly instantiate new
sub-estimators. | pyod/models/feature_bagging.py | _make_estimator | vishalbelsare/pyod | 5,126 | python | def _make_estimator(self, append=True, random_state=None):
'Make and configure a copy of the `base_estimator_` attribute.\n\n sklearn/base.py\n\n Warning: This method should be used to properly instantiate new\n sub-estimators.\n '
estimator = clone(self.base_estimator_)
estimator.set_params(**self.estimator_params)
if (random_state is not None):
_set_random_states(estimator, random_state)
if append:
self.estimators_.append(estimator)
return estimator | def _make_estimator(self, append=True, random_state=None):
'Make and configure a copy of the `base_estimator_` attribute.\n\n sklearn/base.py\n\n Warning: This method should be used to properly instantiate new\n sub-estimators.\n '
estimator = clone(self.base_estimator_)
estimator.set_params(**self.estimator_params)
if (random_state is not None):
_set_random_states(estimator, random_state)
if append:
self.estimators_.append(estimator)
return estimator<|docstring|>Make and configure a copy of the `base_estimator_` attribute.
sklearn/base.py
Warning: This method should be used to properly instantiate new
sub-estimators.<|endoftext|> |
37844173608fe6ed253925eced667f4790e939c7234440f269ff1fa271eb8daf | def __len__(self):
'Returns the number of estimators in the ensemble.'
return len(self.estimators_) | Returns the number of estimators in the ensemble. | pyod/models/feature_bagging.py | __len__ | vishalbelsare/pyod | 5,126 | python | def __len__(self):
return len(self.estimators_) | def __len__(self):
return len(self.estimators_)<|docstring|>Returns the number of estimators in the ensemble.<|endoftext|> |
34bb9a4d620627f7dcbbee8e68bbe3349afe3153b686eb18e0f48199603b53c4 | def __getitem__(self, index):
"Returns the index'th estimator in the ensemble."
return self.estimators_[index] | Returns the index'th estimator in the ensemble. | pyod/models/feature_bagging.py | __getitem__ | vishalbelsare/pyod | 5,126 | python | def __getitem__(self, index):
return self.estimators_[index] | def __getitem__(self, index):
return self.estimators_[index]<|docstring|>Returns the index'th estimator in the ensemble.<|endoftext|> |
223cc5c782bc4a258a3720f6d415dab9e93dc43474e1f90e56f6a7431f5b6dd0 | def __iter__(self):
'Returns iterator over estimators in the ensemble.'
return iter(self.estimators_) | Returns iterator over estimators in the ensemble. | pyod/models/feature_bagging.py | __iter__ | vishalbelsare/pyod | 5,126 | python | def __iter__(self):
return iter(self.estimators_) | def __iter__(self):
return iter(self.estimators_)<|docstring|>Returns iterator over estimators in the ensemble.<|endoftext|> |
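The three dunders in these records give the ensemble list-like behaviour; the same protocol works on any class, as this toy container shows:
class Ensemble(object):
    def __init__(self, estimators):
        self.estimators_ = list(estimators)
    def __len__(self):
        return len(self.estimators_)
    def __getitem__(self, index):
        return self.estimators_[index]
    def __iter__(self):
        return iter(self.estimators_)

ens = Ensemble(['lof', 'knn', 'iforest'])
print(len(ens), ens[0], [e for e in ens])  # len(), indexing and iteration all work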
7c868715a59e580b0beb55db3be97c9bca31145670f9ed86e15a0d8e97d32882 | def get_or_create_task(**kwargs):
" Return an existing task or new task if it doesn't exist.\n\n This is intended to be exactly the same as `Model.objects.get_or_create()`\n except that the `track` kwarg is passed to `save`.\n "
track = kwargs.pop('track', True)
try:
task = Task.objects.get(**kwargs)
except Task.DoesNotExist:
task = Task(**dict(((k, v) for (k, v) in kwargs.items() if ('__' not in k))))
task.save(track=track)
return task | Return an existing task or new task if it doesn't exist.
This is intended to be exactly the same as `Model.objects.get_or_create()`
except that the `track` kwarg is passed to `save`. | django_task/task/models.py | get_or_create_task | campbellr/taskweb | 5 | python | def get_or_create_task(**kwargs):
" Return an existing task or new task if it doesn't exist.\n\n This is intended to be exactly the same as `Model.objects.get_or_create()`\n except that the `track` kwarg is passed to `save`.\n "
track = kwargs.pop('track', True)
try:
task = Task.objects.get(**kwargs)
except Task.DoesNotExist:
task = Task(**dict(((k, v) for (k, v) in kwargs.items() if ('__' not in k))))
task.save(track=track)
return task | def get_or_create_task(**kwargs):
" Return an existing task or new task if it doesn't exist.\n\n This is intended to be exactly the same as `Model.objects.get_or_create()`\n except that the `track` kwarg is passed to `save`.\n "
track = kwargs.pop('track', True)
try:
task = Task.objects.get(**kwargs)
except Task.DoesNotExist:
task = Task(**dict(((k, v) for (k, v) in kwargs.items() if ('__' not in k))))
task.save(track=track)
return task<|docstring|>Return an existing task or new task if it doesn't exist.
This is intended to be exactly the same as `Model.objects.get_or_create()`
except that the `track` kwarg is passed to `save`.<|endoftext|> |
ddcc70b74bc3e18a58c5b322cedd2e69e199e80529e046d1472060831d9c0463 | def undo(func):
' A decorator that wraps a given function to track the before\n and after states in the Undo table.\n '
def _decorator(self, *args, **kwargs):
track = kwargs.pop('track', True)
if track:
old = encode_task(self.todict())
func(self, *args, **kwargs)
new = encode_task(self.todict())
if (new != old):
Undo.objects.create(old=old, new=new, user=self.user)
else:
func(self, *args, **kwargs)
return _decorator | A decorator that wraps a given function to track the before
and after states in the Undo table. | django_task/task/models.py | undo | campbellr/taskweb | 5 | python | def undo(func):
' A decorator that wraps a given function to track the before\n and after states in the Undo table.\n '
def _decorator(self, *args, **kwargs):
track = kwargs.pop('track', True)
if track:
old = encode_task(self.todict())
func(self, *args, **kwargs)
new = encode_task(self.todict())
if (new != old):
Undo.objects.create(old=old, new=new, user=self.user)
else:
func(self, *args, **kwargs)
return _decorator | def undo(func):
' A decorator that wraps a given function to track the before\n and after states in the Undo table.\n '
def _decorator(self, *args, **kwargs):
track = kwargs.pop('track', True)
if track:
old = encode_task(self.todict())
func(self, *args, **kwargs)
new = encode_task(self.todict())
if (new != old):
Undo.objects.create(old=old, new=new, user=self.user)
else:
func(self, *args, **kwargs)
return _decorator<|docstring|>A decorator that wraps a given function to track the before
and after states in the Undo table.<|endoftext|> |
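The before/after snapshot idea behind undo, stripped of the Django and encode_task specifics (repr of __dict__ stands in for the real encoder):
import functools

def snapshot(func):
    @functools.wraps(func)
    def _decorator(self, *args, **kwargs):
        old = repr(self.__dict__)  # stand-in for encode_task(self.todict())
        func(self, *args, **kwargs)
        new = repr(self.__dict__)
        if new != old:
            print('would record undo entry:', old, '->', new)
    return _decorator

class Item(object):
    def __init__(self):
        self.value = 0
    @snapshot
    def bump(self):
        self.value += 1

Item().bump()  # prints the captured before/after states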
6f6eec746fecc1295cee76dfe9bd50a96100acf9b3daea56fb1366df473fed63 | def datetime2ts(dt):
' Convert a `datetime` object to unix timestamp (seconds since epoch).\n '
return int(time.mktime(dt.timetuple())) | Convert a `datetime` object to unix timestamp (seconds since epoch). | django_task/task/models.py | datetime2ts | campbellr/taskweb | 5 | python | def datetime2ts(dt):
' \n '
return int(time.mktime(dt.timetuple())) | def datetime2ts(dt):
' \n '
return int(time.mktime(dt.timetuple()))<|docstring|>Convert a `datetime` object to unix timestamp (seconds since epoch).<|endoftext|> |
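A quick check of the conversion above; note mktime interprets the datetime in local time:
import datetime
import time

dt = datetime.datetime(2016, 1, 1, 12, 0, 0)
print(int(time.mktime(dt.timetuple())))  # seconds since the epoch, local time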
12a4bec25f086407acbbdb932178b72f8589917d45de4e46bde36de8f7e2c51c | @classmethod
def serialize(cls):
' Serialize the table into a format expected by taskwarrior\n '
data = ''
for undo in cls.objects.all():
data += (u'time %s\n' % int(datetime2ts(undo.time)))
if undo.old:
data += (u'old %s' % undo.old)
data += (u'new %s' % undo.new)
data += u'---\n'
return data | Serialize the table into a format expected by taskwarrior | django_task/task/models.py | serialize | campbellr/taskweb | 5 | python | @classmethod
def serialize(cls):
' \n '
data = ''
for undo in cls.objects.all():
data += (u'time %s\n' % int(datetime2ts(undo.time)))
if undo.old:
data += (u'old %s' % undo.old)
data += (u'new %s' % undo.new)
data += u'---\n'
return data | @classmethod
def serialize(cls):
' \n '
data = ''
for undo in cls.objects.all():
data += (u'time %s\n' % int(datetime2ts(undo.time)))
if undo.old:
data += (u'old %s' % undo.old)
data += (u'new %s' % undo.new)
data += u'---\n'
return data<|docstring|>Serialize the table into a format expected by taskwarrior<|endoftext|>
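The serializer above targets taskwarrior's undo.data layout. A self-contained sketch with made-up rows and payloads (the real old/new strings come from encode_task and are assumed to end in a newline):

# Hypothetical rows standing in for Undo.objects.all().
rows = [{'time': 1325397600,
         'old': '[status:"pending"]\n',
         'new': '[status:"completed"]\n'}]

data = ''
for undo in rows:
    data += 'time %s\n' % undo['time']
    if undo['old']:
        data += 'old %s' % undo['old']
    data += 'new %s' % undo['new']
    data += '---\n'
print(data)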
cededda9d85717e210be422a608586d3e50f02bd4e72fcd824d34f17169a6500 | def _is_dirty(self):
" Return True if the data in the model is 'dirty', or\n not flushed to the db.\n "
if self._get_dirty_fields():
return True
return False | Return True if the data in the model is 'dirty', or
not flushed to the db. | django_task/task/models.py | _is_dirty | campbellr/taskweb | 5 | python | def _is_dirty(self):
" Return True if the data in the model is 'dirty', or\n not flushed to the db.\n "
if self._get_dirty_fields():
return True
return False | def _is_dirty(self):
" Return True if the data in the model is 'dirty', or\n not flushed to the db.\n "
if self._get_dirty_fields():
return True
return False<|docstring|>Return True if the data in the model is 'dirty', or
not flushed to the db.<|endoftext|> |
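A standalone sketch of the dirty-check idea, assuming a snapshot-based _as_dict/_original_state scheme like the one the save record below relies on; all names here are stand-ins.

class Dirty:
    def __init__(self, **fields):
        self.__dict__.update(fields)
        # Snapshot taken at load time; later compared against current state.
        self._original_state = self._as_dict()

    def _as_dict(self):
        return {k: v for k, v in self.__dict__.items() if not k.startswith('_')}

    def _get_dirty_fields(self):
        return {k: v for k, v in self._as_dict().items()
                if self._original_state.get(k) != v}

    def _is_dirty(self):
        return bool(self._get_dirty_fields())

d = Dirty(status='pending')
print(d._is_dirty())   # False
d.status = 'completed'
print(d._is_dirty())   # True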
1d04ae278aea5f135d023db777f77131278356e000211bb7df53853728caaa8e | def done(self):
' Mark a task as completed.\n '
self.status = 'completed'
self.end = datetime.datetime.now()
self.save() | Mark a task as completed. | django_task/task/models.py | done | campbellr/taskweb | 5 | python | def done(self):
' \n '
self.status = 'completed'
self.end = datetime.datetime.now()
self.save() | def done(self):
' \n '
self.status = 'completed'
self.end = datetime.datetime.now()
self.save()<|docstring|>Mark a task as completed.<|endoftext|> |
14aa0c61bbc0f4b29de42ad569aa4eec298329493e4763bb4c7adca632c5b3db | def save(self, *args, **kwargs):
" Automatically populate optional fields if they haven't been\n specified in __init__.\n "
track = kwargs.pop('track', True)
if (not self.uuid):
self.uuid = str(uuid.uuid4())
if (not self.status):
self.status = 'pending'
if (not self.entry):
self.entry = datetime.datetime.now()
if (not self.priority):
self.priority = Priority.objects.get_or_create(weight=0)[0]
data = {}
is_dirty = self._is_dirty()
if (self.pk and is_dirty):
old = self._original_state
data['old'] = encode_task(old)
super(Task, self).save(*args, **kwargs)
if (track and is_dirty):
data['new'] = encode_task(self.todict())
data['user'] = self.user
Undo.objects.create(**data)
self._original_state = self._as_dict() | Automatically populate optional fields if they haven't been
specified in __init__. | django_task/task/models.py | save | campbellr/taskweb | 5 | python | def save(self, *args, **kwargs):
" Automatically populate optional fields if they haven't been\n specified in __init__.\n "
track = kwargs.pop('track', True)
if (not self.uuid):
self.uuid = str(uuid.uuid4())
if (not self.status):
self.status = 'pending'
if (not self.entry):
self.entry = datetime.datetime.now()
if (not self.priority):
self.priority = Priority.objects.get_or_create(weight=0)[0]
data = {}
is_dirty = self._is_dirty()
if (self.pk and is_dirty):
old = self._original_state
data['old'] = encode_task(old)
super(Task, self).save(*args, **kwargs)
if (track and is_dirty):
data['new'] = encode_task(self.todict())
data['user'] = self.user
Undo.objects.create(**data)
self._original_state = self._as_dict() | def save(self, *args, **kwargs):
" Automatically populate optional fields if they haven't been\n specified in __init__.\n "
track = kwargs.pop('track', True)
if (not self.uuid):
self.uuid = str(uuid.uuid4())
if (not self.status):
self.status = 'pending'
if (not self.entry):
self.entry = datetime.datetime.now()
if (not self.priority):
self.priority = Priority.objects.get_or_create(weight=0)[0]
data = {}
is_dirty = self._is_dirty()
if (self.pk and is_dirty):
old = self._original_state
data['old'] = encode_task(old)
super(Task, self).save(*args, **kwargs)
if (track and is_dirty):
data['new'] = encode_task(self.todict())
data['user'] = self.user
Undo.objects.create(**data)
self._original_state = self._as_dict()<|docstring|>Automatically populate optional fields if they haven't been
specified in __init__.<|endoftext|> |
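The default-population half of save, reduced to a runnable stand-in (the Undo bookkeeping is sketched after the undo record above); uuid and datetime are the real stdlib modules, the Sketch class is hypothetical.

import datetime
import uuid

class Sketch:
    def __init__(self):
        self.uuid = None
        self.status = None
        self.entry = None

    def save(self):
        # Fill optional fields the first time save() runs, as in the record above.
        if not self.uuid:
            self.uuid = str(uuid.uuid4())
        if not self.status:
            self.status = 'pending'
        if not self.entry:
            self.entry = datetime.datetime.now()

t = Sketch()
t.save()
print(t.status, t.uuid)  # pending, plus a fresh UUID4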
05942d6f5bd868d7e0b402d74f326a58331f3e44e884c52a5a80dbb301b6ad54 | @classmethod
def serialize(cls, status=None):
' Serialize the tasks to a string suitable for taskwarrior.\n '
if (status is None):
tasks = cls.objects.order_by('entry')
else:
tasks = cls.objects.filter(status=status).order_by('entry')
data = ''
for task in tasks:
data += encode_task(task.todict())
return data | Serialze the tasks to a string suitable for taskwarrior. | django_task/task/models.py | serialize | campbellr/taskweb | 5 | python | @classmethod
def serialize(cls, status=None):
' \n '
if (status is None):
tasks = cls.objects.order_by('entry')
else:
tasks = cls.objects.filter(status=status).order_by('entry')
data = ''
for task in tasks:
data += encode_task(task.todict())
return data | @classmethod
def serialize(cls, status=None):
' \n '
if (status is None):
tasks = cls.objects.order_by('entry')
else:
tasks = cls.objects.filter(status=status).order_by('entry')
data = ''
for task in tasks:
data += encode_task(task.todict())
return data<|docstring|>Serialize the tasks to a string suitable for taskwarrior.<|endoftext|>
acc903b4e3aa94ec93024cd2ff397fcfc5e02ad8626465652e63e0a8c37cbc4a | def one_click_unsubscribe_link(user_profile, email_type):
'\n Generate a unique link that a logged-out user can visit to unsubscribe from\n Zulip e-mails without having to first log in.\n '
return create_confirmation_link(user_profile, user_profile.realm.host, Confirmation.UNSUBSCRIBE, url_args={'email_type': email_type}) | Generate a unique link that a logged-out user can visit to unsubscribe from
Zulip e-mails without having to first log in. | zerver/lib/notifications.py | one_click_unsubscribe_link | ScorpionHat/zulip | 0 | python | def one_click_unsubscribe_link(user_profile, email_type):
'\n Generate a unique link that a logged-out user can visit to unsubscribe from\n Zulip e-mails without having to first log in.\n '
return create_confirmation_link(user_profile, user_profile.realm.host, Confirmation.UNSUBSCRIBE, url_args={'email_type': email_type}) | def one_click_unsubscribe_link(user_profile, email_type):
'\n Generate a unique link that a logged-out user can visit to unsubscribe from\n Zulip e-mails without having to first log in.\n '
return create_confirmation_link(user_profile, user_profile.realm.host, Confirmation.UNSUBSCRIBE, url_args={'email_type': email_type})<|docstring|>Generate a unique link that a logged-out user can visit to unsubscribe from
Zulip e-mails without having to first log in.<|endoftext|> |
ec663ea3bdf7b591417bdee9737946ece1c435d5b7f9d412e02aeda90ba87b00 | def build_message_list(user_profile, messages):
'\n Builds the message list object for the missed message email template.\n The messages are collapsed into per-recipient and per-sender blocks, like\n our web interface\n '
messages_to_render = []
def sender_string(message):
if (message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE)):
return message.sender.full_name
else:
return ''
def relative_to_full_url(content):
content = re.sub('/user_uploads/(\\S*)', (user_profile.realm.uri + '/user_uploads/\\1'), content)
content = re.sub('<img src=(\\S+)/user_uploads/(\\S+)>', '', content)
content = re.sub('/static/generated/emoji/images/emoji/', (user_profile.realm.uri + '/static/generated/emoji/images/emoji/'), content)
content = re.sub('/user_avatars/(\\d+)/emoji/', (user_profile.realm.uri + '/user_avatars/\\1/emoji/'), content)
content = re.sub('/#narrow/stream/', (user_profile.realm.uri + '/#narrow/stream/'), content)
return content
def fix_plaintext_image_urls(content):
return re.sub('\\[(\\S*)\\]\\((\\S*)\\)', '\\2', content)
def fix_emoji_sizes(html):
return html.replace(' class="emoji"', ' height="20px"')
def build_message_payload(message):
plain = message.content
plain = fix_plaintext_image_urls(plain)
plain = relative_to_full_url(plain)
assert (message.rendered_content is not None)
html = message.rendered_content
html = relative_to_full_url(html)
html = fix_emoji_sizes(html)
return {'plain': plain, 'html': html}
def build_sender_payload(message):
sender = sender_string(message)
return {'sender': sender, 'content': [build_message_payload(message)]}
def message_header(user_profile, message):
disp_recipient = get_display_recipient(message.recipient)
if (message.recipient.type == Recipient.PERSONAL):
header = (u'You and %s' % (message.sender.full_name,))
html_link = pm_narrow_url(user_profile.realm, [message.sender.email])
header_html = (u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header))
elif (message.recipient.type == Recipient.HUDDLE):
assert (not isinstance(disp_recipient, Text))
other_recipients = [r['full_name'] for r in disp_recipient if (r['email'] != user_profile.email)]
header = (u'You and %s' % (', '.join(other_recipients),))
html_link = pm_narrow_url(user_profile.realm, [r['email'] for r in disp_recipient if (r['email'] != user_profile.email)])
header_html = (u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header))
else:
assert isinstance(disp_recipient, Text)
header = (u'%s > %s' % (disp_recipient, message.topic_name()))
stream_link = stream_narrow_url(user_profile.realm, disp_recipient)
topic_link = topic_narrow_url(user_profile.realm, disp_recipient, message.subject)
header_html = (u"<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (stream_link, disp_recipient, topic_link, message.subject))
return {'plain': header, 'html': header_html, 'stream_message': (message.recipient.type_name() == 'stream')}
messages.sort(key=(lambda message: message.pub_date))
for message in messages:
header = message_header(user_profile, message)
if ((len(messages_to_render) > 0) and (messages_to_render[(- 1)]['header'] == header)):
sender = sender_string(message)
sender_block = messages_to_render[(- 1)]['senders']
if (sender_block[(- 1)]['sender'] == sender):
sender_block[(- 1)]['content'].append(build_message_payload(message))
else:
sender_block.append(build_sender_payload(message))
else:
recipient_block = {'header': header, 'senders': [build_sender_payload(message)]}
messages_to_render.append(recipient_block)
return messages_to_render | Builds the message list object for the missed message email template.
The messages are collapsed into per-recipient and per-sender blocks, like
our web interface | zerver/lib/notifications.py | build_message_list | ScorpionHat/zulip | 0 | python | def build_message_list(user_profile, messages):
'\n Builds the message list object for the missed message email template.\n The messages are collapsed into per-recipient and per-sender blocks, like\n our web interface\n '
messages_to_render = []
def sender_string(message):
if (message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE)):
return message.sender.full_name
else:
return ''
def relative_to_full_url(content):
content = re.sub('/user_uploads/(\\S*)', (user_profile.realm.uri + '/user_uploads/\\1'), content)
content = re.sub('<img src=(\\S+)/user_uploads/(\\S+)>', '', content)
content = re.sub('/static/generated/emoji/images/emoji/', (user_profile.realm.uri + '/static/generated/emoji/images/emoji/'), content)
content = re.sub('/user_avatars/(\\d+)/emoji/', (user_profile.realm.uri + '/user_avatars/\\1/emoji/'), content)
content = re.sub('/#narrow/stream/', (user_profile.realm.uri + '/#narrow/stream/'), content)
return content
def fix_plaintext_image_urls(content):
return re.sub('\\[(\\S*)\\]\\((\\S*)\\)', '\\2', content)
def fix_emoji_sizes(html):
return html.replace(' class="emoji"', ' height="20px"')
def build_message_payload(message):
plain = message.content
plain = fix_plaintext_image_urls(plain)
plain = relative_to_full_url(plain)
assert (message.rendered_content is not None)
html = message.rendered_content
html = relative_to_full_url(html)
html = fix_emoji_sizes(html)
return {'plain': plain, 'html': html}
def build_sender_payload(message):
sender = sender_string(message)
return {'sender': sender, 'content': [build_message_payload(message)]}
def message_header(user_profile, message):
disp_recipient = get_display_recipient(message.recipient)
if (message.recipient.type == Recipient.PERSONAL):
header = (u'You and %s' % (message.sender.full_name,))
html_link = pm_narrow_url(user_profile.realm, [message.sender.email])
header_html = (u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header))
elif (message.recipient.type == Recipient.HUDDLE):
assert (not isinstance(disp_recipient, Text))
other_recipients = [r['full_name'] for r in disp_recipient if (r['email'] != user_profile.email)]
header = (u'You and %s' % (', '.join(other_recipients),))
html_link = pm_narrow_url(user_profile.realm, [r['email'] for r in disp_recipient if (r['email'] != user_profile.email)])
header_html = (u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header))
else:
assert isinstance(disp_recipient, Text)
header = (u'%s > %s' % (disp_recipient, message.topic_name()))
stream_link = stream_narrow_url(user_profile.realm, disp_recipient)
topic_link = topic_narrow_url(user_profile.realm, disp_recipient, message.subject)
header_html = (u"<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (stream_link, disp_recipient, topic_link, message.subject))
return {'plain': header, 'html': header_html, 'stream_message': (message.recipient.type_name() == 'stream')}
messages.sort(key=(lambda message: message.pub_date))
for message in messages:
header = message_header(user_profile, message)
if ((len(messages_to_render) > 0) and (messages_to_render[(- 1)]['header'] == header)):
sender = sender_string(message)
sender_block = messages_to_render[(- 1)]['senders']
if (sender_block[(- 1)]['sender'] == sender):
sender_block[(- 1)]['content'].append(build_message_payload(message))
else:
sender_block.append(build_sender_payload(message))
else:
recipient_block = {'header': header, 'senders': [build_sender_payload(message)]}
messages_to_render.append(recipient_block)
return messages_to_render | def build_message_list(user_profile, messages):
'\n Builds the message list object for the missed message email template.\n The messages are collapsed into per-recipient and per-sender blocks, like\n our web interface\n '
messages_to_render = []
def sender_string(message):
if (message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE)):
return message.sender.full_name
else:
return ''
def relative_to_full_url(content):
content = re.sub('/user_uploads/(\\S*)', (user_profile.realm.uri + '/user_uploads/\\1'), content)
content = re.sub('<img src=(\\S+)/user_uploads/(\\S+)>', '', content)
content = re.sub('/static/generated/emoji/images/emoji/', (user_profile.realm.uri + '/static/generated/emoji/images/emoji/'), content)
content = re.sub('/user_avatars/(\\d+)/emoji/', (user_profile.realm.uri + '/user_avatars/\\1/emoji/'), content)
content = re.sub('/#narrow/stream/', (user_profile.realm.uri + '/#narrow/stream/'), content)
return content
def fix_plaintext_image_urls(content):
return re.sub('\\[(\\S*)\\]\\((\\S*)\\)', '\\2', content)
def fix_emoji_sizes(html):
return html.replace(' class="emoji"', ' height="20px"')
def build_message_payload(message):
plain = message.content
plain = fix_plaintext_image_urls(plain)
plain = relative_to_full_url(plain)
assert (message.rendered_content is not None)
html = message.rendered_content
html = relative_to_full_url(html)
html = fix_emoji_sizes(html)
return {'plain': plain, 'html': html}
def build_sender_payload(message):
sender = sender_string(message)
return {'sender': sender, 'content': [build_message_payload(message)]}
def message_header(user_profile, message):
disp_recipient = get_display_recipient(message.recipient)
if (message.recipient.type == Recipient.PERSONAL):
header = (u'You and %s' % (message.sender.full_name,))
html_link = pm_narrow_url(user_profile.realm, [message.sender.email])
header_html = (u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header))
elif (message.recipient.type == Recipient.HUDDLE):
assert (not isinstance(disp_recipient, Text))
other_recipients = [r['full_name'] for r in disp_recipient if (r['email'] != user_profile.email)]
header = (u'You and %s' % (', '.join(other_recipients),))
html_link = pm_narrow_url(user_profile.realm, [r['email'] for r in disp_recipient if (r['email'] != user_profile.email)])
header_html = (u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header))
else:
assert isinstance(disp_recipient, Text)
header = (u'%s > %s' % (disp_recipient, message.topic_name()))
stream_link = stream_narrow_url(user_profile.realm, disp_recipient)
topic_link = topic_narrow_url(user_profile.realm, disp_recipient, message.subject)
header_html = (u"<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (stream_link, disp_recipient, topic_link, message.subject))
return {'plain': header, 'html': header_html, 'stream_message': (message.recipient.type_name() == 'stream')}
messages.sort(key=(lambda message: message.pub_date))
for message in messages:
header = message_header(user_profile, message)
if ((len(messages_to_render) > 0) and (messages_to_render[(- 1)]['header'] == header)):
sender = sender_string(message)
sender_block = messages_to_render[(- 1)]['senders']
if (sender_block[(- 1)]['sender'] == sender):
sender_block[(- 1)]['content'].append(build_message_payload(message))
else:
sender_block.append(build_sender_payload(message))
else:
recipient_block = {'header': header, 'senders': [build_sender_payload(message)]}
messages_to_render.append(recipient_block)
return messages_to_render<|docstring|>Builds the message list object for the missed message email template.
The messages are collapsed into per-recipient and per-sender blocks, like
our web interface<|endoftext|> |
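An illustrative (made-up values) shape of the structure build_message_list returns: a list of per-recipient blocks, each holding per-sender blocks.

messages_to_render = [
    {
        'header': {
            'plain': 'You and Iago',
            'html': "<a style='color: #ffffff;' href='...'>You and Iago</a>",
            'stream_message': False,
        },
        'senders': [
            {
                'sender': '',  # empty for 1:1 PMs, full name for streams/huddles
                'content': [{'plain': 'hello', 'html': '<p>hello</p>'}],
            },
        ],
    },
]
print(len(messages_to_render[0]['senders']))  # 1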
156b7b8c08d2eb63b02651ea5c72f2d1e0ca7e51950d2e946a012d9c0e3bc692 | @statsd_increment('missed_message_reminders')
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
"\n Send a reminder email to a user if she's missed some PMs by being offline.\n\n The email will have its reply to address set to a limited used email\n address that will send a zulip message to the correct recipient. This\n allows the user to respond to missed PMs, huddles, and @-mentions directly\n from the email.\n\n `user_profile` is the user to send the reminder to\n `missed_messages` is a list of Message objects to remind about they should\n all have the same recipient and subject\n "
from zerver.context_processors import common_context
if (not user_profile.enable_offline_email_notifications):
return
recipients = set(((msg.recipient_id, msg.subject) for msg in missed_messages))
if (len(recipients) != 1):
raise ValueError(('All missed_messages must have the same recipient and subject %r' % recipients))
unsubscribe_link = one_click_unsubscribe_link(user_profile, 'missed_messages')
context = common_context(user_profile)
context.update({'name': user_profile.full_name, 'messages': build_message_list(user_profile, missed_messages), 'message_count': message_count, 'mention': (missed_messages[0].recipient.type == Recipient.STREAM), 'unsubscribe_link': unsubscribe_link})
if settings.EMAIL_GATEWAY_PATTERN:
context.update({'reply_warning': False, 'reply_to_zulip': True})
else:
context.update({'reply_warning': True, 'reply_to_zulip': False})
from zerver.lib.email_mirror import create_missed_message_address
reply_to_address = create_missed_message_address(user_profile, missed_messages[0])
if (reply_to_address == FromAddress.NOREPLY):
reply_to_name = None
else:
reply_to_name = 'Zulip'
senders = list(set((m.sender for m in missed_messages)))
if (missed_messages[0].recipient.type == Recipient.HUDDLE):
display_recipient = get_display_recipient(missed_messages[0].recipient)
assert (not isinstance(display_recipient, Text))
other_recipients = [r['full_name'] for r in display_recipient if (r['id'] != user_profile.id)]
context.update({'group_pm': True})
if (len(other_recipients) == 2):
huddle_display_name = (u'%s' % ' and '.join(other_recipients))
context.update({'huddle_display_name': huddle_display_name})
elif (len(other_recipients) == 3):
huddle_display_name = (u'%s, %s, and %s' % (other_recipients[0], other_recipients[1], other_recipients[2]))
context.update({'huddle_display_name': huddle_display_name})
else:
huddle_display_name = (u'%s, and %s others' % (', '.join(other_recipients[:2]), (len(other_recipients) - 2)))
context.update({'huddle_display_name': huddle_display_name})
elif (missed_messages[0].recipient.type == Recipient.PERSONAL):
context.update({'private_message': True})
else:
senders = list(set((m.sender for m in missed_messages if UserMessage.objects.filter(message=m, user_profile=user_profile, flags=UserMessage.flags.mentioned).exists())))
context.update({'at_mention': True})
context.update({'sender_str': ', '.join((sender.full_name for sender in senders)), 'realm_str': user_profile.realm.name})
from_name = 'Zulip Missed Messages'
from_address = FromAddress.NOREPLY
if ((len(senders) == 1) and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER):
sender = senders[0]
(from_name, from_address) = (sender.full_name, sender.email)
context.update({'reply_warning': False, 'reply_to_zulip': False})
email_dict = {'template_prefix': 'zerver/emails/missed_message', 'to_user_id': user_profile.id, 'from_name': from_name, 'from_address': from_address, 'reply_to_email': formataddr((reply_to_name, reply_to_address)), 'context': context}
queue_json_publish('missedmessage_email_senders', email_dict, send_email_from_dict)
user_profile.last_reminder = timezone_now()
user_profile.save(update_fields=['last_reminder']) | Send a reminder email to a user if she's missed some PMs by being offline.
The email will have its reply-to address set to a limited-use email
address that will send a zulip message to the correct recipient. This
allows the user to respond to missed PMs, huddles, and @-mentions directly
from the email.
`user_profile` is the user to send the reminder to
`missed_messages` is a list of Message objects to remind about; they should
all have the same recipient and subject | zerver/lib/notifications.py | do_send_missedmessage_events_reply_in_zulip | ScorpionHat/zulip | 0 | python | @statsd_increment('missed_message_reminders')
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
"\n Send a reminder email to a user if she's missed some PMs by being offline.\n\n The email will have its reply to address set to a limited used email\n address that will send a zulip message to the correct recipient. This\n allows the user to respond to missed PMs, huddles, and @-mentions directly\n from the email.\n\n `user_profile` is the user to send the reminder to\n `missed_messages` is a list of Message objects to remind about they should\n all have the same recipient and subject\n "
from zerver.context_processors import common_context
if (not user_profile.enable_offline_email_notifications):
return
recipients = set(((msg.recipient_id, msg.subject) for msg in missed_messages))
if (len(recipients) != 1):
raise ValueError(('All missed_messages must have the same recipient and subject %r' % recipients))
unsubscribe_link = one_click_unsubscribe_link(user_profile, 'missed_messages')
context = common_context(user_profile)
context.update({'name': user_profile.full_name, 'messages': build_message_list(user_profile, missed_messages), 'message_count': message_count, 'mention': (missed_messages[0].recipient.type == Recipient.STREAM), 'unsubscribe_link': unsubscribe_link})
if settings.EMAIL_GATEWAY_PATTERN:
context.update({'reply_warning': False, 'reply_to_zulip': True})
else:
context.update({'reply_warning': True, 'reply_to_zulip': False})
from zerver.lib.email_mirror import create_missed_message_address
reply_to_address = create_missed_message_address(user_profile, missed_messages[0])
if (reply_to_address == FromAddress.NOREPLY):
reply_to_name = None
else:
reply_to_name = 'Zulip'
senders = list(set((m.sender for m in missed_messages)))
if (missed_messages[0].recipient.type == Recipient.HUDDLE):
display_recipient = get_display_recipient(missed_messages[0].recipient)
assert (not isinstance(display_recipient, Text))
other_recipients = [r['full_name'] for r in display_recipient if (r['id'] != user_profile.id)]
context.update({'group_pm': True})
if (len(other_recipients) == 2):
huddle_display_name = (u'%s' % ' and '.join(other_recipients))
context.update({'huddle_display_name': huddle_display_name})
elif (len(other_recipients) == 3):
huddle_display_name = (u'%s, %s, and %s' % (other_recipients[0], other_recipients[1], other_recipients[2]))
context.update({'huddle_display_name': huddle_display_name})
else:
huddle_display_name = (u'%s, and %s others' % (', '.join(other_recipients[:2]), (len(other_recipients) - 2)))
context.update({'huddle_display_name': huddle_display_name})
elif (missed_messages[0].recipient.type == Recipient.PERSONAL):
context.update({'private_message': True})
else:
senders = list(set((m.sender for m in missed_messages if UserMessage.objects.filter(message=m, user_profile=user_profile, flags=UserMessage.flags.mentioned).exists())))
context.update({'at_mention': True})
context.update({'sender_str': ', '.join((sender.full_name for sender in senders)), 'realm_str': user_profile.realm.name})
from_name = 'Zulip Missed Messages'
from_address = FromAddress.NOREPLY
if ((len(senders) == 1) and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER):
sender = senders[0]
(from_name, from_address) = (sender.full_name, sender.email)
context.update({'reply_warning': False, 'reply_to_zulip': False})
email_dict = {'template_prefix': 'zerver/emails/missed_message', 'to_user_id': user_profile.id, 'from_name': from_name, 'from_address': from_address, 'reply_to_email': formataddr((reply_to_name, reply_to_address)), 'context': context}
queue_json_publish('missedmessage_email_senders', email_dict, send_email_from_dict)
user_profile.last_reminder = timezone_now()
user_profile.save(update_fields=['last_reminder']) | @statsd_increment('missed_message_reminders')
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
"\n Send a reminder email to a user if she's missed some PMs by being offline.\n\n The email will have its reply to address set to a limited used email\n address that will send a zulip message to the correct recipient. This\n allows the user to respond to missed PMs, huddles, and @-mentions directly\n from the email.\n\n `user_profile` is the user to send the reminder to\n `missed_messages` is a list of Message objects to remind about they should\n all have the same recipient and subject\n "
from zerver.context_processors import common_context
if (not user_profile.enable_offline_email_notifications):
return
recipients = set(((msg.recipient_id, msg.subject) for msg in missed_messages))
if (len(recipients) != 1):
raise ValueError(('All missed_messages must have the same recipient and subject %r' % recipients))
unsubscribe_link = one_click_unsubscribe_link(user_profile, 'missed_messages')
context = common_context(user_profile)
context.update({'name': user_profile.full_name, 'messages': build_message_list(user_profile, missed_messages), 'message_count': message_count, 'mention': (missed_messages[0].recipient.type == Recipient.STREAM), 'unsubscribe_link': unsubscribe_link})
if settings.EMAIL_GATEWAY_PATTERN:
context.update({'reply_warning': False, 'reply_to_zulip': True})
else:
context.update({'reply_warning': True, 'reply_to_zulip': False})
from zerver.lib.email_mirror import create_missed_message_address
reply_to_address = create_missed_message_address(user_profile, missed_messages[0])
if (reply_to_address == FromAddress.NOREPLY):
reply_to_name = None
else:
reply_to_name = 'Zulip'
senders = list(set((m.sender for m in missed_messages)))
if (missed_messages[0].recipient.type == Recipient.HUDDLE):
display_recipient = get_display_recipient(missed_messages[0].recipient)
assert (not isinstance(display_recipient, Text))
other_recipients = [r['full_name'] for r in display_recipient if (r['id'] != user_profile.id)]
context.update({'group_pm': True})
if (len(other_recipients) == 2):
huddle_display_name = (u'%s' % ' and '.join(other_recipients))
context.update({'huddle_display_name': huddle_display_name})
elif (len(other_recipients) == 3):
huddle_display_name = (u'%s, %s, and %s' % (other_recipients[0], other_recipients[1], other_recipients[2]))
context.update({'huddle_display_name': huddle_display_name})
else:
huddle_display_name = (u'%s, and %s others' % (', '.join(other_recipients[:2]), (len(other_recipients) - 2)))
context.update({'huddle_display_name': huddle_display_name})
elif (missed_messages[0].recipient.type == Recipient.PERSONAL):
context.update({'private_message': True})
else:
senders = list(set((m.sender for m in missed_messages if UserMessage.objects.filter(message=m, user_profile=user_profile, flags=UserMessage.flags.mentioned).exists())))
context.update({'at_mention': True})
context.update({'sender_str': ', '.join((sender.full_name for sender in senders)), 'realm_str': user_profile.realm.name})
from_name = 'Zulip Missed Messages'
from_address = FromAddress.NOREPLY
if ((len(senders) == 1) and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER):
sender = senders[0]
(from_name, from_address) = (sender.full_name, sender.email)
context.update({'reply_warning': False, 'reply_to_zulip': False})
email_dict = {'template_prefix': 'zerver/emails/missed_message', 'to_user_id': user_profile.id, 'from_name': from_name, 'from_address': from_address, 'reply_to_email': formataddr((reply_to_name, reply_to_address)), 'context': context}
queue_json_publish('missedmessage_email_senders', email_dict, send_email_from_dict)
user_profile.last_reminder = timezone_now()
user_profile.save(update_fields=['last_reminder'])<|docstring|>Send a reminder email to a user if she's missed some PMs by being offline.
The email will have its reply to address set to a limited used email
address that will send a zulip message to the correct recipient. This
allows the user to respond to missed PMs, huddles, and @-mentions directly
from the email.
`user_profile` is the user to send the reminder to
`missed_messages` is a list of Message objects to remind about; they should
all have the same recipient and subject<|endoftext|> |
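The huddle display-name branching from the record above, extracted into a standalone helper; the names are invented.

def huddle_display_name(other_recipients):
    # 2 names: "A and B"; 3 names: "A, B, and C"; more: "A, B, and N others".
    if len(other_recipients) == 2:
        return ' and '.join(other_recipients)
    elif len(other_recipients) == 3:
        return '%s, %s, and %s' % tuple(other_recipients)
    return '%s, and %s others' % (', '.join(other_recipients[:2]),
                                  len(other_recipients) - 2)

print(huddle_display_name(['Iago', 'Hamlet']))                     # Iago and Hamlet
print(huddle_display_name(['Iago', 'Hamlet', 'Ophelia', 'Lear']))  # Iago, Hamlet, and 2 others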
7a7b1b4c5f19ebe2b3b45bdcee347c332e8f81b5b7f8d18995e88dd055d0d5c2 | def __init__(self, id=None, project_usages=None, library_variable_set_usages=None, tenant_usages=None, deployment_target_usages=None, last_modified_on=None, last_modified_by=None, links=None):
'CertificateUsageResource - a model defined in Swagger'
self._id = None
self._project_usages = None
self._library_variable_set_usages = None
self._tenant_usages = None
self._deployment_target_usages = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if (id is not None):
self.id = id
if (project_usages is not None):
self.project_usages = project_usages
if (library_variable_set_usages is not None):
self.library_variable_set_usages = library_variable_set_usages
if (tenant_usages is not None):
self.tenant_usages = tenant_usages
if (deployment_target_usages is not None):
self.deployment_target_usages = deployment_target_usages
if (last_modified_on is not None):
self.last_modified_on = last_modified_on
if (last_modified_by is not None):
self.last_modified_by = last_modified_by
if (links is not None):
self.links = links | CertificateUsageResource - a model defined in Swagger | octopus_deploy_swagger_client/models/certificate_usage_resource.py | __init__ | cvent/octopus-deploy-api-client | 0 | python | def __init__(self, id=None, project_usages=None, library_variable_set_usages=None, tenant_usages=None, deployment_target_usages=None, last_modified_on=None, last_modified_by=None, links=None):
self._id = None
self._project_usages = None
self._library_variable_set_usages = None
self._tenant_usages = None
self._deployment_target_usages = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if (id is not None):
self.id = id
if (project_usages is not None):
self.project_usages = project_usages
if (library_variable_set_usages is not None):
self.library_variable_set_usages = library_variable_set_usages
if (tenant_usages is not None):
self.tenant_usages = tenant_usages
if (deployment_target_usages is not None):
self.deployment_target_usages = deployment_target_usages
if (last_modified_on is not None):
self.last_modified_on = last_modified_on
if (last_modified_by is not None):
self.last_modified_by = last_modified_by
if (links is not None):
self.links = links | def __init__(self, id=None, project_usages=None, library_variable_set_usages=None, tenant_usages=None, deployment_target_usages=None, last_modified_on=None, last_modified_by=None, links=None):
self._id = None
self._project_usages = None
self._library_variable_set_usages = None
self._tenant_usages = None
self._deployment_target_usages = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if (id is not None):
self.id = id
if (project_usages is not None):
self.project_usages = project_usages
if (library_variable_set_usages is not None):
self.library_variable_set_usages = library_variable_set_usages
if (tenant_usages is not None):
self.tenant_usages = tenant_usages
if (deployment_target_usages is not None):
self.deployment_target_usages = deployment_target_usages
if (last_modified_on is not None):
self.last_modified_on = last_modified_on
if (last_modified_by is not None):
self.last_modified_by = last_modified_by
if (links is not None):
self.links = links<|docstring|>CertificateUsageResource - a model defined in Swagger<|endoftext|> |
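A hedged construction sketch for the generated model above; it assumes the octopus_deploy_swagger_client package is importable, and the field values are made up.

from octopus_deploy_swagger_client.models.certificate_usage_resource import (
    CertificateUsageResource,
)

usage = CertificateUsageResource(
    id='CertificateUsages-1',   # hypothetical id
    project_usages=[],          # list[ProjectResource]
    tenant_usages=[],           # list[TenantResource]
)
print(usage.id)                   # property getter (defined in the records below)
usage.id = 'CertificateUsages-2'  # property setter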
f394aeec90580b33d63709ab3f512a4bcaee907ed4f18c48781087d8a11ce62d | @property
def id(self):
'Gets the id of this CertificateUsageResource. # noqa: E501\n\n\n :return: The id of this CertificateUsageResource. # noqa: E501\n :rtype: str\n '
return self._id | Gets the id of this CertificateUsageResource. # noqa: E501
:return: The id of this CertificateUsageResource. # noqa: E501
:rtype: str | octopus_deploy_swagger_client/models/certificate_usage_resource.py | id | cvent/octopus-deploy-api-client | 0 | python | @property
def id(self):
'Gets the id of this CertificateUsageResource. # noqa: E501\n\n\n :return: The id of this CertificateUsageResource. # noqa: E501\n :rtype: str\n '
return self._id | @property
def id(self):
'Gets the id of this CertificateUsageResource. # noqa: E501\n\n\n :return: The id of this CertificateUsageResource. # noqa: E501\n :rtype: str\n '
return self._id<|docstring|>Gets the id of this CertificateUsageResource. # noqa: E501
:return: The id of this CertificateUsageResource. # noqa: E501
:rtype: str<|endoftext|> |
8a064a6e43bce06c36ddd6c66fb3a657d2786f7fa5510df551db03fbfe793364 | @id.setter
def id(self, id):
'Sets the id of this CertificateUsageResource.\n\n\n :param id: The id of this CertificateUsageResource. # noqa: E501\n :type: str\n '
self._id = id | Sets the id of this CertificateUsageResource.
:param id: The id of this CertificateUsageResource. # noqa: E501
:type: str | octopus_deploy_swagger_client/models/certificate_usage_resource.py | id | cvent/octopus-deploy-api-client | 0 | python | @id.setter
def id(self, id):
'Sets the id of this CertificateUsageResource.\n\n\n :param id: The id of this CertificateUsageResource. # noqa: E501\n :type: str\n '
self._id = id | @id.setter
def id(self, id):
'Sets the id of this CertificateUsageResource.\n\n\n :param id: The id of this CertificateUsageResource. # noqa: E501\n :type: str\n '
self._id = id<|docstring|>Sets the id of this CertificateUsageResource.
:param id: The id of this CertificateUsageResource. # noqa: E501
:type: str<|endoftext|> |
2a7c3edca81bf26a1fb9bf324cbcc52bc983db61c3741d5c228d815516fd8511 | @property
def project_usages(self):
'Gets the project_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The project_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[ProjectResource]\n '
return self._project_usages | Gets the project_usages of this CertificateUsageResource. # noqa: E501
:return: The project_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[ProjectResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | project_usages | cvent/octopus-deploy-api-client | 0 | python | @property
def project_usages(self):
'Gets the project_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The project_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[ProjectResource]\n '
return self._project_usages | @property
def project_usages(self):
'Gets the project_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The project_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[ProjectResource]\n '
return self._project_usages<|docstring|>Gets the project_usages of this CertificateUsageResource. # noqa: E501
:return: The project_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[ProjectResource]<|endoftext|> |
ff7a06ff786e275e1214f04fbc6c607d3f360150a64a572f644eac48a83cc50a | @project_usages.setter
def project_usages(self, project_usages):
'Sets the project_usages of this CertificateUsageResource.\n\n\n :param project_usages: The project_usages of this CertificateUsageResource. # noqa: E501\n :type: list[ProjectResource]\n '
self._project_usages = project_usages | Sets the project_usages of this CertificateUsageResource.
:param project_usages: The project_usages of this CertificateUsageResource. # noqa: E501
:type: list[ProjectResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | project_usages | cvent/octopus-deploy-api-client | 0 | python | @project_usages.setter
def project_usages(self, project_usages):
'Sets the project_usages of this CertificateUsageResource.\n\n\n :param project_usages: The project_usages of this CertificateUsageResource. # noqa: E501\n :type: list[ProjectResource]\n '
self._project_usages = project_usages | @project_usages.setter
def project_usages(self, project_usages):
'Sets the project_usages of this CertificateUsageResource.\n\n\n :param project_usages: The project_usages of this CertificateUsageResource. # noqa: E501\n :type: list[ProjectResource]\n '
self._project_usages = project_usages<|docstring|>Sets the project_usages of this CertificateUsageResource.
:param project_usages: The project_usages of this CertificateUsageResource. # noqa: E501
:type: list[ProjectResource]<|endoftext|> |
b4920dcb1d3ee8891118c678ac7b168f052567eec6dd67d17f136639c78a2cd4 | @property
def library_variable_set_usages(self):
'Gets the library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[LibraryVariableSetResource]\n '
return self._library_variable_set_usages | Gets the library_variable_set_usages of this CertificateUsageResource. # noqa: E501
:return: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[LibraryVariableSetResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | library_variable_set_usages | cvent/octopus-deploy-api-client | 0 | python | @property
def library_variable_set_usages(self):
'Gets the library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[LibraryVariableSetResource]\n '
return self._library_variable_set_usages | @property
def library_variable_set_usages(self):
'Gets the library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[LibraryVariableSetResource]\n '
return self._library_variable_set_usages<|docstring|>Gets the library_variable_set_usages of this CertificateUsageResource. # noqa: E501
:return: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[LibraryVariableSetResource]<|endoftext|> |
bff525611227c9461e142d5235279c50c94748192d95524741526423989843da | @library_variable_set_usages.setter
def library_variable_set_usages(self, library_variable_set_usages):
'Sets the library_variable_set_usages of this CertificateUsageResource.\n\n\n :param library_variable_set_usages: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n :type: list[LibraryVariableSetResource]\n '
self._library_variable_set_usages = library_variable_set_usages | Sets the library_variable_set_usages of this CertificateUsageResource.
:param library_variable_set_usages: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501
:type: list[LibraryVariableSetResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | library_variable_set_usages | cvent/octopus-deploy-api-client | 0 | python | @library_variable_set_usages.setter
def library_variable_set_usages(self, library_variable_set_usages):
'Sets the library_variable_set_usages of this CertificateUsageResource.\n\n\n :param library_variable_set_usages: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n :type: list[LibraryVariableSetResource]\n '
self._library_variable_set_usages = library_variable_set_usages | @library_variable_set_usages.setter
def library_variable_set_usages(self, library_variable_set_usages):
'Sets the library_variable_set_usages of this CertificateUsageResource.\n\n\n :param library_variable_set_usages: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501\n :type: list[LibraryVariableSetResource]\n '
self._library_variable_set_usages = library_variable_set_usages<|docstring|>Sets the library_variable_set_usages of this CertificateUsageResource.
:param library_variable_set_usages: The library_variable_set_usages of this CertificateUsageResource. # noqa: E501
:type: list[LibraryVariableSetResource]<|endoftext|> |
425958a85bc75830e7de8c3bc7acb1843169293a7020b89aeb64e968197735ab | @property
def tenant_usages(self):
'Gets the tenant_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The tenant_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[TenantResource]\n '
return self._tenant_usages | Gets the tenant_usages of this CertificateUsageResource. # noqa: E501
:return: The tenant_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[TenantResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | tenant_usages | cvent/octopus-deploy-api-client | 0 | python | @property
def tenant_usages(self):
'Gets the tenant_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The tenant_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[TenantResource]\n '
return self._tenant_usages | @property
def tenant_usages(self):
'Gets the tenant_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The tenant_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[TenantResource]\n '
return self._tenant_usages<|docstring|>Gets the tenant_usages of this CertificateUsageResource. # noqa: E501
:return: The tenant_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[TenantResource]<|endoftext|> |
da57ea0503badf82751181e1997a1470b461c670395b312d34d2b69803617ffb | @tenant_usages.setter
def tenant_usages(self, tenant_usages):
'Sets the tenant_usages of this CertificateUsageResource.\n\n\n :param tenant_usages: The tenant_usages of this CertificateUsageResource. # noqa: E501\n :type: list[TenantResource]\n '
self._tenant_usages = tenant_usages | Sets the tenant_usages of this CertificateUsageResource.
:param tenant_usages: The tenant_usages of this CertificateUsageResource. # noqa: E501
:type: list[TenantResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | tenant_usages | cvent/octopus-deploy-api-client | 0 | python | @tenant_usages.setter
def tenant_usages(self, tenant_usages):
'Sets the tenant_usages of this CertificateUsageResource.\n\n\n :param tenant_usages: The tenant_usages of this CertificateUsageResource. # noqa: E501\n :type: list[TenantResource]\n '
self._tenant_usages = tenant_usages | @tenant_usages.setter
def tenant_usages(self, tenant_usages):
'Sets the tenant_usages of this CertificateUsageResource.\n\n\n :param tenant_usages: The tenant_usages of this CertificateUsageResource. # noqa: E501\n :type: list[TenantResource]\n '
self._tenant_usages = tenant_usages<|docstring|>Sets the tenant_usages of this CertificateUsageResource.
:param tenant_usages: The tenant_usages of this CertificateUsageResource. # noqa: E501
:type: list[TenantResource]<|endoftext|> |
219c56623b7deabb624f293a8664da87129a5647f82374d0971ccf9d1e092449 | @property
def deployment_target_usages(self):
'Gets the deployment_target_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The deployment_target_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[DeploymentTargetResource]\n '
return self._deployment_target_usages | Gets the deployment_target_usages of this CertificateUsageResource. # noqa: E501
:return: The deployment_target_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[DeploymentTargetResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | deployment_target_usages | cvent/octopus-deploy-api-client | 0 | python | @property
def deployment_target_usages(self):
'Gets the deployment_target_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The deployment_target_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[DeploymentTargetResource]\n '
return self._deployment_target_usages | @property
def deployment_target_usages(self):
'Gets the deployment_target_usages of this CertificateUsageResource. # noqa: E501\n\n\n :return: The deployment_target_usages of this CertificateUsageResource. # noqa: E501\n :rtype: list[DeploymentTargetResource]\n '
return self._deployment_target_usages<|docstring|>Gets the deployment_target_usages of this CertificateUsageResource. # noqa: E501
:return: The deployment_target_usages of this CertificateUsageResource. # noqa: E501
:rtype: list[DeploymentTargetResource]<|endoftext|> |
68702ee64b4088e24c933a25b52e8b782071c628bdf4eef584ac1a4dc5b666ee | @deployment_target_usages.setter
def deployment_target_usages(self, deployment_target_usages):
'Sets the deployment_target_usages of this CertificateUsageResource.\n\n\n :param deployment_target_usages: The deployment_target_usages of this CertificateUsageResource. # noqa: E501\n :type: list[DeploymentTargetResource]\n '
self._deployment_target_usages = deployment_target_usages | Sets the deployment_target_usages of this CertificateUsageResource.
:param deployment_target_usages: The deployment_target_usages of this CertificateUsageResource. # noqa: E501
:type: list[DeploymentTargetResource] | octopus_deploy_swagger_client/models/certificate_usage_resource.py | deployment_target_usages | cvent/octopus-deploy-api-client | 0 | python | @deployment_target_usages.setter
def deployment_target_usages(self, deployment_target_usages):
'Sets the deployment_target_usages of this CertificateUsageResource.\n\n\n :param deployment_target_usages: The deployment_target_usages of this CertificateUsageResource. # noqa: E501\n :type: list[DeploymentTargetResource]\n '
self._deployment_target_usages = deployment_target_usages | @deployment_target_usages.setter
def deployment_target_usages(self, deployment_target_usages):
'Sets the deployment_target_usages of this CertificateUsageResource.\n\n\n :param deployment_target_usages: The deployment_target_usages of this CertificateUsageResource. # noqa: E501\n :type: list[DeploymentTargetResource]\n '
self._deployment_target_usages = deployment_target_usages<|docstring|>Sets the deployment_target_usages of this CertificateUsageResource.
:param deployment_target_usages: The deployment_target_usages of this CertificateUsageResource. # noqa: E501
:type: list[DeploymentTargetResource]<|endoftext|> |
bfb187b63cabd1f1613ea1ad49d688191f458f918aabbae50628db91a98db020 | @property
def last_modified_on(self):
'Gets the last_modified_on of this CertificateUsageResource. # noqa: E501\n\n\n :return: The last_modified_on of this CertificateUsageResource. # noqa: E501\n :rtype: datetime\n '
return self._last_modified_on | Gets the last_modified_on of this CertificateUsageResource. # noqa: E501
:return: The last_modified_on of this CertificateUsageResource. # noqa: E501
:rtype: datetime | octopus_deploy_swagger_client/models/certificate_usage_resource.py | last_modified_on | cvent/octopus-deploy-api-client | 0 | python | @property
def last_modified_on(self):
'Gets the last_modified_on of this CertificateUsageResource. # noqa: E501\n\n\n :return: The last_modified_on of this CertificateUsageResource. # noqa: E501\n :rtype: datetime\n '
return self._last_modified_on | @property
def last_modified_on(self):
'Gets the last_modified_on of this CertificateUsageResource. # noqa: E501\n\n\n :return: The last_modified_on of this CertificateUsageResource. # noqa: E501\n :rtype: datetime\n '
return self._last_modified_on<|docstring|>Gets the last_modified_on of this CertificateUsageResource. # noqa: E501
:return: The last_modified_on of this CertificateUsageResource. # noqa: E501
:rtype: datetime<|endoftext|> |
b435b2423db9ca5569c78d007332797e480f864ec8e4ae6cf70376bcdad6f92f | @last_modified_on.setter
def last_modified_on(self, last_modified_on):
'Sets the last_modified_on of this CertificateUsageResource.\n\n\n :param last_modified_on: The last_modified_on of this CertificateUsageResource. # noqa: E501\n :type: datetime\n '
self._last_modified_on = last_modified_on | Sets the last_modified_on of this CertificateUsageResource.
:param last_modified_on: The last_modified_on of this CertificateUsageResource. # noqa: E501
:type: datetime | octopus_deploy_swagger_client/models/certificate_usage_resource.py | last_modified_on | cvent/octopus-deploy-api-client | 0 | python | @last_modified_on.setter
def last_modified_on(self, last_modified_on):
'Sets the last_modified_on of this CertificateUsageResource.\n\n\n :param last_modified_on: The last_modified_on of this CertificateUsageResource. # noqa: E501\n :type: datetime\n '
self._last_modified_on = last_modified_on | @last_modified_on.setter
def last_modified_on(self, last_modified_on):
'Sets the last_modified_on of this CertificateUsageResource.\n\n\n :param last_modified_on: The last_modified_on of this CertificateUsageResource. # noqa: E501\n :type: datetime\n '
self._last_modified_on = last_modified_on<|docstring|>Sets the last_modified_on of this CertificateUsageResource.
:param last_modified_on: The last_modified_on of this CertificateUsageResource. # noqa: E501
:type: datetime<|endoftext|> |
f8036c85a101cd99ffd7146379c6fcdd94590b3d88ebd03958adf7afa9730ca7 | @property
def last_modified_by(self):
'Gets the last_modified_by of this CertificateUsageResource. # noqa: E501\n\n\n :return: The last_modified_by of this CertificateUsageResource. # noqa: E501\n :rtype: str\n '
return self._last_modified_by | Gets the last_modified_by of this CertificateUsageResource. # noqa: E501
:return: The last_modified_by of this CertificateUsageResource. # noqa: E501
:rtype: str | octopus_deploy_swagger_client/models/certificate_usage_resource.py | last_modified_by | cvent/octopus-deploy-api-client | 0 | python | @property
def last_modified_by(self):
'Gets the last_modified_by of this CertificateUsageResource. # noqa: E501\n\n\n :return: The last_modified_by of this CertificateUsageResource. # noqa: E501\n :rtype: str\n '
return self._last_modified_by | @property
def last_modified_by(self):
'Gets the last_modified_by of this CertificateUsageResource. # noqa: E501\n\n\n :return: The last_modified_by of this CertificateUsageResource. # noqa: E501\n :rtype: str\n '
return self._last_modified_by<|docstring|>Gets the last_modified_by of this CertificateUsageResource. # noqa: E501
:return: The last_modified_by of this CertificateUsageResource. # noqa: E501
:rtype: str<|endoftext|> |
ef7ee55635b5cdda6b7c5e160d27947454755a83e58e422d320ad6aae06576e9 | @last_modified_by.setter
def last_modified_by(self, last_modified_by):
'Sets the last_modified_by of this CertificateUsageResource.\n\n\n :param last_modified_by: The last_modified_by of this CertificateUsageResource. # noqa: E501\n :type: str\n '
self._last_modified_by = last_modified_by | Sets the last_modified_by of this CertificateUsageResource.
:param last_modified_by: The last_modified_by of this CertificateUsageResource. # noqa: E501
:type: str | octopus_deploy_swagger_client/models/certificate_usage_resource.py | last_modified_by | cvent/octopus-deploy-api-client | 0 | python | @last_modified_by.setter
def last_modified_by(self, last_modified_by):
'Sets the last_modified_by of this CertificateUsageResource.\n\n\n :param last_modified_by: The last_modified_by of this CertificateUsageResource. # noqa: E501\n :type: str\n '
self._last_modified_by = last_modified_by | @last_modified_by.setter
def last_modified_by(self, last_modified_by):
'Sets the last_modified_by of this CertificateUsageResource.\n\n\n :param last_modified_by: The last_modified_by of this CertificateUsageResource. # noqa: E501\n :type: str\n '
self._last_modified_by = last_modified_by<|docstring|>Sets the last_modified_by of this CertificateUsageResource.
:param last_modified_by: The last_modified_by of this CertificateUsageResource. # noqa: E501
:type: str<|endoftext|> |
d7028b6bc828cbf9e4b299bd2af86e79e93ad877f44de414ee3bf412b0c41085 | @property
def links(self):
'Gets the links of this CertificateUsageResource. # noqa: E501\n\n\n :return: The links of this CertificateUsageResource. # noqa: E501\n :rtype: dict(str, str)\n '
return self._links | Gets the links of this CertificateUsageResource. # noqa: E501
:return: The links of this CertificateUsageResource. # noqa: E501
:rtype: dict(str, str) | octopus_deploy_swagger_client/models/certificate_usage_resource.py | links | cvent/octopus-deploy-api-client | 0 | python | @property
def links(self):
'Gets the links of this CertificateUsageResource. # noqa: E501\n\n\n :return: The links of this CertificateUsageResource. # noqa: E501\n :rtype: dict(str, str)\n '
return self._links | @property
def links(self):
'Gets the links of this CertificateUsageResource. # noqa: E501\n\n\n :return: The links of this CertificateUsageResource. # noqa: E501\n :rtype: dict(str, str)\n '
return self._links<|docstring|>Gets the links of this CertificateUsageResource. # noqa: E501
:return: The links of this CertificateUsageResource. # noqa: E501
:rtype: dict(str, str)<|endoftext|> |
459d86b73d17c1f6af78a356c2acc00eb52a1dc9f041813f1679e68753215442 | @links.setter
def links(self, links):
'Sets the links of this CertificateUsageResource.\n\n\n :param links: The links of this CertificateUsageResource. # noqa: E501\n :type: dict(str, str)\n '
self._links = links | Sets the links of this CertificateUsageResource.
:param links: The links of this CertificateUsageResource. # noqa: E501
:type: dict(str, str) | octopus_deploy_swagger_client/models/certificate_usage_resource.py | links | cvent/octopus-deploy-api-client | 0 | python | @links.setter
def links(self, links):
'Sets the links of this CertificateUsageResource.\n\n\n :param links: The links of this CertificateUsageResource. # noqa: E501\n :type: dict(str, str)\n '
self._links = links | @links.setter
def links(self, links):
'Sets the links of this CertificateUsageResource.\n\n\n :param links: The links of this CertificateUsageResource. # noqa: E501\n :type: dict(str, str)\n '
self._links = links<|docstring|>Sets the links of this CertificateUsageResource.
:param links: The links of this CertificateUsageResource. # noqa: E501
:type: dict(str, str)<|endoftext|> |
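Every getter/setter record above follows the same swagger-codegen property pattern; reduced to a single standalone attribute it looks like this.

class Model:
    def __init__(self, links=None):
        self._links = None
        if links is not None:
            self.links = links  # goes through the setter below

    @property
    def links(self):
        return self._links

    @links.setter
    def links(self, links):
        self._links = links

m = Model(links={'Self': '/api/certificates/usage'})
print(m.links['Self'])  # /api/certificates/usage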