Dataset schema (column, dtype, observed range):

  body_hash                stringlengths   64 to 64
  body                     stringlengths   23 to 109k
  docstring                stringlengths   1 to 57k
  path                     stringlengths   4 to 198
  name                     stringlengths   1 to 115
  repository_name          stringlengths   7 to 111
  repository_stars         float64         0 to 191k
  lang                     stringclasses   1 value
  body_without_docstring   stringlengths   14 to 108k
  unified                  stringlengths   45 to 133k
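The rows that follow conform to this schema. As a quick way to explore such an export, the sketch below loads it with pandas; the file name functions.parquet is an assumption made for illustration, since this dump does not say where the export lives.

    import pandas as pd

    # Hypothetical file name; the dump does not state the export location.
    df = pd.read_parquet("functions.parquet")

    # 'lang' is a single-class column, so every row here is Python.
    assert df["lang"].nunique() == 1

    # Example query: short docstrings from repositories with at least 10 stars.
    sample = df[(df["docstring"].str.len() < 80) & (df["repository_stars"] >= 10)]
    print(sample[["repository_name", "path", "name"]].head())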
3709ed8950af21a0bed749254f870b3a03f82b764aaddf9674f5428632f8ca2d
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'data': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), 'name': (str,)}
This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
clients/client/python/ory_client/model/schema_patch.py
openapi_types
ory/sdk
77
python
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'data': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), 'name': (str,)}
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'data': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), 'name': (str,)}<|docstring|>This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.<|endoftext|>
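The docstring above explains why openapi_types must be computed lazily: a model's properties may reference the model's own class, which is not bound yet while the class body executes. A minimal sketch of that pattern (illustrative only, not the generated Ory client code):

    from functools import cached_property

    class TreeNode:
        @cached_property
        def openapi_types(self):
            # TreeNode is not bound while the class body executes, so the
            # self-referential type map is built lazily, on first access.
            return {'children': ([TreeNode],), 'label': (str,)}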
7416d08f34534575efe630dd31ab1d3530ae39f530261da0b319ab97c4146bb6
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, data, name, *args, **kwargs): 'SchemaPatch - a model defined in OpenAPI\n\n Args:\n data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema\n name (str): The user defined schema name\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.data = data self.name = name for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
SchemaPatch - a model defined in OpenAPI Args: data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema name (str): The user defined schema name Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
clients/client/python/ory_client/model/schema_patch.py
_from_openapi_data
ory/sdk
77
python
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, data, name, *args, **kwargs): 'SchemaPatch - a model defined in OpenAPI\n\n Args:\n data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema\n name (str): The user defined schema name\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.data = data self.name = name for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, data, name, *args, **kwargs): 'SchemaPatch - a model defined in OpenAPI\n\n Args:\n data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema\n name (str): The user defined schema name\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.data = data self.name = name for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self<|docstring|>SchemaPatch - a model defined in OpenAPI Args: data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema name (str): The user defined schema name Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)<|endoftext|>
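The _visited_composed_classes bookkeeping described in this docstring can be reduced to a small standalone sketch: follow a discriminator at most once per composition chain. The attribute names discriminator_map and discriminator_field are invented for the illustration; this is not the openapi-generator runtime.

    def resolve_discriminator(cls, payload, visited=()):
        # Follow a class's discriminator at most once per composition chain.
        if cls in visited:
            # Already travelled through this class (e.g. Animal while
            # building Dog, whose allOf includes Animal): stop recursing.
            return cls
        mapping = getattr(cls, 'discriminator_map', {})    # assumed attribute
        field = getattr(cls, 'discriminator_field', None)  # assumed attribute
        target = mapping.get(payload.get(field)) if field else None
        if target is not None and target is not cls:
            return resolve_discriminator(target, payload, visited + (cls,))
        return cls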
cb2567eeb3ce0c408b95ce7a9e138a690e11c4e6a78e70a4eb20129f03ec1cf7
@convert_js_args_to_python_args def __init__(self, data, name, *args, **kwargs): 'SchemaPatch - a model defined in OpenAPI\n\n Args:\n data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema\n name (str): The user defined schema name\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.data = data self.name = name for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
SchemaPatch - a model defined in OpenAPI Args: data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema name (str): The user defined schema name Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
clients/client/python/ory_client/model/schema_patch.py
__init__
ory/sdk
77
python
@convert_js_args_to_python_args def __init__(self, data, name, *args, **kwargs): 'SchemaPatch - a model defined in OpenAPI\n\n Args:\n data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema\n name (str): The user defined schema name\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.data = data self.name = name for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
@convert_js_args_to_python_args def __init__(self, data, name, *args, **kwargs): 'SchemaPatch - a model defined in OpenAPI\n\n Args:\n data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema\n name (str): The user defined schema name\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.data = data self.name = name for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')<|docstring|>SchemaPatch - a model defined in OpenAPI Args: data ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The json schema name (str): The user defined schema name Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)<|endoftext|>
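Going by the constructor signature above, instantiating the model is direct; a hedged usage sketch, assuming the package is importable as ory_client (the module path comes from the row's path field):

    from ory_client.model.schema_patch import SchemaPatch

    # 'data' carries the JSON schema and 'name' the user-defined schema
    # name, per the Args section of the docstring above.
    patch = SchemaPatch(
        data={"type": "object", "properties": {"email": {"type": "string"}}},
        name="customer",
    )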
82cae52bd5a4555b36ab1487725ebee1e498b03e715ada7162bfd773db9ec2a6
@task def info(ctx): 'Show information about the current Python and environment.' version = get_version() suffix = get_suffix() print(f'Python being used: {PYTHONBIN}') print(f'Python extension suffix: {suffix}') print(f'Package version: {version}') venv = get_virtualenv() if venv: print(f'Virtual environment:', venv)
Show information about the current Python and environment.
tasks.py
info
pycopia/devtest
0
python
@task def info(ctx): version = get_version() suffix = get_suffix() print(f'Python being used: {PYTHONBIN}') print(f'Python extension suffix: {suffix}') print(f'Package version: {version}') venv = get_virtualenv() if venv: print(f'Virtual environment:', venv)
@task def info(ctx): version = get_version() suffix = get_suffix() print(f'Python being used: {PYTHONBIN}') print(f'Python extension suffix: {suffix}') print(f'Package version: {version}') venv = get_virtualenv() if venv: print(f'Virtual environment:', venv)<|docstring|>Show information about the current Python and environment.<|endoftext|>
565aefb526a63ca7d432dcf9c3ecc251a68cd33ce8d8775bb4826e1b0cbced52
@task def flake8(ctx, pathname='devtest'): 'Run flake8 linter on the package.' ctx.run(f'{PYTHONBIN} -m flake8 {pathname}')
Run flake8 linter on the package.
tasks.py
flake8
pycopia/devtest
0
python
@task def flake8(ctx, pathname='devtest'): ctx.run(f'{PYTHONBIN} -m flake8 {pathname}')
@task def flake8(ctx, pathname='devtest'): ctx.run(f'{PYTHONBIN} -m flake8 {pathname}')<|docstring|>Run flake8 linter on the package.<|endoftext|>
9b03ee9b57c2a7c8dd26bdfcfcde06138d9d368d8e1f6519c4744d1162143a47
@task def format(ctx, pathname='devtest', check=False): 'Run yapf formatter on the specified file, or recurse into directory.' option = ('-d' if check else '-i') recurse = ('--recursive' if os.path.isdir(pathname) else '') ctx.run(f'{PYTHONBIN} -m yapf --style setup.cfg {option} {recurse} {pathname}')
Run yapf formatter on the specified file, or recurse into directory.
tasks.py
format
pycopia/devtest
0
python
@task def format(ctx, pathname='devtest', check=False): option = ('-d' if check else '-i') recurse = ('--recursive' if os.path.isdir(pathname) else '') ctx.run(f'{PYTHONBIN} -m yapf --style setup.cfg {option} {recurse} {pathname}')
@task def format(ctx, pathname='devtest', check=False): option = ('-d' if check else '-i') recurse = ('--recursive' if os.path.isdir(pathname) else '') ctx.run(f'{PYTHONBIN} -m yapf --style setup.cfg {option} {recurse} {pathname}')<|docstring|>Run yapf formatter on the specified file, or recurse into directory.<|endoftext|>
af1cfe32ee47cf866382a397e1b5c385a67a21c47d3393d75c569207a757d3f8
@task def format_changed(ctx, check=False, untracked=False): 'Run yapf formatter on currently modified python files.\n\n If check option given then just show the diff.\n ' option = ('-d' if check else '-i') files = get_modified_files(untracked) if files: ctx.run(f"{PYTHONBIN} -m yapf --style setup.cfg {option} {' '.join(files)}") else: print('No changed python files.')
Run yapf formatter on currently modified python files. If check option given then just show the diff.
tasks.py
format_changed
pycopia/devtest
0
python
@task def format_changed(ctx, check=False, untracked=False): 'Run yapf formatter on currently modified python files.\n\n If check option given then just show the diff.\n ' option = ('-d' if check else '-i') files = get_modified_files(untracked) if files: ctx.run(f"{PYTHONBIN} -m yapf --style setup.cfg {option} {' '.join(files)}") else: print('No changed python files.')
@task def format_changed(ctx, check=False, untracked=False): 'Run yapf formatter on currently modified python files.\n\n If check option given then just show the diff.\n ' option = ('-d' if check else '-i') files = get_modified_files(untracked) if files: ctx.run(f"{PYTHONBIN} -m yapf --style setup.cfg {option} {' '.join(files)}") else: print('No changed python files.')<|docstring|>Run yapf formatter on currently modified python files. If check option given then just show the diff.<|endoftext|>
ab3e53487efaf5404a6b74f57965edf69201edce83bfb1ddd6f989490e871231
@task def set_pypi_token(ctx): 'Set the token in the local key ring.\n ' pw = getpass.getpass(f'Enter pypi token? ') if pw: keyring.set_password(PYPI_HOST, PYPI_USER, pw) else: raise Exit('No password entered.', 3)
Set the token in the local key ring.
tasks.py
set_pypi_token
pycopia/devtest
0
python
@task def set_pypi_token(ctx): '\n ' pw = getpass.getpass(f'Enter pypi token? ') if pw: keyring.set_password(PYPI_HOST, PYPI_USER, pw) else: raise Exit('No password entered.', 3)
@task def set_pypi_token(ctx): '\n ' pw = getpass.getpass(f'Enter pypi token? ') if pw: keyring.set_password(PYPI_HOST, PYPI_USER, pw) else: raise Exit('No password entered.', 3)<|docstring|>Set the token in the local key ring.<|endoftext|>
b3bf68f6e6482bc1654de606d90b2d4e7bf10e4e4e1fc7f24ed3f780e7fbf086
@task def build(ctx): 'Build the intermediate package components.' ctx.run(f'{PYTHONBIN} setup.py build')
Build the intermediate package components.
tasks.py
build
pycopia/devtest
0
python
@task def build(ctx): ctx.run(f'{PYTHONBIN} setup.py build')
@task def build(ctx): ctx.run(f'{PYTHONBIN} setup.py build')<|docstring|>Build the intermediate package components.<|endoftext|>
551028788eb7bec2e92b4b1fc6552175982aa23ee373870fb2ce2d04cc4e6775
@task def dev_requirements(ctx): 'Install development requirements.' ctx.run(f'{PYTHONBIN} -m pip install --index-url {PYPI_INDEX} --trusted-host {PYPI_HOST} -r dev-requirements.txt --user')
Install development requirements.
tasks.py
dev_requirements
pycopia/devtest
0
python
@task def dev_requirements(ctx): ctx.run(f'{PYTHONBIN} -m pip install --index-url {PYPI_INDEX} --trusted-host {PYPI_HOST} -r dev-requirements.txt --user')
@task def dev_requirements(ctx): ctx.run(f'{PYTHONBIN} -m pip install --index-url {PYPI_INDEX} --trusted-host {PYPI_HOST} -r dev-requirements.txt --user')<|docstring|>Install development requirements.<|endoftext|>
b4459ba89724486dd41d0c604babc447b63da6df38a4ca66acf40950ea7c279f
@task(pre=[dev_requirements]) def develop(ctx, uninstall=False): 'Start developing in developer mode.' if uninstall: ctx.run(f'{PYTHONBIN} setup.py develop --uninstall --user') else: ctx.run(f'{PYTHONBIN} setup.py develop --index-url "{PYPI_INDEX}" --user')
Start developing in developer mode.
tasks.py
develop
pycopia/devtest
0
python
@task(pre=[dev_requirements]) def develop(ctx, uninstall=False): if uninstall: ctx.run(f'{PYTHONBIN} setup.py develop --uninstall --user') else: ctx.run(f'{PYTHONBIN} setup.py develop --index-url "{PYPI_INDEX}" --user')
@task(pre=[dev_requirements]) def develop(ctx, uninstall=False): if uninstall: ctx.run(f'{PYTHONBIN} setup.py develop --uninstall --user') else: ctx.run(f'{PYTHONBIN} setup.py develop --index-url "{PYPI_INDEX}" --user')<|docstring|>Start developing in developer mode.<|endoftext|>
d06c9f2b0c5357b854dc4e99240aa215bce4e7e61103aa6a2c54cf73ba83a195
@task def clean(ctx): 'Clean out build and cache files. Remove extension modules.' ctx.run(f'{PYTHONBIN} setup.py clean') ctx.run('find . -depth -type d -name __pycache__ -exec rm -rf {} \\;') ctx.run('find devtest -name "*.so" -delete') with ctx.cd('docs'): ctx.run('rm -f modules/devtest.*.rst') ctx.run(f'{PYTHONBIN} -m sphinx.cmd.build -M clean . _build')
Clean out build and cache files. Remove extension modules.
tasks.py
clean
pycopia/devtest
0
python
@task def clean(ctx): ctx.run(f'{PYTHONBIN} setup.py clean') ctx.run('find . -depth -type d -name __pycache__ -exec rm -rf {} \\;') ctx.run('find devtest -name "*.so" -delete') with ctx.cd('docs'): ctx.run('rm -f modules/devtest.*.rst') ctx.run(f'{PYTHONBIN} -m sphinx.cmd.build -M clean . _build')
@task def clean(ctx): ctx.run(f'{PYTHONBIN} setup.py clean') ctx.run('find . -depth -type d -name __pycache__ -exec rm -rf {} \\;') ctx.run('find devtest -name "*.so" -delete') with ctx.cd('docs'): ctx.run('rm -f modules/devtest.*.rst') ctx.run(f'{PYTHONBIN} -m sphinx.cmd.build -M clean . _build')<|docstring|>Clean out build and cache files. Remove extension modules.<|endoftext|>
ac7ee43985404f8137b657aa209ee5867cde46276b5e1c887dda1b2c19dc8b5e
@task def cleandist(ctx): 'Clean out dist subdirectory.' if os.path.isdir('dist'): shutil.rmtree('dist', ignore_errors=True) os.mkdir('dist')
Clean out dist subdirectory.
tasks.py
cleandist
pycopia/devtest
0
python
@task def cleandist(ctx): if os.path.isdir('dist'): shutil.rmtree('dist', ignore_errors=True) os.mkdir('dist')
@task def cleandist(ctx): if os.path.isdir('dist'): shutil.rmtree('dist', ignore_errors=True) os.mkdir('dist')<|docstring|>Clean out dist subdirectory.<|endoftext|>
4b8443fb09f31162237469a1a461b42627b407aec35106bd64e52679dcaf257b
@task def test(ctx, testfile=None, ls=False): 'Run unit tests. Use ls option to only list them.' if ls: ctx.run(f'{PYTHONBIN} -m pytest --collect-only -qq tests') elif testfile: ctx.run(f'{PYTHONBIN} -m pytest -s {testfile}') else: ctx.run(f'{PYTHONBIN} -m pytest tests', hide=False, in_stream=False)
Run unit tests. Use ls option to only list them.
tasks.py
test
pycopia/devtest
0
python
@task def test(ctx, testfile=None, ls=False): if ls: ctx.run(f'{PYTHONBIN} -m pytest --collect-only -qq tests') elif testfile: ctx.run(f'{PYTHONBIN} -m pytest -s {testfile}') else: ctx.run(f'{PYTHONBIN} -m pytest tests', hide=False, in_stream=False)
@task def test(ctx, testfile=None, ls=False): if ls: ctx.run(f'{PYTHONBIN} -m pytest --collect-only -qq tests') elif testfile: ctx.run(f'{PYTHONBIN} -m pytest -s {testfile}') else: ctx.run(f'{PYTHONBIN} -m pytest tests', hide=False, in_stream=False)<|docstring|>Run unit tests. Use ls option to only list them.<|endoftext|>
c516f5044d0753ecc92429544b9dfbaf42add48890a2d81e0c8ae40501d22c5b
@task def tag(ctx, tag=None, major=False, minor=False, patch=False): "Tag or bump release with a semver tag. Makes a signed tag if you're a signer." latest = None if (tag is None): tags = get_tags() if (not tags): latest = semver.VersionInfo(0, 0, 0) else: latest = tags[(- 1)] if patch: nextver = latest.bump_patch() elif minor: nextver = latest.bump_minor() elif major: nextver = latest.bump_major() else: nextver = latest.bump_patch() else: if tag.startswith('v'): tag = tag[1:] try: nextver = semver.parse_version_info(tag) except ValueError: raise Exit('Invalid semver tag.', 2) print(latest, '->', nextver) tagopt = ('-s' if (CURRENT_USER in SIGNERS) else '-a') ctx.run(f'git tag {tagopt} -m "Release v{nextver}" v{nextver}')
Tag or bump release with a semver tag. Makes a signed tag if you're a signer.
tasks.py
tag
pycopia/devtest
0
python
@task def tag(ctx, tag=None, major=False, minor=False, patch=False): latest = None if (tag is None): tags = get_tags() if (not tags): latest = semver.VersionInfo(0, 0, 0) else: latest = tags[(- 1)] if patch: nextver = latest.bump_patch() elif minor: nextver = latest.bump_minor() elif major: nextver = latest.bump_major() else: nextver = latest.bump_patch() else: if tag.startswith('v'): tag = tag[1:] try: nextver = semver.parse_version_info(tag) except ValueError: raise Exit('Invalid semver tag.', 2) print(latest, '->', nextver) tagopt = ('-s' if (CURRENT_USER in SIGNERS) else '-a') ctx.run(f'git tag {tagopt} -m "Release v{nextver}" v{nextver}')
@task def tag(ctx, tag=None, major=False, minor=False, patch=False): latest = None if (tag is None): tags = get_tags() if (not tags): latest = semver.VersionInfo(0, 0, 0) else: latest = tags[(- 1)] if patch: nextver = latest.bump_patch() elif minor: nextver = latest.bump_minor() elif major: nextver = latest.bump_major() else: nextver = latest.bump_patch() else: if tag.startswith('v'): tag = tag[1:] try: nextver = semver.parse_version_info(tag) except ValueError: raise Exit('Invalid semver tag.', 2) print(latest, '->', nextver) tagopt = ('-s' if (CURRENT_USER in SIGNERS) else '-a') ctx.run(f'git tag {tagopt} -m "Release v{nextver}" v{nextver}')<|docstring|>Tag or bump release with a semver tag. Makes a signed tag if you're a signer.<|endoftext|>
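The tag task uses the pre-3.0 semver API (VersionInfo, bump_patch, parse_version_info). A quick illustration of the bump logic it implements, assuming that same API:

    import semver

    latest = semver.VersionInfo(1, 4, 2)
    print(latest.bump_patch())  # 1.4.3
    print(latest.bump_minor())  # 1.5.0
    print(latest.bump_major())  # 2.0.0

    # Explicit tags are validated the same way the task validates them:
    print(semver.parse_version_info("2.0.0"))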
267be847d3f4869e6bbcc9f4e973bec74c50db04c49a378839d18a103054e685
@task def tag_delete(ctx, tag=None): 'Delete a tag, both local and remote.' if tag: ctx.run(f'git tag -d {tag}') ctx.run(f'git push origin :refs/tags/{tag}')
Delete a tag, both local and remote.
tasks.py
tag_delete
pycopia/devtest
0
python
@task def tag_delete(ctx, tag=None): if tag: ctx.run(f'git tag -d {tag}') ctx.run(f'git push origin :refs/tags/{tag}')
@task def tag_delete(ctx, tag=None): if tag: ctx.run(f'git tag -d {tag}') ctx.run(f'git push origin :refs/tags/{tag}')<|docstring|>Delete a tag, both local and remote.<|endoftext|>
8d377d27cf01c46a73a512ecaec87cdc8ccdd34dc04fd171888ebacfe5d59126
@task(cleandist) def sdist(ctx): 'Build source distribution.' ctx.run(f'{PYTHONBIN} setup.py sdist')
Build source distribution.
tasks.py
sdist
pycopia/devtest
0
python
@task(cleandist) def sdist(ctx): ctx.run(f'{PYTHONBIN} setup.py sdist')
@task(cleandist) def sdist(ctx): ctx.run(f'{PYTHONBIN} setup.py sdist')<|docstring|>Build source distribution.<|endoftext|>
12e2819d5de105538746c0c305e1d07e861d2d9e7398bdee12ad3c729c139b14
@task def build_ext(ctx): 'Build compiled extension modules, in place.' ctx.run(f'{PYTHONBIN} setup.py build_ext --inplace')
Build compiled extension modules, in place.
tasks.py
build_ext
pycopia/devtest
0
python
@task def build_ext(ctx): ctx.run(f'{PYTHONBIN} setup.py build_ext --inplace')
@task def build_ext(ctx): ctx.run(f'{PYTHONBIN} setup.py build_ext --inplace')<|docstring|>Build compiled extension modules, in place.<|endoftext|>
385866f764584632fd2a4dcd98beac5d6707174f3fe2971bfe65955ac662d244
@task(sdist) def bdist(ctx): 'Build a standard wheel file, an installable format.' ctx.run(f'{PYTHONBIN} setup.py bdist_wheel')
Build a standard wheel file, an installable format.
tasks.py
bdist
pycopia/devtest
0
python
@task(sdist) def bdist(ctx): ctx.run(f'{PYTHONBIN} setup.py bdist_wheel')
@task(sdist) def bdist(ctx): ctx.run(f'{PYTHONBIN} setup.py bdist_wheel')<|docstring|>Build a standard wheel file, an installable format.<|endoftext|>
9d3b23d0e840370a19776691ca12e11eb917e77ec0f83c6842d8c50734e7f89e
@task(bdist) def sign(ctx): 'Cryptographically sign dist with your default GPG key.' if (CURRENT_USER in SIGNERS): ctx.run(f'{GPG} --detach-sign -a dist/devtest-*.whl') ctx.run(f'{GPG} --detach-sign -a dist/devtest-*.tar.gz') else: print('Not signing.')
Cryptographically sign dist with your default GPG key.
tasks.py
sign
pycopia/devtest
0
python
@task(bdist) def sign(ctx): if (CURRENT_USER in SIGNERS): ctx.run(f'{GPG} --detach-sign -a dist/devtest-*.whl') ctx.run(f'{GPG} --detach-sign -a dist/devtest-*.tar.gz') else: print('Not signing.')
@task(bdist) def sign(ctx): if (CURRENT_USER in SIGNERS): ctx.run(f'{GPG} --detach-sign -a dist/devtest-*.whl') ctx.run(f'{GPG} --detach-sign -a dist/devtest-*.tar.gz') else: print('Not signing.')<|docstring|>Cryptographically sign dist with your default GPG key.<|endoftext|>
2db5575244c1496800df39877039884e98e358d66c456b979c0ccc8c10a495e9
@task(pre=[sign]) def publish(ctx): 'Publish built wheel file to package repo.' token = get_pypi_token() distfiles = glob('dist/*.whl') distfiles.extend(glob('dist/*.tar.gz')) if (not distfiles): raise Exit('Nothing in dist folder!') distfiles = ' '.join(distfiles) ctx.run(f'{PYTHONBIN} -m twine upload --repository-url "{PYPI_URL}" --username {PYPI_USER} --password {token} {distfiles}')
Publish built wheel file to package repo.
tasks.py
publish
pycopia/devtest
0
python
@task(pre=[sign]) def publish(ctx): token = get_pypi_token() distfiles = glob('dist/*.whl') distfiles.extend(glob('dist/*.tar.gz')) if (not distfiles): raise Exit('Nothing in dist folder!') distfiles = ' '.join(distfiles) ctx.run(f'{PYTHONBIN} -m twine upload --repository-url "{PYPI_URL}" --username {PYPI_USER} --password {token} {distfiles}')
@task(pre=[sign]) def publish(ctx): token = get_pypi_token() distfiles = glob('dist/*.whl') distfiles.extend(glob('dist/*.tar.gz')) if (not distfiles): raise Exit('Nothing in dist folder!') distfiles = ' '.join(distfiles) ctx.run(f'{PYTHONBIN} -m twine upload --repository-url "{PYPI_URL}" --username {PYPI_USER} --password {token} {distfiles}')<|docstring|>Publish built wheel file to package repo.<|endoftext|>
b2b6e597dc45f72118abdeb8d661a8c80aff4851b7f4e46981e0d96fe43547a5
@task def docs(ctx): 'Build the HTML documentation.' ctx.run('rm docs/modules/devtest.*.rst', warn=True) ctx.run(f'{PYTHONBIN} -m sphinx.ext.apidoc --force --separate --no-toc --output-dir docs/modules devtest') with ctx.cd('docs'): ctx.run(f'{PYTHONBIN} -m sphinx.cmd.build -M html . _build') if os.environ.get('DISPLAY'): ctx.run('xdg-open docs/_build/html/index.html')
Build the HTML documentation.
tasks.py
docs
pycopia/devtest
0
python
@task def docs(ctx): ctx.run('rm docs/modules/devtest.*.rst', warn=True) ctx.run(f'{PYTHONBIN} -m sphinx.ext.apidoc --force --separate --no-toc --output-dir docs/modules devtest') with ctx.cd('docs'): ctx.run(f'{PYTHONBIN} -m sphinx.cmd.build -M html . _build') if os.environ.get('DISPLAY'): ctx.run('xdg-open docs/_build/html/index.html')
@task def docs(ctx): ctx.run('rm docs/modules/devtest.*.rst', warn=True) ctx.run(f'{PYTHONBIN} -m sphinx.ext.apidoc --force --separate --no-toc --output-dir docs/modules devtest') with ctx.cd('docs'): ctx.run(f'{PYTHONBIN} -m sphinx.cmd.build -M html . _build') if os.environ.get('DISPLAY'): ctx.run('xdg-open docs/_build/html/index.html')<|docstring|>Build the HTML documentation.<|endoftext|>
ccbca7478a38f768bf6994d233815c23b8fc53ee769ded37ca6c25e6157b9522
@task def branch(ctx, name=None): 'start a new branch, both local and remote tracking.' if name: ctx.run(f'git checkout -b {name}') ctx.run(f'git push -u origin {name}') else: ctx.run('git --no-pager branch')
start a new branch, both local and remote tracking.
tasks.py
branch
pycopia/devtest
0
python
@task def branch(ctx, name=None): if name: ctx.run(f'git checkout -b {name}') ctx.run(f'git push -u origin {name}') else: ctx.run('git --no-pager branch')
@task def branch(ctx, name=None): if name: ctx.run(f'git checkout -b {name}') ctx.run(f'git push -u origin {name}') else: ctx.run('git --no-pager branch')<|docstring|>start a new branch, both local and remote tracking.<|endoftext|>
fefe75b8f4321e551d47f25427f8e6fe7c6b579d2a4d6d6b53b8597221d7a896
@task def branch_delete(ctx, name=None): 'Delete local, remote and tracking branch by name.' if name: ctx.run(f'git branch -d {name}', warn=True) ctx.run(f'git branch -d -r {name}', warn=True) ctx.run(f'git push origin --delete {name}', warn=True) else: print('Supply a branch name: --name <name>')
Delete local, remote and tracking branch by name.
tasks.py
branch_delete
pycopia/devtest
0
python
@task def branch_delete(ctx, name=None): if name: ctx.run(f'git branch -d {name}', warn=True) ctx.run(f'git branch -d -r {name}', warn=True) ctx.run(f'git push origin --delete {name}', warn=True) else: print('Supply a branch name: --name <name>')
@task def branch_delete(ctx, name=None): if name: ctx.run(f'git branch -d {name}', warn=True) ctx.run(f'git branch -d -r {name}', warn=True) ctx.run(f'git push origin --delete {name}', warn=True) else: print('Supply a branch name: --name <name>')<|docstring|>Delete local, remote and tracking branch by name.<|endoftext|>
2e3258bed2ffbe7866b6fe9344662480676cee0ee67a4b98527ce3487c0bd9b2
@task(pre=[sdist]) def docker_build(ctx): 'Build docker image.' version = get_version() if (not version): raise Exit('Need to tag a version first.', 2) environ = {'PYVER': '{}.{}'.format(sys.version_info.major, sys.version_info.minor), 'VERSION': version, 'PYPI_REPO': PYPI_INDEX, 'PYPI_HOST': PYPI_HOST} ctx.run(f'docker build --build-arg PYVER --build-arg VERSION --build-arg PYPI_REPO --build-arg PYPI_HOST -t devtest:{version} .', env=environ) print(f'''Done. To run it: docker run -it devtest:{version}''')
Build docker image.
tasks.py
docker_build
pycopia/devtest
0
python
@task(pre=[sdist]) def docker_build(ctx): version = get_version() if (not version): raise Exit('Need to tag a version first.', 2) environ = {'PYVER': '{}.{}'.format(sys.version_info.major, sys.version_info.minor), 'VERSION': version, 'PYPI_REPO': PYPI_INDEX, 'PYPI_HOST': PYPI_HOST} ctx.run(f'docker build --build-arg PYVER --build-arg VERSION --build-arg PYPI_REPO --build-arg PYPI_HOST -t devtest:{version} .', env=environ) print(f'Done. To run it: docker run -it devtest:{version}')
@task(pre=[sdist]) def docker_build(ctx): version = get_version() if (not version): raise Exit('Need to tag a version first.', 2) environ = {'PYVER': '{}.{}'.format(sys.version_info.major, sys.version_info.minor), 'VERSION': version, 'PYPI_REPO': PYPI_INDEX, 'PYPI_HOST': PYPI_HOST} ctx.run(f'docker build --build-arg PYVER --build-arg VERSION --build-arg PYPI_REPO --build-arg PYPI_HOST -t devtest:{version} .', env=environ) print(f'Done. To run it: docker run -it devtest:{version}')<|docstring|>Build docker image.<|endoftext|>
196e88fae02e34628a6559774d237dc85c1f8ccd7e510809fceb3f1ecf8940b8
@task def logfile(ctx, name='devtester'): 'Dump the system log file with optional name filter.' if WINDOWS: ctx.run(f'wevtutil.exe qe Application /query:"*[System[Provider[@Name={name!r}]]]" /f:text') elif LINUX: ctx.run(f'journalctl --identifier={name!r} --no-pager --priority=debug') elif DARWIN: ctx.run(f"""log stream --predicate 'senderImagePath contains "Python"' --level debug""")
Dump the system log file with optional name filter.
tasks.py
logfile
pycopia/devtest
0
python
@task def logfile(ctx, name='devtester'): if WINDOWS: ctx.run(f'wevtutil.exe qe Application /query:"*[System[Provider[@Name={name!r}]]]" /f:text') elif LINUX: ctx.run(f'journalctl --identifier={name!r} --no-pager --priority=debug') elif DARWIN: ctx.run(f"""log stream --predicate 'senderImagePath contains "Python"' --level debug""")
@task def logfile(ctx, name='devtester'): if WINDOWS: ctx.run(f'wevtutil.exe qe Application /query:"*[System[Provider[@Name={name!r}]]]" /f:text') elif LINUX: ctx.run(f'journalctl --identifier={name!r} --no-pager --priority=debug') elif DARWIN: ctx.run(f"""log stream --predicate 'senderImagePath contains "Python"' --level debug""")<|docstring|>Dump the system log file with optional name filter.<|endoftext|>
8e43f24fcbfc1fef0646b177097b8f947ccc2139df12b1c6916a275a407f9f6c
def find_git_base(): 'Find the base directory of this git repo.\n\n The git status output is always relative to this directory.\n ' start = Path.cwd().resolve() while start: if (start / '.git').exists(): return start start = start.parent raise Exit('Not able to find git repo base.')
Find the base directory of this git repo. The git status output is always relative to this directory.
tasks.py
find_git_base
pycopia/devtest
0
python
def find_git_base(): 'Find the base directory of this git repo.\n\n The git status output is always relative to this directory.\n ' start = Path.cwd().resolve() while start: if (start / '.git').exists(): return start start = start.parent raise Exit('Not able to find git repo base.')
def find_git_base(): 'Find the base directory of this git repo.\n\n The git status output is always relative to this directory.\n ' start = Path.cwd().resolve() while start: if (start / '.git').exists(): return start start = start.parent raise Exit('Not able to find git repo base.')<|docstring|>Find the base directory of this git repo. The git status output is always relative to this directory.<|endoftext|>
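Worth noting about find_git_base: a Path object is always truthy and Path('/').parent is Path('/') again, so if no .git directory exists the loop above never reaches its raise. A terminating variant (a sketch, with SystemExit standing in for invoke's Exit):

    from pathlib import Path

    def find_git_base():
        # Walk upward until a .git entry is found, or stop at the filesystem
        # root (where a path is its own parent) instead of looping forever.
        start = Path.cwd().resolve()
        while True:
            if (start / '.git').exists():
                return start
            if start == start.parent:
                raise SystemExit('Not able to find git repo base.')
            start = start.parent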
cc31a770f782739dab0797c3926c2177e99c075f6909cf6fd6d3b33182910af3
def get_modified_files(untracked): 'Find the list of modified and, optionally, untracked Python files.\n\n If `untracked` is True, also include untracked Python files.\n ' filelist = [] gitbase = find_git_base() gitout = run('git status --porcelain=1 -z', hide=True) for line in gitout.stdout.split('\x00'): if line: if (not line.endswith('.py')): continue if (line[0:2] == ' M'): filelist.append(resolve_path(gitbase, line[3:])) if (untracked and (line[0:2] == '??')): filelist.append(resolve_path(gitbase, line[3:])) return filelist
Find the list of modified and, optionally, untracked Python files. If `untracked` is True, also include untracked Python files.
tasks.py
get_modified_files
pycopia/devtest
0
python
def get_modified_files(untracked): 'Find the list of modified and, optionally, untracked Python files.\n\n If `untracked` is True, also include untracked Python files.\n ' filelist = [] gitbase = find_git_base() gitout = run('git status --porcelain=1 -z', hide=True) for line in gitout.stdout.split('\x00'): if line: if (not line.endswith('.py')): continue if (line[0:2] == ' M'): filelist.append(resolve_path(gitbase, line[3:])) if (untracked and (line[0:2] == '??')): filelist.append(resolve_path(gitbase, line[3:])) return filelist
def get_modified_files(untracked): 'Find the list of modified and, optionally, untracked Python files.\n\n If `untracked` is True, also include untracked Python files.\n ' filelist = [] gitbase = find_git_base() gitout = run('git status --porcelain=1 -z', hide=True) for line in gitout.stdout.split('\x00'): if line: if (not line.endswith('.py')): continue if (line[0:2] == ' M'): filelist.append(resolve_path(gitbase, line[3:])) if (untracked and (line[0:2] == '??')): filelist.append(resolve_path(gitbase, line[3:])) return filelist<|docstring|>Find the list of modified and, optionally, untracked Python files. If `untracked` is True, also include untracked Python files.<|endoftext|>
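The slicing in get_modified_files follows the git status --porcelain=1 -z record layout: a two-character XY status code, a space, then the path, with records NUL-separated. A standalone illustration of that parse (the sample records are invented for the example):

    # NUL-separated porcelain v1 records: two-char status, space, path.
    sample = " M devtest/core.py\x00?? scratch.py\x00 M README.rst\x00"

    modified, untracked = [], []
    for record in sample.split("\x00"):
        if not record.endswith(".py"):
            continue
        status, path = record[0:2], record[3:]
        if status == " M":
            modified.append(path)
        elif status == "??":
            untracked.append(path)

    print(modified)   # ['devtest/core.py']
    print(untracked)  # ['scratch.py']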
6c1797e18f6c9c8799febe2923b2c71f3c306128e4f0dd2fab9d79e56b073f0f
def __init__(self, temboo_session): '\n Create a new instance of the UploadInstallationData Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n ' super(UploadInstallationData, self).__init__(temboo_session, '/Library/Parse/PushNotifications/UploadInstallationData')
Create a new instance of the UploadInstallationData Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied.
temboo/core/Library/Parse/PushNotifications/UploadInstallationData.py
__init__
jordanemedlock/psychtruths
7
python
def __init__(self, temboo_session): '\n Create a new instance of the UploadInstallationData Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n ' super(UploadInstallationData, self).__init__(temboo_session, '/Library/Parse/PushNotifications/UploadInstallationData')
def __init__(self, temboo_session): '\n Create a new instance of the UploadInstallationData Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n ' super(UploadInstallationData, self).__init__(temboo_session, '/Library/Parse/PushNotifications/UploadInstallationData')<|docstring|>Create a new instance of the UploadInstallationData Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied.<|endoftext|>
e9974be4942f6ca77ae754cc6892d23e9472a4136d82a6da0b2e76aa77d9c3bb
def set_Installation(self, value): '\n Set the value of the Installation input for this Choreo. ((required, json) A JSON string containing the installation data. See documentation for syntax examples.)\n ' super(UploadInstallationDataInputSet, self)._set_input('Installation', value)
Set the value of the Installation input for this Choreo. ((required, json) A JSON string containing the installation data. See documentation for syntax examples.)
temboo/core/Library/Parse/PushNotifications/UploadInstallationData.py
set_Installation
jordanemedlock/psychtruths
7
python
def set_Installation(self, value): '\n \n ' super(UploadInstallationDataInputSet, self)._set_input('Installation', value)
def set_Installation(self, value): '\n \n ' super(UploadInstallationDataInputSet, self)._set_input('Installation', value)<|docstring|>Set the value of the Installation input for this Choreo. ((required, json) A JSON string containing the installation data. See documentation for syntax examples.)<|endoftext|>
976ce64dd71143306b6a77c55ecf6557296d438f352ed11ce0c59f5b065b7fc3
def set_ApplicationID(self, value): '\n Set the value of the ApplicationID input for this Choreo. ((required, string) The Application ID provided by Parse.)\n ' super(UploadInstallationDataInputSet, self)._set_input('ApplicationID', value)
Set the value of the ApplicationID input for this Choreo. ((required, string) The Application ID provided by Parse.)
temboo/core/Library/Parse/PushNotifications/UploadInstallationData.py
set_ApplicationID
jordanemedlock/psychtruths
7
python
def set_ApplicationID(self, value): '\n \n ' super(UploadInstallationDataInputSet, self)._set_input('ApplicationID', value)
def set_ApplicationID(self, value): '\n \n ' super(UploadInstallationDataInputSet, self)._set_input('ApplicationID', value)<|docstring|>Set the value of the ApplicationID input for this Choreo. ((required, string) The Application ID provided by Parse.)<|endoftext|>
3cffe567936c6464dbe3ef68d624ce199873bbbd876a7b02ad18f32d82a71858
def set_RESTAPIKey(self, value): '\n Set the value of the RESTAPIKey input for this Choreo. ((required, string) The REST API Key provided by Parse.)\n ' super(UploadInstallationDataInputSet, self)._set_input('RESTAPIKey', value)
Set the value of the RESTAPIKey input for this Choreo. ((required, string) The REST API Key provided by Parse.)
temboo/core/Library/Parse/PushNotifications/UploadInstallationData.py
set_RESTAPIKey
jordanemedlock/psychtruths
7
python
def set_RESTAPIKey(self, value): '\n \n ' super(UploadInstallationDataInputSet, self)._set_input('RESTAPIKey', value)
def set_RESTAPIKey(self, value): '\n \n ' super(UploadInstallationDataInputSet, self)._set_input('RESTAPIKey', value)<|docstring|>Set the value of the RESTAPIKey input for this Choreo. ((required, string) The REST API Key provided by Parse.)<|endoftext|>
43cfb02d7da32b267644600875c1e5ec4c3ef9d4a3416d2c412e17cfd9b362ea
def get_Response(self): '\n Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Parse.)\n ' return self._output.get('Response', None)
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Parse.)
temboo/core/Library/Parse/PushNotifications/UploadInstallationData.py
get_Response
jordanemedlock/psychtruths
7
python
def get_Response(self): '\n \n ' return self._output.get('Response', None)
def get_Response(self): '\n \n ' return self._output.get('Response', None)<|docstring|>Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Parse.)<|endoftext|>
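The five Temboo rows above follow one choreo pattern: input setters paired with an output getter. A hedged usage sketch; the choreo import path comes from the row's path field, while the session import and the new_input_set/execute_with_results calls are assumed from Temboo SDK conventions:

    from temboo.core.session import TembooSession
    from temboo.core.Library.Parse.PushNotifications.UploadInstallationData import UploadInstallationData

    session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
    choreo = UploadInstallationData(session)

    inputs = choreo.new_input_set()
    inputs.set_ApplicationID("PARSE_APP_ID")
    inputs.set_RESTAPIKey("PARSE_REST_API_KEY")
    inputs.set_Installation('{"deviceType": "ios", "deviceToken": "..."}')

    results = choreo.execute_with_results(inputs)
    print(results.get_Response())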
abd9972410a57877ee3fdeaf28492b55254fd84e89a316fedc95b9eec5799887
@staticmethod def print_move(move): 'issue an order' stdout.write('{}\n'.format(move)) stdout.flush()
issue an order
bot/game.py
print_move
thequeenofspades/AlphaGOLADZero
1
python
@staticmethod def print_move(move): stdout.write('{}\n'.format(move)) stdout.flush()
@staticmethod def print_move(move): stdout.write('{}\n'.format(move)) stdout.flush()<|docstring|>issue an order<|endoftext|>
c57aad9d46510899ecd3121bf938061c185496b6515d825c725350f695a08cca
def run(self, bot): 'parse input, update game state and call the bot classes do_turn method' not_finished = True data = '' while ((not stdin.closed) and not_finished): try: current_line = stdin.readline().rstrip('\r\n') if (len(current_line) <= 0): time.sleep(1) continue data += (current_line + '\n') if current_line.lower().startswith('action'): self.update(data) move = bot.make_move(self) self.print_move(move) data = '' elif current_line.lower().startswith('quit'): not_finished = False except EOFError: break except KeyboardInterrupt: raise except: traceback.print_exc(file=stderr) stderr.flush()
parse input, update game state and call the bot classes do_turn method
bot/game.py
run
thequeenofspades/AlphaGOLADZero
1
python
def run(self, bot): not_finished = True data = '' while ((not stdin.closed) and not_finished): try: current_line = stdin.readline().rstrip('\r\n') if (len(current_line) <= 0): time.sleep(1) continue data += (current_line + '\n') if current_line.lower().startswith('action'): self.update(data) move = bot.make_move(self) self.print_move(move) data = '' elif current_line.lower().startswith('quit'): not_finished = False except EOFError: break except KeyboardInterrupt: raise except: traceback.print_exc(file=stderr) stderr.flush()
def run(self, bot): not_finished = True data = '' while ((not stdin.closed) and not_finished): try: current_line = stdin.readline().rstrip('\r\n') if (len(current_line) <= 0): time.sleep(1) continue data += (current_line + '\n') if current_line.lower().startswith('action'): self.update(data) move = bot.make_move(self) self.print_move(move) data = '' elif current_line.lower().startswith('quit'): not_finished = False except EOFError: break except KeyboardInterrupt: raise except: traceback.print_exc(file=stderr) stderr.flush()<|docstring|>parse input, update game state and call the bot classes do_turn method<|endoftext|>
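
A hedged sketch of how run() above would presumably be driven. The Game class name and the make_move(game) contract are inferred from the calls inside run(); the returned move string is purely illustrative.

class PassBot(object):
    def make_move(self, game):
        # run() forwards whatever string this returns to print_move()
        return 'pass'

if __name__ == '__main__':
    game = Game()        # assumed: the class that defines run()/print_move() above
    game.run(PassBot())  # blocks reading engine commands from stdin until 'quit'
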
289dda90419607c202636066fa233488336c3eacf4589d9a8b0be7da70e7fa10
def build_model(cfg: dict=None, src_vocab: Vocabulary=None, trg_vocab: Vocabulary=None, trv_vocab: Vocabulary=None, canonizer=None) -> Model: '\n Build and initialize the model according to the configuration.\n\n :param cfg: dictionary configuration containing model specifications\n :param src_vocab: source vocabulary\n :param trg_vocab: target vocabulary\n :param trv_vocab: kb true value lookup vocabulary\n :return: built and initialized model\n ' src_padding_idx = src_vocab.stoi[PAD_TOKEN] trg_padding_idx = trg_vocab.stoi[PAD_TOKEN] if ('embedding_files' in cfg.keys()): assert (not cfg.get('tied_embeddings', False)), 'TODO implement tied embeddings along with pretrained initialization' raise NotImplementedError('TODO implement kbsrc embed loading for embedding files') weight_tensors = [] for weight_file in cfg['embedding_files']: with open(weight_file, 'r') as f: weight = [] for line in f.readlines(): line = line.split() line = [float(x) for x in line] weight.append(line) weight = FloatTensor(weight) weight_tensors.append(weight) src_embed = Embeddings(int(weight_tensors[0][0].shape[0]), False, len(weight_tensors[0])) src_embed.lut.weight.data = weight_tensors[0] trg_embed = Embeddings(int(weight_tensors[1][0].shape[0]), False, len(weight_tensors[1])) trg_embed.lut.weight.data = weight_tensors[1] else: src_embed = Embeddings(**cfg['encoder']['embeddings'], vocab_size=len(src_vocab), padding_idx=src_padding_idx) if cfg.get('kb_embed_separate', False): kbsrc_embed = Embeddings(**cfg['encoder']['embeddings'], vocab_size=len(src_vocab), padding_idx=src_padding_idx) else: kbsrc_embed = src_embed if cfg.get('tied_embeddings', False): if (src_vocab.itos == trg_vocab.itos): trg_embed = src_embed else: raise ConfigurationError('Embedding cannot be tied since vocabularies differ.') else: trg_embed = Embeddings(**cfg['decoder']['embeddings'], vocab_size=len(trg_vocab), padding_idx=trg_padding_idx) enc_dropout = cfg['encoder'].get('dropout', 0.0) enc_emb_dropout = cfg['encoder']['embeddings'].get('dropout', enc_dropout) if (cfg['encoder'].get('type', 'recurrent') == 'transformer'): assert (cfg['encoder']['embeddings']['embedding_dim'] == cfg['encoder']['hidden_size']), 'for transformer, emb_size must be hidden_size' encoder = TransformerEncoder(**cfg['encoder'], emb_size=src_embed.embedding_dim, emb_dropout=enc_emb_dropout) else: encoder = RecurrentEncoder(**cfg['encoder'], emb_size=src_embed.embedding_dim, emb_dropout=enc_emb_dropout) kb_task = bool(cfg.get('kb', False)) k_hops = int(cfg.get('k_hops', 1)) same_module_for_all_hops = bool(cfg.get('same_module_for_all_hops', False)) do_postproc = bool(cfg.get('do_postproc', True)) copy_from_source = bool(cfg.get('copy_from_source', True)) canonization_func = (None if (canonizer is None) else canonizer(copy_from_source=copy_from_source)) kb_input_feeding = bool(cfg.get('kb_input_feeding', True)) kb_feed_rnn = bool(cfg.get('kb_feed_rnn', True)) kb_multihead_feed = bool(cfg.get('kb_multihead_feed', False)) posEncKBkeys = cfg.get('posEncdKBkeys', False) tfstyletf = cfg.get('tfstyletf', True) infeedkb = bool(cfg.get('infeedkb', False)) outfeedkb = bool(cfg.get('outfeedkb', False)) add_kb_biases_to_output = bool(cfg.get('add_kb_biases_to_output', True)) kb_max_dims = cfg.get('kb_max_dims', (16, 32)) double_decoder = cfg.get('double_decoder', False) tied_side_softmax = cfg.get('tied_side_softmax', False) do_pad_kb_keys = cfg.get('pad_kb_keys', True) if hasattr(kb_max_dims, '__iter__'): kb_max_dims = tuple(kb_max_dims) else: assert (type(kb_max_dims) == int), kb_max_dims kb_max_dims = (kb_max_dims,) assert cfg['decoder']['hidden_size'] dec_dropout = cfg['decoder'].get('dropout', 0.0) dec_emb_dropout = cfg['decoder']['embeddings'].get('dropout', dec_dropout) if (cfg['decoder'].get('type', 'recurrent') == 'transformer'): if tfstyletf: decoder = TransformerDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, kb_task=kb_task, kb_key_emb_size=kbsrc_embed.embedding_dim, feed_kb_hidden=kb_input_feeding, infeedkb=infeedkb, outfeedkb=outfeedkb, double_decoder=double_decoder) else: decoder = TransformerKBrnnDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, kb_task=kb_task, k_hops=k_hops, kb_max=kb_max_dims, same_module_for_all_hops=same_module_for_all_hops, kb_key_emb_size=kbsrc_embed.embedding_dim, kb_input_feeding=kb_input_feeding, kb_feed_rnn=kb_feed_rnn, kb_multihead_feed=kb_multihead_feed) elif (not kb_task): decoder = RecurrentDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout) else: decoder = KeyValRetRNNDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, k_hops=k_hops, kb_max=kb_max_dims, same_module_for_all_hops=same_module_for_all_hops, kb_key_emb_size=kbsrc_embed.embedding_dim, kb_input_feeding=kb_input_feeding, kb_feed_rnn=kb_feed_rnn, kb_multihead_feed=kb_multihead_feed, do_pad_kb_keys=do_pad_kb_keys) generator = Generator(dec_hidden_size=cfg['decoder']['hidden_size'], vocab_size=len(trg_vocab), add_kb_biases_to_output=add_kb_biases_to_output, double_decoder=double_decoder) model = Model(encoder=encoder, decoder=decoder, generator=generator, src_embed=src_embed, trg_embed=trg_embed, src_vocab=src_vocab, trg_vocab=trg_vocab, kb_key_embed=kbsrc_embed, trv_vocab=trv_vocab, k_hops=k_hops, do_postproc=do_postproc, canonize=canonization_func, kb_att_dims=len(kb_max_dims), posEncKBkeys=posEncKBkeys) if cfg.get('tied_softmax', False): if (trg_embed.lut.weight.shape == model.generator.output_layer.weight.shape): model.generator.output_layer.weight = trg_embed.lut.weight if model.generator.double_decoder: assert hasattr(model.generator, 'side_output_layer') if tied_side_softmax: model.generator.side_output_layer.weight = trg_embed.lut.weight else: raise ConfigurationError('For tied_softmax, the decoder embedding_dim and decoder hidden_size must be the same.The decoder must be a Transformer.') initialize_model(model, cfg, src_padding_idx, trg_padding_idx) return model
Build and initialize the model according to the configuration. :param cfg: dictionary configuration containing model specifications :param src_vocab: source vocabulary :param trg_vocab: target vocabulary :param trv_vocab: kb true value lookup vocabulary :return: built and initialized model
joeynmt/model.py
build_model
marvosyntactical/joeynmt
3
python
def build_model(cfg: dict=None, src_vocab: Vocabulary=None, trg_vocab: Vocabulary=None, trv_vocab: Vocabulary=None, canonizer=None) -> Model: '\n Build and initialize the model according to the configuration.\n\n :param cfg: dictionary configuration containing model specifications\n :param src_vocab: source vocabulary\n :param trg_vocab: target vocabulary\n :param trv_vocab: kb true value lookup vocabulary\n :return: built and initialized model\n ' src_padding_idx = src_vocab.stoi[PAD_TOKEN] trg_padding_idx = trg_vocab.stoi[PAD_TOKEN] if ('embedding_files' in cfg.keys()): assert (not cfg.get('tied_embeddings', False)), 'TODO implement tied embeddings along with pretrained initialization' raise NotImplementedError('TODO implement kbsrc embed loading for embedding files') weight_tensors = [] for weight_file in cfg['embedding_files']: with open(weight_file, 'r') as f: weight = [] for line in f.readlines(): line = line.split() line = [float(x) for x in line] weight.append(line) weight = FloatTensor(weight) weight_tensors.append(weight) src_embed = Embeddings(int(weight_tensors[0][0].shape[0]), False, len(weight_tensors[0])) src_embed.lut.weight.data = weight_tensors[0] trg_embed = Embeddings(int(weight_tensors[1][0].shape[0]), False, len(weight_tensors[1])) trg_embed.lut.weight.data = weight_tensors[1] else: src_embed = Embeddings(**cfg['encoder']['embeddings'], vocab_size=len(src_vocab), padding_idx=src_padding_idx) if cfg.get('kb_embed_separate', False): kbsrc_embed = Embeddings(**cfg['encoder']['embeddings'], vocab_size=len(src_vocab), padding_idx=src_padding_idx) else: kbsrc_embed = src_embed if cfg.get('tied_embeddings', False): if (src_vocab.itos == trg_vocab.itos): trg_embed = src_embed else: raise ConfigurationError('Embedding cannot be tied since vocabularies differ.') else: trg_embed = Embeddings(**cfg['decoder']['embeddings'], vocab_size=len(trg_vocab), padding_idx=trg_padding_idx) enc_dropout = cfg['encoder'].get('dropout', 0.0) enc_emb_dropout = cfg['encoder']['embeddings'].get('dropout', enc_dropout) if (cfg['encoder'].get('type', 'recurrent') == 'transformer'): assert (cfg['encoder']['embeddings']['embedding_dim'] == cfg['encoder']['hidden_size']), 'for transformer, emb_size must be hidden_size' encoder = TransformerEncoder(**cfg['encoder'], emb_size=src_embed.embedding_dim, emb_dropout=enc_emb_dropout) else: encoder = RecurrentEncoder(**cfg['encoder'], emb_size=src_embed.embedding_dim, emb_dropout=enc_emb_dropout) kb_task = bool(cfg.get('kb', False)) k_hops = int(cfg.get('k_hops', 1)) same_module_for_all_hops = bool(cfg.get('same_module_for_all_hops', False)) do_postproc = bool(cfg.get('do_postproc', True)) copy_from_source = bool(cfg.get('copy_from_source', True)) canonization_func = (None if (canonizer is None) else canonizer(copy_from_source=copy_from_source)) kb_input_feeding = bool(cfg.get('kb_input_feeding', True)) kb_feed_rnn = bool(cfg.get('kb_feed_rnn', True)) kb_multihead_feed = bool(cfg.get('kb_multihead_feed', False)) posEncKBkeys = cfg.get('posEncdKBkeys', False) tfstyletf = cfg.get('tfstyletf', True) infeedkb = bool(cfg.get('infeedkb', False)) outfeedkb = bool(cfg.get('outfeedkb', False)) add_kb_biases_to_output = bool(cfg.get('add_kb_biases_to_output', True)) kb_max_dims = cfg.get('kb_max_dims', (16, 32)) double_decoder = cfg.get('double_decoder', False) tied_side_softmax = cfg.get('tied_side_softmax', False) do_pad_kb_keys = cfg.get('pad_kb_keys', True) if hasattr(kb_max_dims, '__iter__'): kb_max_dims = tuple(kb_max_dims) else: assert (type(kb_max_dims) == int), kb_max_dims kb_max_dims = (kb_max_dims,) assert cfg['decoder']['hidden_size'] dec_dropout = cfg['decoder'].get('dropout', 0.0) dec_emb_dropout = cfg['decoder']['embeddings'].get('dropout', dec_dropout) if (cfg['decoder'].get('type', 'recurrent') == 'transformer'): if tfstyletf: decoder = TransformerDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, kb_task=kb_task, kb_key_emb_size=kbsrc_embed.embedding_dim, feed_kb_hidden=kb_input_feeding, infeedkb=infeedkb, outfeedkb=outfeedkb, double_decoder=double_decoder) else: decoder = TransformerKBrnnDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, kb_task=kb_task, k_hops=k_hops, kb_max=kb_max_dims, same_module_for_all_hops=same_module_for_all_hops, kb_key_emb_size=kbsrc_embed.embedding_dim, kb_input_feeding=kb_input_feeding, kb_feed_rnn=kb_feed_rnn, kb_multihead_feed=kb_multihead_feed) elif (not kb_task): decoder = RecurrentDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout) else: decoder = KeyValRetRNNDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, k_hops=k_hops, kb_max=kb_max_dims, same_module_for_all_hops=same_module_for_all_hops, kb_key_emb_size=kbsrc_embed.embedding_dim, kb_input_feeding=kb_input_feeding, kb_feed_rnn=kb_feed_rnn, kb_multihead_feed=kb_multihead_feed, do_pad_kb_keys=do_pad_kb_keys) generator = Generator(dec_hidden_size=cfg['decoder']['hidden_size'], vocab_size=len(trg_vocab), add_kb_biases_to_output=add_kb_biases_to_output, double_decoder=double_decoder) model = Model(encoder=encoder, decoder=decoder, generator=generator, src_embed=src_embed, trg_embed=trg_embed, src_vocab=src_vocab, trg_vocab=trg_vocab, kb_key_embed=kbsrc_embed, trv_vocab=trv_vocab, k_hops=k_hops, do_postproc=do_postproc, canonize=canonization_func, kb_att_dims=len(kb_max_dims), posEncKBkeys=posEncKBkeys) if cfg.get('tied_softmax', False): if (trg_embed.lut.weight.shape == model.generator.output_layer.weight.shape): model.generator.output_layer.weight = trg_embed.lut.weight if model.generator.double_decoder: assert hasattr(model.generator, 'side_output_layer') if tied_side_softmax: model.generator.side_output_layer.weight = trg_embed.lut.weight else: raise ConfigurationError('For tied_softmax, the decoder embedding_dim and decoder hidden_size must be the same.The decoder must be a Transformer.') initialize_model(model, cfg, src_padding_idx, trg_padding_idx) return model
def build_model(cfg: dict=None, src_vocab: Vocabulary=None, trg_vocab: Vocabulary=None, trv_vocab: Vocabulary=None, canonizer=None) -> Model: '\n Build and initialize the model according to the configuration.\n\n :param cfg: dictionary configuration containing model specifications\n :param src_vocab: source vocabulary\n :param trg_vocab: target vocabulary\n :param trv_vocab: kb true value lookup vocabulary\n :return: built and initialized model\n ' src_padding_idx = src_vocab.stoi[PAD_TOKEN] trg_padding_idx = trg_vocab.stoi[PAD_TOKEN] if ('embedding_files' in cfg.keys()): assert (not cfg.get('tied_embeddings', False)), 'TODO implement tied embeddings along with pretrained initialization' raise NotImplementedError('TODO implement kbsrc embed loading for embedding files') weight_tensors = [] for weight_file in cfg['embedding_files']: with open(weight_file, 'r') as f: weight = [] for line in f.readlines(): line = line.split() line = [float(x) for x in line] weight.append(line) weight = FloatTensor(weight) weight_tensors.append(weight) src_embed = Embeddings(int(weight_tensors[0][0].shape[0]), False, len(weight_tensors[0])) src_embed.lut.weight.data = weight_tensors[0] trg_embed = Embeddings(int(weight_tensors[1][0].shape[0]), False, len(weight_tensors[1])) trg_embed.lut.weight.data = weight_tensors[1] else: src_embed = Embeddings(**cfg['encoder']['embeddings'], vocab_size=len(src_vocab), padding_idx=src_padding_idx) if cfg.get('kb_embed_separate', False): kbsrc_embed = Embeddings(**cfg['encoder']['embeddings'], vocab_size=len(src_vocab), padding_idx=src_padding_idx) else: kbsrc_embed = src_embed if cfg.get('tied_embeddings', False): if (src_vocab.itos == trg_vocab.itos): trg_embed = src_embed else: raise ConfigurationError('Embedding cannot be tied since vocabularies differ.') else: trg_embed = Embeddings(**cfg['decoder']['embeddings'], vocab_size=len(trg_vocab), padding_idx=trg_padding_idx) enc_dropout = cfg['encoder'].get('dropout', 0.0) enc_emb_dropout = cfg['encoder']['embeddings'].get('dropout', enc_dropout) if (cfg['encoder'].get('type', 'recurrent') == 'transformer'): assert (cfg['encoder']['embeddings']['embedding_dim'] == cfg['encoder']['hidden_size']), 'for transformer, emb_size must be hidden_size' encoder = TransformerEncoder(**cfg['encoder'], emb_size=src_embed.embedding_dim, emb_dropout=enc_emb_dropout) else: encoder = RecurrentEncoder(**cfg['encoder'], emb_size=src_embed.embedding_dim, emb_dropout=enc_emb_dropout) kb_task = bool(cfg.get('kb', False)) k_hops = int(cfg.get('k_hops', 1)) same_module_for_all_hops = bool(cfg.get('same_module_for_all_hops', False)) do_postproc = bool(cfg.get('do_postproc', True)) copy_from_source = bool(cfg.get('copy_from_source', True)) canonization_func = (None if (canonizer is None) else canonizer(copy_from_source=copy_from_source)) kb_input_feeding = bool(cfg.get('kb_input_feeding', True)) kb_feed_rnn = bool(cfg.get('kb_feed_rnn', True)) kb_multihead_feed = bool(cfg.get('kb_multihead_feed', False)) posEncKBkeys = cfg.get('posEncdKBkeys', False) tfstyletf = cfg.get('tfstyletf', True) infeedkb = bool(cfg.get('infeedkb', False)) outfeedkb = bool(cfg.get('outfeedkb', False)) add_kb_biases_to_output = bool(cfg.get('add_kb_biases_to_output', True)) kb_max_dims = cfg.get('kb_max_dims', (16, 32)) double_decoder = cfg.get('double_decoder', False) tied_side_softmax = cfg.get('tied_side_softmax', False) do_pad_kb_keys = cfg.get('pad_kb_keys', True) if hasattr(kb_max_dims, '__iter__'): kb_max_dims = tuple(kb_max_dims) else: assert (type(kb_max_dims) == int), kb_max_dims kb_max_dims = (kb_max_dims,) assert cfg['decoder']['hidden_size'] dec_dropout = cfg['decoder'].get('dropout', 0.0) dec_emb_dropout = cfg['decoder']['embeddings'].get('dropout', dec_dropout) if (cfg['decoder'].get('type', 'recurrent') == 'transformer'): if tfstyletf: decoder = TransformerDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, kb_task=kb_task, kb_key_emb_size=kbsrc_embed.embedding_dim, feed_kb_hidden=kb_input_feeding, infeedkb=infeedkb, outfeedkb=outfeedkb, double_decoder=double_decoder) else: decoder = TransformerKBrnnDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, kb_task=kb_task, k_hops=k_hops, kb_max=kb_max_dims, same_module_for_all_hops=same_module_for_all_hops, kb_key_emb_size=kbsrc_embed.embedding_dim, kb_input_feeding=kb_input_feeding, kb_feed_rnn=kb_feed_rnn, kb_multihead_feed=kb_multihead_feed) elif (not kb_task): decoder = RecurrentDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout) else: decoder = KeyValRetRNNDecoder(**cfg['decoder'], encoder=encoder, vocab_size=len(trg_vocab), emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout, k_hops=k_hops, kb_max=kb_max_dims, same_module_for_all_hops=same_module_for_all_hops, kb_key_emb_size=kbsrc_embed.embedding_dim, kb_input_feeding=kb_input_feeding, kb_feed_rnn=kb_feed_rnn, kb_multihead_feed=kb_multihead_feed, do_pad_kb_keys=do_pad_kb_keys) generator = Generator(dec_hidden_size=cfg['decoder']['hidden_size'], vocab_size=len(trg_vocab), add_kb_biases_to_output=add_kb_biases_to_output, double_decoder=double_decoder) model = Model(encoder=encoder, decoder=decoder, generator=generator, src_embed=src_embed, trg_embed=trg_embed, src_vocab=src_vocab, trg_vocab=trg_vocab, kb_key_embed=kbsrc_embed, trv_vocab=trv_vocab, k_hops=k_hops, do_postproc=do_postproc, canonize=canonization_func, kb_att_dims=len(kb_max_dims), posEncKBkeys=posEncKBkeys) if cfg.get('tied_softmax', False): if (trg_embed.lut.weight.shape == model.generator.output_layer.weight.shape): model.generator.output_layer.weight = trg_embed.lut.weight if model.generator.double_decoder: assert hasattr(model.generator, 'side_output_layer') if tied_side_softmax: model.generator.side_output_layer.weight = trg_embed.lut.weight else: raise ConfigurationError('For tied_softmax, the decoder embedding_dim and decoder hidden_size must be the same.The decoder must be a Transformer.') initialize_model(model, cfg, src_padding_idx, trg_padding_idx) return model<|docstring|>Build and initialize the model according to the configuration. :param cfg: dictionary configuration containing model specifications :param src_vocab: source vocabulary :param trg_vocab: target vocabulary :param trv_vocab: kb true value lookup vocabulary :return: built and initialized model<|endoftext|>
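
A hedged sketch of a minimal cfg accepted by build_model above. The key names mirror the lookups in the function body; the sizes are illustrative, and src_vocab/trg_vocab are assumed to be Vocabulary objects built elsewhere. With 'kb' False this takes the plain RecurrentEncoder/RecurrentDecoder branch.

cfg = {
    'tied_embeddings': False,
    'kb': False,  # no knowledgebase task: plain RecurrentDecoder branch
    'encoder': {
        'type': 'recurrent', 'hidden_size': 64, 'dropout': 0.1,
        'embeddings': {'embedding_dim': 64, 'dropout': 0.1},
    },
    'decoder': {
        'type': 'recurrent', 'hidden_size': 64, 'dropout': 0.1,
        'embeddings': {'embedding_dim': 64, 'dropout': 0.1},
    },
}
model = build_model(cfg, src_vocab=src_vocab, trg_vocab=trg_vocab)
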
a4342208f732e02d68eb54d73044ef236d6d6875284344dbdea4226e17248b43
def __init__(self, encoder: Encoder, decoder: Decoder, generator: nn.Module, src_embed: Embeddings, trg_embed: Embeddings, src_vocab: Vocabulary, trg_vocab: Vocabulary, kb_key_embed: Embeddings, trv_vocab: Vocabulary=None, k_hops: int=1, do_postproc: bool=True, canonize=None, kb_att_dims: int=1, posEncKBkeys: bool=False) -> None: '\n Create a new encoder-decoder model\n\n :param encoder: encoder\n :param decoder: decoder\n :param src_embed: source embedding\n :param trg_embed: target embedding\n :param src_vocab: source vocabulary\n :param trg_vocab: target vocabulary\n :param trv_vocab: kb true value lookup vocabulary\n :param k_hops: number of kvr attention forward passes to do\n :param do_postproc: do postprocessing (decode canonical tokens) in KVR task?\n :param canonize: callable canonization object to try to create KB on the fly if none exists; not used by model but piggybacks off it\n :param kb_att_dims: number of dimensions of KB \n :param posEncdKBkeys: apply positional encoding to KB keys?\n ' super(Model, self).__init__() self.src_embed = src_embed self.trg_embed = trg_embed self.encoder = encoder self.decoder = decoder self.generator = generator self.src_vocab = src_vocab self.trg_vocab = trg_vocab self.bos_index = self.trg_vocab.stoi[BOS_TOKEN] self.pad_index = self.trg_vocab.stoi[PAD_TOKEN] self.eos_index = self.trg_vocab.stoi[EOS_TOKEN] self.kbsrc_embed = (kb_key_embed if (kb_key_embed is not None) else self.src_embed) if (trv_vocab != None): self.trv_vocab = trv_vocab self.pad_idx_kbsrc = self.src_vocab.stoi[PAD_TOKEN] self.eos_idx_src = self.src_vocab.stoi[EOS_TOKEN] self.k_hops = k_hops self.do_postproc = do_postproc self.canonize = canonize self.kb_att_dims = kb_att_dims if posEncKBkeys: try: decoder_hidden_size = self.decoder.hidden_size except AttributeError: decoder_hidden_size = self.decoder._hidden_size self.posEnc = PositionalEncoding(decoder_hidden_size, e=2) self.Timer = Timer()
Create a new encoder-decoder model :param encoder: encoder :param decoder: decoder :param src_embed: source embedding :param trg_embed: target embedding :param src_vocab: source vocabulary :param trg_vocab: target vocabulary :param trv_vocab: kb true value lookup vocabulary :param k_hops: number of kvr attention forward passes to do :param do_postproc: do postprocessing (decode canonical tokens) in KVR task? :param canonize: callable canonization object to try to create KB on the fly if none exists; not used by model but piggybacks off it :param kb_att_dims: number of dimensions of KB :param posEncdKBkeys: apply positional encoding to KB keys?
joeynmt/model.py
__init__
marvosyntactical/joeynmt
3
python
def __init__(self, encoder: Encoder, decoder: Decoder, generator: nn.Module, src_embed: Embeddings, trg_embed: Embeddings, src_vocab: Vocabulary, trg_vocab: Vocabulary, kb_key_embed: Embeddings, trv_vocab: Vocabulary=None, k_hops: int=1, do_postproc: bool=True, canonize=None, kb_att_dims: int=1, posEncKBkeys: bool=False) -> None: '\n Create a new encoder-decoder model\n\n :param encoder: encoder\n :param decoder: decoder\n :param src_embed: source embedding\n :param trg_embed: target embedding\n :param src_vocab: source vocabulary\n :param trg_vocab: target vocabulary\n :param trv_vocab: kb true value lookup vocabulary\n :param k_hops: number of kvr attention forward passes to do\n :param do_postproc: do postprocessing (decode canonical tokens) in KVR task?\n :param canonize: callable canonization object to try to create KB on the fly if none exists; not used by model but piggybacks off it\n :param kb_att_dims: number of dimensions of KB \n :param posEncdKBkeys: apply positional encoding to KB keys?\n ' super(Model, self).__init__() self.src_embed = src_embed self.trg_embed = trg_embed self.encoder = encoder self.decoder = decoder self.generator = generator self.src_vocab = src_vocab self.trg_vocab = trg_vocab self.bos_index = self.trg_vocab.stoi[BOS_TOKEN] self.pad_index = self.trg_vocab.stoi[PAD_TOKEN] self.eos_index = self.trg_vocab.stoi[EOS_TOKEN] self.kbsrc_embed = (kb_key_embed if (kb_key_embed is not None) else self.src_embed) if (trv_vocab != None): self.trv_vocab = trv_vocab self.pad_idx_kbsrc = self.src_vocab.stoi[PAD_TOKEN] self.eos_idx_src = self.src_vocab.stoi[EOS_TOKEN] self.k_hops = k_hops self.do_postproc = do_postproc self.canonize = canonize self.kb_att_dims = kb_att_dims if posEncKBkeys: try: decoder_hidden_size = self.decoder.hidden_size except AttributeError: decoder_hidden_size = self.decoder._hidden_size self.posEnc = PositionalEncoding(decoder_hidden_size, e=2) self.Timer = Timer()
def __init__(self, encoder: Encoder, decoder: Decoder, generator: nn.Module, src_embed: Embeddings, trg_embed: Embeddings, src_vocab: Vocabulary, trg_vocab: Vocabulary, kb_key_embed: Embeddings, trv_vocab: Vocabulary=None, k_hops: int=1, do_postproc: bool=True, canonize=None, kb_att_dims: int=1, posEncKBkeys: bool=False) -> None: '\n Create a new encoder-decoder model\n\n :param encoder: encoder\n :param decoder: decoder\n :param src_embed: source embedding\n :param trg_embed: target embedding\n :param src_vocab: source vocabulary\n :param trg_vocab: target vocabulary\n :param trv_vocab: kb true value lookup vocabulary\n :param k_hops: number of kvr attention forward passes to do\n :param do_postproc: do postprocessing (decode canonical tokens) in KVR task?\n :param canonize: callable canonization object to try to create KB on the fly if none exists; not used by model but piggybacks off it\n :param kb_att_dims: number of dimensions of KB \n :param posEncdKBkeys: apply positional encoding to KB keys?\n ' super(Model, self).__init__() self.src_embed = src_embed self.trg_embed = trg_embed self.encoder = encoder self.decoder = decoder self.generator = generator self.src_vocab = src_vocab self.trg_vocab = trg_vocab self.bos_index = self.trg_vocab.stoi[BOS_TOKEN] self.pad_index = self.trg_vocab.stoi[PAD_TOKEN] self.eos_index = self.trg_vocab.stoi[EOS_TOKEN] self.kbsrc_embed = (kb_key_embed if (kb_key_embed is not None) else self.src_embed) if (trv_vocab != None): self.trv_vocab = trv_vocab self.pad_idx_kbsrc = self.src_vocab.stoi[PAD_TOKEN] self.eos_idx_src = self.src_vocab.stoi[EOS_TOKEN] self.k_hops = k_hops self.do_postproc = do_postproc self.canonize = canonize self.kb_att_dims = kb_att_dims if posEncKBkeys: try: decoder_hidden_size = self.decoder.hidden_size except AttributeError: decoder_hidden_size = self.decoder._hidden_size self.posEnc = PositionalEncoding(decoder_hidden_size, e=2) self.Timer = Timer()<|docstring|>Create a new encoder-decoder model :param encoder: encoder :param decoder: decoder :param src_embed: source embedding :param trg_embed: target embedding :param src_vocab: source vocabulary :param trg_vocab: target vocabulary :param trv_vocab: kb true value lookup vocabulary :param k_hops: number of kvr attention forward passes to do :param do_postproc: do postprocessing (decode canonical tokens) in KVR task? :param canonize: callable canonization object to try to create KB on the fly if none exists; not used by model but piggybacks off it :param kb_att_dims: number of dimensions of KB :param posEncdKBkeys: apply positional encoding to KB keys?<|endoftext|>
8ba7b8f264666ffa95abcabd8f9dd4a63b430f14909a4b64be71c08477b4322d
def forward(self, src: Tensor, trg_input: Tensor, src_mask: Tensor, src_lengths: Tensor, trg_mask: Tensor=None, kb_keys: Tensor=None, kb_mask=None, kb_values_embed=None) -> (Tensor, Tensor, Tensor, Tensor, Tensor): '\n First encodes the source sentence.\n Then produces the target one word at a time.\n\n :param src: source input\n :param trg_input: target input\n :param src_mask: source mask\n :param src_lengths: length of source inputs\n :param trg_mask: target mask\n :return: decoder outputs\n ' (encoder_output, encoder_hidden) = self.encode(src=src, src_length=src_lengths, src_mask=src_mask) unroll_steps = trg_input.size(1) return self.decode(encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=src_mask, trg_input=trg_input, unroll_steps=unroll_steps, trg_mask=trg_mask, kb_keys=kb_keys, kb_mask=kb_mask, kb_values_embed=kb_values_embed)
First encodes the source sentence. Then produces the target one word at a time. :param src: source input :param trg_input: target input :param src_mask: source mask :param src_lengths: length of source inputs :param trg_mask: target mask :return: decoder outputs
joeynmt/model.py
forward
marvosyntactical/joeynmt
3
python
def forward(self, src: Tensor, trg_input: Tensor, src_mask: Tensor, src_lengths: Tensor, trg_mask: Tensor=None, kb_keys: Tensor=None, kb_mask=None, kb_values_embed=None) -> (Tensor, Tensor, Tensor, Tensor, Tensor): '\n First encodes the source sentence.\n Then produces the target one word at a time.\n\n :param src: source input\n :param trg_input: target input\n :param src_mask: source mask\n :param src_lengths: length of source inputs\n :param trg_mask: target mask\n :return: decoder outputs\n ' (encoder_output, encoder_hidden) = self.encode(src=src, src_length=src_lengths, src_mask=src_mask) unroll_steps = trg_input.size(1) return self.decode(encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=src_mask, trg_input=trg_input, unroll_steps=unroll_steps, trg_mask=trg_mask, kb_keys=kb_keys, kb_mask=kb_mask, kb_values_embed=kb_values_embed)
def forward(self, src: Tensor, trg_input: Tensor, src_mask: Tensor, src_lengths: Tensor, trg_mask: Tensor=None, kb_keys: Tensor=None, kb_mask=None, kb_values_embed=None) -> (Tensor, Tensor, Tensor, Tensor, Tensor): '\n First encodes the source sentence.\n Then produces the target one word at a time.\n\n :param src: source input\n :param trg_input: target input\n :param src_mask: source mask\n :param src_lengths: length of source inputs\n :param trg_mask: target mask\n :return: decoder outputs\n ' (encoder_output, encoder_hidden) = self.encode(src=src, src_length=src_lengths, src_mask=src_mask) unroll_steps = trg_input.size(1) return self.decode(encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=src_mask, trg_input=trg_input, unroll_steps=unroll_steps, trg_mask=trg_mask, kb_keys=kb_keys, kb_mask=kb_mask, kb_values_embed=kb_values_embed)<|docstring|>First encodes the source sentence. Then produces the target one word at a time. :param src: source input :param trg_input: target input :param src_mask: source mask :param src_lengths: length of source inputs :param trg_mask: target mask :return: decoder outputs<|endoftext|>
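
A hedged call sketch for forward(). The six-tuple unpacking mirrors the calls in get_loss_for_batch further down; batch is assumed to be a joeynmt Batch, and with no KB arguments the generator call matches the non-KB branch of get_loss_for_batch.

hidden, att_probs, out, kb_probs, _, _ = model(
    src=batch.src, trg_input=batch.trg_input,
    src_mask=batch.src_mask, src_lengths=batch.src_lengths,
    trg_mask=batch.trg_mask)
log_probs = model.generator(out, kb_probs=kb_probs, kb_values=None)  # as in get_loss_for_batch
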
9b18ce5e182446fe2ce8a2e7bf03c52d5fcf35f421deeeea73b4332c23caa0c0
def encode(self, src: Tensor, src_length: Tensor, src_mask: Tensor) -> (Tensor, Tensor): '\n Encodes the source sentence.\n\n :param src:\n :param src_length:\n :param src_mask:\n :return: encoder outputs (output, hidden_concat)\n ' return self.encoder(self.src_embed(src), src_length, src_mask)
Encodes the source sentence. :param src: :param src_length: :param src_mask: :return: encoder outputs (output, hidden_concat)
joeynmt/model.py
encode
marvosyntactical/joeynmt
3
python
def encode(self, src: Tensor, src_length: Tensor, src_mask: Tensor) -> (Tensor, Tensor): '\n Encodes the source sentence.\n\n :param src:\n :param src_length:\n :param src_mask:\n :return: encoder outputs (output, hidden_concat)\n ' return self.encoder(self.src_embed(src), src_length, src_mask)
def encode(self, src: Tensor, src_length: Tensor, src_mask: Tensor) -> (Tensor, Tensor): '\n Encodes the source sentence.\n\n :param src:\n :param src_length:\n :param src_mask:\n :return: encoder outputs (output, hidden_concat)\n ' return self.encoder(self.src_embed(src), src_length, src_mask)<|docstring|>Encodes the source sentence. :param src: :param src_length: :param src_mask: :return: encoder outputs (output, hidden_concat)<|endoftext|>
7da7690cb9dfaeaf68aca79a9be7aaf2df3e8a6273ba69b626b04ebeb8f5b8f5
def decode(self, encoder_output: Tensor, encoder_hidden: Tensor, src_mask: Tensor, trg_input: Tensor, unroll_steps: int, decoder_hidden: Tensor=None, trg_mask: Tensor=None, kb_keys: Tensor=None, kb_mask: Tensor=None, kb_values_embed=None) -> (Tensor, Tensor, Tensor, Tensor): '\n Decode, given an encoded source sentence.\n\n :param encoder_output: encoder states for attention computation\n :param encoder_hidden: last encoder state for decoder initialization\n :param src_mask: source mask, 1 at valid tokens\n :param trg_input: target inputs\n :param unroll_steps: number of steps to unrol the decoder for\n :param decoder_hidden: decoder hidden state (optional)\n :param trg_mask: mask for target steps\n :return: decoder outputs (outputs, hidden, att_probs, att_vectors)\n ' return self.decoder(trg_embed=self.trg_embed(trg_input), encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=src_mask, unroll_steps=unroll_steps, hidden=decoder_hidden, trg_mask=trg_mask, kb_keys=kb_keys, k_hops=self.k_hops, kb_mask=kb_mask, kb_values_embed=kb_values_embed)
Decode, given an encoded source sentence. :param encoder_output: encoder states for attention computation :param encoder_hidden: last encoder state for decoder initialization :param src_mask: source mask, 1 at valid tokens :param trg_input: target inputs :param unroll_steps: number of steps to unrol the decoder for :param decoder_hidden: decoder hidden state (optional) :param trg_mask: mask for target steps :return: decoder outputs (outputs, hidden, att_probs, att_vectors)
joeynmt/model.py
decode
marvosyntactical/joeynmt
3
python
def decode(self, encoder_output: Tensor, encoder_hidden: Tensor, src_mask: Tensor, trg_input: Tensor, unroll_steps: int, decoder_hidden: Tensor=None, trg_mask: Tensor=None, kb_keys: Tensor=None, kb_mask: Tensor=None, kb_values_embed=None) -> (Tensor, Tensor, Tensor, Tensor): '\n Decode, given an encoded source sentence.\n\n :param encoder_output: encoder states for attention computation\n :param encoder_hidden: last encoder state for decoder initialization\n :param src_mask: source mask, 1 at valid tokens\n :param trg_input: target inputs\n :param unroll_steps: number of steps to unrol the decoder for\n :param decoder_hidden: decoder hidden state (optional)\n :param trg_mask: mask for target steps\n :return: decoder outputs (outputs, hidden, att_probs, att_vectors)\n ' return self.decoder(trg_embed=self.trg_embed(trg_input), encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=src_mask, unroll_steps=unroll_steps, hidden=decoder_hidden, trg_mask=trg_mask, kb_keys=kb_keys, k_hops=self.k_hops, kb_mask=kb_mask, kb_values_embed=kb_values_embed)
def decode(self, encoder_output: Tensor, encoder_hidden: Tensor, src_mask: Tensor, trg_input: Tensor, unroll_steps: int, decoder_hidden: Tensor=None, trg_mask: Tensor=None, kb_keys: Tensor=None, kb_mask: Tensor=None, kb_values_embed=None) -> (Tensor, Tensor, Tensor, Tensor): '\n Decode, given an encoded source sentence.\n\n :param encoder_output: encoder states for attention computation\n :param encoder_hidden: last encoder state for decoder initialization\n :param src_mask: source mask, 1 at valid tokens\n :param trg_input: target inputs\n :param unroll_steps: number of steps to unrol the decoder for\n :param decoder_hidden: decoder hidden state (optional)\n :param trg_mask: mask for target steps\n :return: decoder outputs (outputs, hidden, att_probs, att_vectors)\n ' return self.decoder(trg_embed=self.trg_embed(trg_input), encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=src_mask, unroll_steps=unroll_steps, hidden=decoder_hidden, trg_mask=trg_mask, kb_keys=kb_keys, k_hops=self.k_hops, kb_mask=kb_mask, kb_values_embed=kb_values_embed)<|docstring|>Decode, given an encoded source sentence. :param encoder_output: encoder states for attention computation :param encoder_hidden: last encoder state for decoder initialization :param src_mask: source mask, 1 at valid tokens :param trg_input: target inputs :param unroll_steps: number of steps to unrol the decoder for :param decoder_hidden: decoder hidden state (optional) :param trg_mask: mask for target steps :return: decoder outputs (outputs, hidden, att_probs, att_vectors)<|endoftext|>
f9bf959f42f324c00ff145bbdcab6655b83a1c541dc4ee6a06c8ed1996f50c55
def get_loss_for_batch(self, batch: Batch, loss_function: nn.Module, max_output_length: int=None, e_i: float=1.0, greedy_threshold: float=0.9) -> Tensor: '\n Compute non-normalized loss and number of tokens for a batch\n\n :param batch: batch to compute loss for\n :param loss_function: loss function, computes for input and target\n a scalar loss for the complete batch\n :param max_output_length: maximum length of hypotheses\n :param e_i: scheduled sampling probability of taking true label vs model generation at every decoding step\n (https://arxiv.org/abs/1506.03099 Section 2.4)\n :param greedy_threshold: only actually do greedy search once e_i is below this threshold\n :return: batch_loss: sum of losses over non-pad elements in the batch\n ' print(f''' {('-' * 10)}GET LOSS FWD PASS: START current batch{('-' * 10)} ''') assert (0.0 <= e_i <= 1.0), f'e_i={e_i} should be a probability' do_teacher_force = (e_i >= greedy_threshold) (trg, trg_input, trg_mask) = (batch.trg, batch.trg_input, batch.trg_mask) batch_size = trg.size(0) if hasattr(batch, 'kbsrc'): (kb_keys, kb_values, kb_values_embed, _, kb_mask) = self.preprocess_batch_kb(batch, kbattdims=self.kb_att_dims) else: kb_keys = None log_probs = None if (kb_keys is not None): assert (batch.kbsrc != None), batch.kbsrc if hasattr(batch, 'trgcanon'): assert (batch.trgcanon.shape[0] == batch.trg.shape[0]), [t.shape for t in [batch.trg, batch.trgcanon]] (trg, trg_input, trg_mask) = (batch.trgcanon, batch.trgcanon_input, batch.trgcanon_mask) if (not do_teacher_force): with self.Timer('model training: KB Task: do greedy search'): (encoder_output, encoder_hidden) = self.encode(batch.src, batch.src_lengths, batch.src_mask) if (max_output_length is None): max_output_length = int((max(batch.src_lengths.cpu().numpy()) * 1.5)) print(f'in model.glfb; kb_keys are {kb_keys}') (stacked_output, stacked_attention_scores, stacked_kb_att_scores, log_probs) = greedy(encoder_hidden=encoder_hidden, encoder_output=encoder_output, src_mask=batch.src_mask, embed=self.trg_embed, bos_index=self.bos_index, decoder=self.decoder, generator=self.generator, max_output_length=trg.size((- 1)), knowledgebase=(kb_keys, kb_values, kb_values_embed, kb_mask), trg_input=trg_input, e_i=e_i) else: with self.Timer('model training: KB Task: model fwd pass'): (hidden, att_probs, out, kb_probs, _, _) = self.forward(src=batch.src, trg_input=trg_input, src_mask=batch.src_mask, src_lengths=batch.src_lengths, trg_mask=trg_mask, kb_keys=kb_keys, kb_mask=kb_mask, kb_values_embed=kb_values_embed) else: if (not do_teacher_force): raise NotImplementedError('scheduled sampling only works for KB task atm') (hidden, att_probs, out, kb_probs, _, _) = self.forward(src=batch.src, trg_input=trg_input, src_mask=batch.src_mask, src_lengths=batch.src_lengths, trg_mask=trg_mask) kb_values = None if (log_probs is None): log_probs = self.generator(out, kb_probs=kb_probs, kb_values=kb_values) if hasattr(batch, 'trgcanon'): assert (not log_probs.requires_grad), "using batch.trgcanon shouldnt happen / be done during training (canonized data is used in the 'trg' field there)" assert (log_probs.size((- 1)) == self.generator.output_size), (log_probs.shape, self.generator.output_size) try: batch_loss = loss_function(log_probs, trg) except Exception as e: print(f'batch_size: {batch_size}') print(f'log_probs= {log_probs.shape}') print(f'trg = {trg.shape}') print(f'') print(f'') raise e with self.Timer('debugging: greedy hypothesis:'): mle_tokens = argmax(log_probs, dim=(- 1)) mle_tokens = mle_tokens.cpu().numpy() print(f'proc_batch: Hypothesis: {self.trg_vocab.arrays_to_sentences(mle_tokens)[(- 1)]}') print(f''' {('-' * 10)}GET LOSS FWD PASS: END current batch{('-' * 10)} ''') return batch_loss
Compute non-normalized loss and number of tokens for a batch :param batch: batch to compute loss for :param loss_function: loss function, computes for input and target a scalar loss for the complete batch :param max_output_length: maximum length of hypotheses :param e_i: scheduled sampling probability of taking true label vs model generation at every decoding step (https://arxiv.org/abs/1506.03099 Section 2.4) :param greedy_threshold: only actually do greedy search once e_i is below this threshold :return: batch_loss: sum of losses over non-pad elements in the batch
joeynmt/model.py
get_loss_for_batch
marvosyntactical/joeynmt
3
python
def get_loss_for_batch(self, batch: Batch, loss_function: nn.Module, max_output_length: int=None, e_i: float=1.0, greedy_threshold: float=0.9) -> Tensor: '\n Compute non-normalized loss and number of tokens for a batch\n\n :param batch: batch to compute loss for\n :param loss_function: loss function, computes for input and target\n a scalar loss for the complete batch\n :param max_output_length: maximum length of hypotheses\n :param e_i: scheduled sampling probability of taking true label vs model generation at every decoding step\n (https://arxiv.org/abs/1506.03099 Section 2.4)\n :param greedy_threshold: only actually do greedy search once e_i is below this threshold\n :return: batch_loss: sum of losses over non-pad elements in the batch\n ' print(f''' {('-' * 10)}GET LOSS FWD PASS: START current batch{('-' * 10)} ''') assert (0.0 <= e_i <= 1.0), f'e_i={e_i} should be a probability' do_teacher_force = (e_i >= greedy_threshold) (trg, trg_input, trg_mask) = (batch.trg, batch.trg_input, batch.trg_mask) batch_size = trg.size(0) if hasattr(batch, 'kbsrc'): (kb_keys, kb_values, kb_values_embed, _, kb_mask) = self.preprocess_batch_kb(batch, kbattdims=self.kb_att_dims) else: kb_keys = None log_probs = None if (kb_keys is not None): assert (batch.kbsrc != None), batch.kbsrc if hasattr(batch, 'trgcanon'): assert (batch.trgcanon.shape[0] == batch.trg.shape[0]), [t.shape for t in [batch.trg, batch.trgcanon]] (trg, trg_input, trg_mask) = (batch.trgcanon, batch.trgcanon_input, batch.trgcanon_mask) if (not do_teacher_force): with self.Timer('model training: KB Task: do greedy search'): (encoder_output, encoder_hidden) = self.encode(batch.src, batch.src_lengths, batch.src_mask) if (max_output_length is None): max_output_length = int((max(batch.src_lengths.cpu().numpy()) * 1.5)) print(f'in model.glfb; kb_keys are {kb_keys}') (stacked_output, stacked_attention_scores, stacked_kb_att_scores, log_probs) = greedy(encoder_hidden=encoder_hidden, encoder_output=encoder_output, src_mask=batch.src_mask, embed=self.trg_embed, bos_index=self.bos_index, decoder=self.decoder, generator=self.generator, max_output_length=trg.size((- 1)), knowledgebase=(kb_keys, kb_values, kb_values_embed, kb_mask), trg_input=trg_input, e_i=e_i) else: with self.Timer('model training: KB Task: model fwd pass'): (hidden, att_probs, out, kb_probs, _, _) = self.forward(src=batch.src, trg_input=trg_input, src_mask=batch.src_mask, src_lengths=batch.src_lengths, trg_mask=trg_mask, kb_keys=kb_keys, kb_mask=kb_mask, kb_values_embed=kb_values_embed) else: if (not do_teacher_force): raise NotImplementedError('scheduled sampling only works for KB task atm') (hidden, att_probs, out, kb_probs, _, _) = self.forward(src=batch.src, trg_input=trg_input, src_mask=batch.src_mask, src_lengths=batch.src_lengths, trg_mask=trg_mask) kb_values = None if (log_probs is None): log_probs = self.generator(out, kb_probs=kb_probs, kb_values=kb_values) if hasattr(batch, 'trgcanon'): assert (not log_probs.requires_grad), "using batch.trgcanon shouldnt happen / be done during training (canonized data is used in the 'trg' field there)" assert (log_probs.size((- 1)) == self.generator.output_size), (log_probs.shape, self.generator.output_size) try: batch_loss = loss_function(log_probs, trg) except Exception as e: print(f'batch_size: {batch_size}') print(f'log_probs= {log_probs.shape}') print(f'trg = {trg.shape}') print(f'') print(f'') raise e with self.Timer('debugging: greedy hypothesis:'): mle_tokens = argmax(log_probs, dim=(- 1)) mle_tokens = mle_tokens.cpu().numpy() print(f'proc_batch: Hypothesis: {self.trg_vocab.arrays_to_sentences(mle_tokens)[(- 1)]}') print(f''' {('-' * 10)}GET LOSS FWD PASS: END current batch{('-' * 10)} ''') return batch_loss
def get_loss_for_batch(self, batch: Batch, loss_function: nn.Module, max_output_length: int=None, e_i: float=1.0, greedy_threshold: float=0.9) -> Tensor: '\n Compute non-normalized loss and number of tokens for a batch\n\n :param batch: batch to compute loss for\n :param loss_function: loss function, computes for input and target\n a scalar loss for the complete batch\n :param max_output_length: maximum length of hypotheses\n :param e_i: scheduled sampling probability of taking true label vs model generation at every decoding step\n (https://arxiv.org/abs/1506.03099 Section 2.4)\n :param greedy_threshold: only actually do greedy search once e_i is below this threshold\n :return: batch_loss: sum of losses over non-pad elements in the batch\n ' print(f''' {('-' * 10)}GET LOSS FWD PASS: START current batch{('-' * 10)} ''') assert (0.0 <= e_i <= 1.0), f'e_i={e_i} should be a probability' do_teacher_force = (e_i >= greedy_threshold) (trg, trg_input, trg_mask) = (batch.trg, batch.trg_input, batch.trg_mask) batch_size = trg.size(0) if hasattr(batch, 'kbsrc'): (kb_keys, kb_values, kb_values_embed, _, kb_mask) = self.preprocess_batch_kb(batch, kbattdims=self.kb_att_dims) else: kb_keys = None log_probs = None if (kb_keys is not None): assert (batch.kbsrc != None), batch.kbsrc if hasattr(batch, 'trgcanon'): assert (batch.trgcanon.shape[0] == batch.trg.shape[0]), [t.shape for t in [batch.trg, batch.trgcanon]] (trg, trg_input, trg_mask) = (batch.trgcanon, batch.trgcanon_input, batch.trgcanon_mask) if (not do_teacher_force): with self.Timer('model training: KB Task: do greedy search'): (encoder_output, encoder_hidden) = self.encode(batch.src, batch.src_lengths, batch.src_mask) if (max_output_length is None): max_output_length = int((max(batch.src_lengths.cpu().numpy()) * 1.5)) print(f'in model.glfb; kb_keys are {kb_keys}') (stacked_output, stacked_attention_scores, stacked_kb_att_scores, log_probs) = greedy(encoder_hidden=encoder_hidden, encoder_output=encoder_output, src_mask=batch.src_mask, embed=self.trg_embed, bos_index=self.bos_index, decoder=self.decoder, generator=self.generator, max_output_length=trg.size((- 1)), knowledgebase=(kb_keys, kb_values, kb_values_embed, kb_mask), trg_input=trg_input, e_i=e_i) else: with self.Timer('model training: KB Task: model fwd pass'): (hidden, att_probs, out, kb_probs, _, _) = self.forward(src=batch.src, trg_input=trg_input, src_mask=batch.src_mask, src_lengths=batch.src_lengths, trg_mask=trg_mask, kb_keys=kb_keys, kb_mask=kb_mask, kb_values_embed=kb_values_embed) else: if (not do_teacher_force): raise NotImplementedError('scheduled sampling only works for KB task atm') (hidden, att_probs, out, kb_probs, _, _) = self.forward(src=batch.src, trg_input=trg_input, src_mask=batch.src_mask, src_lengths=batch.src_lengths, trg_mask=trg_mask) kb_values = None if (log_probs is None): log_probs = self.generator(out, kb_probs=kb_probs, kb_values=kb_values) if hasattr(batch, 'trgcanon'): assert (not log_probs.requires_grad), "using batch.trgcanon shouldnt happen / be done during training (canonized data is used in the 'trg' field there)" assert (log_probs.size((- 1)) == self.generator.output_size), (log_probs.shape, self.generator.output_size) try: batch_loss = loss_function(log_probs, trg) except Exception as e: print(f'batch_size: {batch_size}') print(f'log_probs= {log_probs.shape}') print(f'trg = {trg.shape}') print(f'') print(f'') raise e with self.Timer('debugging: greedy hypothesis:'): mle_tokens = argmax(log_probs, dim=(- 1)) mle_tokens = mle_tokens.cpu().numpy() print(f'proc_batch: Hypothesis: {self.trg_vocab.arrays_to_sentences(mle_tokens)[(- 1)]}') print(f''' {('-' * 10)}GET LOSS FWD PASS: END current batch{('-' * 10)} ''') return batch_loss<|docstring|>Compute non-normalized loss and number of tokens for a batch :param batch: batch to compute loss for :param loss_function: loss function, computes for input and target a scalar loss for the complete batch :param max_output_length: maximum length of hypotheses :param e_i: scheduled sampling probability of taking true label vs model generation at every decoding step (https://arxiv.org/abs/1506.03099 Section 2.4) :param greedy_threshold: only actually do greedy search once e_i is below this threshold :return: batch_loss: sum of losses over non-pad elements in the batch<|endoftext|>
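
A hedged training-step sketch around get_loss_for_batch above. XentLoss (joeynmt's cross-entropy wrapper, import path assumed), the Adam setup, and the batch.ntokens normalization are standard joeynmt conventions, not taken from these records.

import torch
from joeynmt.loss import XentLoss  # assumed import path

loss_function = XentLoss(pad_index=model.pad_index, smoothing=0.0)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

batch_loss = model.get_loss_for_batch(batch, loss_function, e_i=1.0)  # e_i=1.0: full teacher forcing
(batch_loss / batch.ntokens).backward()  # normalize the summed loss per token
optimizer.step()
optimizer.zero_grad()
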
a7e7930bdc91a7d858a476af8509cad37c07f8631c62efe682ed8e3d2bb5ac78
def run_batch(self, batch: Batch, max_output_length: int, beam_size: int, beam_alpha: float) -> (np.array, np.array): '\n Get outputs and attentions scores for a given batch\n\n :param batch: batch to generate hypotheses for\n :param max_output_length: maximum length of hypotheses\n :param beam_size: size of the beam for beam search, if 0 use greedy\n :param beam_alpha: alpha value for beam search\n :return: \n stacked_output: hypotheses for batch,\n stacked_attention_scores: attention scores for batch\n ' (encoder_output, encoder_hidden) = self.encode(batch.src, batch.src_lengths, batch.src_mask) if (max_output_length is None): max_output_length = int((max(batch.src_lengths.cpu().numpy()) * 1.5)) if hasattr(batch, 'kbsrc'): (kb_keys, kb_values, kb_values_embed, kb_trv, kb_mask) = self.preprocess_batch_kb(batch, kbattdims=self.kb_att_dims) if (kb_keys is None): knowledgebase = None else: knowledgebase = (kb_keys, kb_values, kb_values_embed, kb_mask) else: knowledgebase = None if (beam_size == 0): (stacked_output, stacked_attention_scores, stacked_kb_att_scores, _) = greedy(encoder_hidden=encoder_hidden, encoder_output=encoder_output, src_mask=batch.src_mask, embed=self.trg_embed, bos_index=self.bos_index, decoder=self.decoder, generator=self.generator, max_output_length=max_output_length, knowledgebase=knowledgebase) else: (stacked_output, stacked_attention_scores, stacked_kb_att_scores) = beam_search(decoder=self.decoder, generator=self.generator, size=beam_size, encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=batch.src_mask, embed=self.trg_embed, max_output_length=max_output_length, alpha=beam_alpha, eos_index=self.eos_index, pad_index=self.pad_index, bos_index=self.bos_index, knowledgebase=knowledgebase) if ((knowledgebase != None) and self.do_postproc): with self.Timer('postprocessing hypotheses'): stacked_output = self.postprocess_batch_hypotheses(stacked_output, stacked_kb_att_scores, kb_values, kb_trv) print(f'proc_batch: Hypotheses: {self.trv_vocab.arrays_to_sentences(stacked_output)}') else: print(f'proc_batch: Hypotheses: {self.trg_vocab.arrays_to_sentences(stacked_output)}') return (stacked_output, stacked_attention_scores, stacked_kb_att_scores)
Get outputs and attentions scores for a given batch :param batch: batch to generate hypotheses for :param max_output_length: maximum length of hypotheses :param beam_size: size of the beam for beam search, if 0 use greedy :param beam_alpha: alpha value for beam search :return: stacked_output: hypotheses for batch, stacked_attention_scores: attention scores for batch
joeynmt/model.py
run_batch
marvosyntactical/joeynmt
3
python
def run_batch(self, batch: Batch, max_output_length: int, beam_size: int, beam_alpha: float) -> (np.array, np.array): '\n Get outputs and attentions scores for a given batch\n\n :param batch: batch to generate hypotheses for\n :param max_output_length: maximum length of hypotheses\n :param beam_size: size of the beam for beam search, if 0 use greedy\n :param beam_alpha: alpha value for beam search\n :return: \n stacked_output: hypotheses for batch,\n stacked_attention_scores: attention scores for batch\n ' (encoder_output, encoder_hidden) = self.encode(batch.src, batch.src_lengths, batch.src_mask) if (max_output_length is None): max_output_length = int((max(batch.src_lengths.cpu().numpy()) * 1.5)) if hasattr(batch, 'kbsrc'): (kb_keys, kb_values, kb_values_embed, kb_trv, kb_mask) = self.preprocess_batch_kb(batch, kbattdims=self.kb_att_dims) if (kb_keys is None): knowledgebase = None else: knowledgebase = (kb_keys, kb_values, kb_values_embed, kb_mask) else: knowledgebase = None if (beam_size == 0): (stacked_output, stacked_attention_scores, stacked_kb_att_scores, _) = greedy(encoder_hidden=encoder_hidden, encoder_output=encoder_output, src_mask=batch.src_mask, embed=self.trg_embed, bos_index=self.bos_index, decoder=self.decoder, generator=self.generator, max_output_length=max_output_length, knowledgebase=knowledgebase) else: (stacked_output, stacked_attention_scores, stacked_kb_att_scores) = beam_search(decoder=self.decoder, generator=self.generator, size=beam_size, encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=batch.src_mask, embed=self.trg_embed, max_output_length=max_output_length, alpha=beam_alpha, eos_index=self.eos_index, pad_index=self.pad_index, bos_index=self.bos_index, knowledgebase=knowledgebase) if ((knowledgebase != None) and self.do_postproc): with self.Timer('postprocessing hypotheses'): stacked_output = self.postprocess_batch_hypotheses(stacked_output, stacked_kb_att_scores, kb_values, kb_trv) print(f'proc_batch: Hypotheses: {self.trv_vocab.arrays_to_sentences(stacked_output)}') else: print(f'proc_batch: Hypotheses: {self.trg_vocab.arrays_to_sentences(stacked_output)}') return (stacked_output, stacked_attention_scores, stacked_kb_att_scores)
def run_batch(self, batch: Batch, max_output_length: int, beam_size: int, beam_alpha: float) -> (np.array, np.array): '\n Get outputs and attentions scores for a given batch\n\n :param batch: batch to generate hypotheses for\n :param max_output_length: maximum length of hypotheses\n :param beam_size: size of the beam for beam search, if 0 use greedy\n :param beam_alpha: alpha value for beam search\n :return: \n stacked_output: hypotheses for batch,\n stacked_attention_scores: attention scores for batch\n ' (encoder_output, encoder_hidden) = self.encode(batch.src, batch.src_lengths, batch.src_mask) if (max_output_length is None): max_output_length = int((max(batch.src_lengths.cpu().numpy()) * 1.5)) if hasattr(batch, 'kbsrc'): (kb_keys, kb_values, kb_values_embed, kb_trv, kb_mask) = self.preprocess_batch_kb(batch, kbattdims=self.kb_att_dims) if (kb_keys is None): knowledgebase = None else: knowledgebase = (kb_keys, kb_values, kb_values_embed, kb_mask) else: knowledgebase = None if (beam_size == 0): (stacked_output, stacked_attention_scores, stacked_kb_att_scores, _) = greedy(encoder_hidden=encoder_hidden, encoder_output=encoder_output, src_mask=batch.src_mask, embed=self.trg_embed, bos_index=self.bos_index, decoder=self.decoder, generator=self.generator, max_output_length=max_output_length, knowledgebase=knowledgebase) else: (stacked_output, stacked_attention_scores, stacked_kb_att_scores) = beam_search(decoder=self.decoder, generator=self.generator, size=beam_size, encoder_output=encoder_output, encoder_hidden=encoder_hidden, src_mask=batch.src_mask, embed=self.trg_embed, max_output_length=max_output_length, alpha=beam_alpha, eos_index=self.eos_index, pad_index=self.pad_index, bos_index=self.bos_index, knowledgebase=knowledgebase) if ((knowledgebase != None) and self.do_postproc): with self.Timer('postprocessing hypotheses'): stacked_output = self.postprocess_batch_hypotheses(stacked_output, stacked_kb_att_scores, kb_values, kb_trv) print(f'proc_batch: Hypotheses: {self.trv_vocab.arrays_to_sentences(stacked_output)}') else: print(f'proc_batch: Hypotheses: {self.trg_vocab.arrays_to_sentences(stacked_output)}') return (stacked_output, stacked_attention_scores, stacked_kb_att_scores)<|docstring|>Get outputs and attentions scores for a given batch :param batch: batch to generate hypotheses for :param max_output_length: maximum length of hypotheses :param beam_size: size of the beam for beam search, if 0 use greedy :param beam_alpha: alpha value for beam search :return: stacked_output: hypotheses for batch, stacked_attention_scores: attention scores for batch<|endoftext|>
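
A hedged inference sketch; beam_size=0 takes the greedy branch of run_batch above, and the returned triple matches its return statement. arrays_to_sentences is the same decoding helper run_batch itself prints with.

import torch

model.eval()
with torch.no_grad():
    hyps, att_scores, kb_att_scores = model.run_batch(
        batch, max_output_length=None, beam_size=0, beam_alpha=-1)
decoded = model.trg_vocab.arrays_to_sentences(arrays=hyps)
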
e8230c1dfb51b996f5844d0a5a32f9062fd50a52952d8c135a5f56a2e87237f6
def postprocess_batch_hypotheses(self, stacked_output, stacked_kb_att_scores, kb_values, kb_truval, pad_value=float('-inf')) -> np.array: '\n called in self.run_batch() during knowledgebase task\n\n postprocesses batch hypotheses\n replaces kb value tokens such as @meeting_time with 7pm\n\n Arguments:\n :param stacked_output: array\n :param stacked_kb_att_scores: array\n :param kb_values: Tensor\n :param kb_truval: Tensor\n :param attention_pad_value: float indicating what parts of each attention matrix in the stacked_kb_att_score array to cut off in case of beam search\n :return: post_proc_stacked_output\n ' print(stacked_kb_att_scores.shape, kb_values.shape, kb_truval.shape) kb_trv = kb_truval.cpu().numpy()[(0, :)] kb_val = kb_values.cpu().numpy()[(0, :)] kb_att = stacked_kb_att_scores print('[[[[[[[[[[[[[[ START POSTPROC VALID/TEST BATCH ]]]]]]]]]]]]]]') trvSent = self.trv_vocab.array_to_sentence post_proc_stacked_output = [] outputs = stacked_output.tolist() for (i, hyp) in enumerate(outputs): post_proc_hyp = [] for (step, token) in enumerate(hyp): if (token == self.eos_index): break if (token >= self.trg_vocab.canon_onwards): kb_att_i = kb_att[i] '\n if (kb_att_i == pad_value).any():\n # try to remove padded columns\n\n idx_first_pad_ = (kb_att_i==pad_val).non_zero(as_tuple)[:,0].min().item()\n kb_att_i = kb_att_i[:idx_first_pad_,:]\n ' str_tok = trvSent([token]) hypotSent = self.trg_vocab.array_to_sentence(hyp) print(f''' pp: {('=' * 10)} DECIDING REPLACEMENT FOR CANONICAL: {str_tok} {('=' * 10)} ''') print(f'''pp: while deciding for hypothesis: {hypotSent}''') print(f'''pp: decoded hypothesis thus far: {trvSent(post_proc_hyp)}''') assert (str_tok[0] in hypotSent), (hyp, str_tok, hypotSent) matching_trv_candidates = np.where((kb_val == token), kb_trv, (- 1)) print(f'''pp: matching_trv_candidates tokens (should belong to same canonical): {trvSent(matching_trv_candidates[(matching_trv_candidates != (- 1))].tolist())}''') if matching_trv_candidates[(matching_trv_candidates != (- 1))].shape[0]: print(f'pp: SUCCESS! Found matches for canonical: {str_tok}') try: matching_scores = np.where((matching_trv_candidates != (- 1)), kb_att_i[(step, :)], float('-inf')) except Exception as e: print(stacked_kb_att_scores.shape) raise e print(f'''pp: matching_scores (should have no '-1's): {matching_scores}''') top_matching = np.argsort(matching_scores)[::(- 1)].copy() top_match_candids = matching_trv_candidates[top_matching] print(f'''pp: matching_trv_candidates in descending order of attention: {trvSent(top_match_candids[(top_match_candids != (- 1))].tolist())}''') top1_match = matching_trv_candidates[top_matching[0]] print(f'''pp: top1_match: {trvSent([top1_match])}''') assert (top1_match != (- 1)), 'somehow selected true value with non matching canonical category, shouldnt happen' post_proc_hyp.append(int(top1_match)) else: print(f'pp: FAILURE! \n Found no matches for canonical: {str_tok}') scores = kb_att_i[(step, :)] hi_scores = np.argsort(scores)[::(- 1)].copy() print(f'''pp: failure debug: highest attended tokens overall: {trvSent(kb_trv[hi_scores].tolist())}''') print(f'pp: CURRENT POLICY: REPLACING FOUND CANONICAL {str_tok} WITH NON-MATCHING HIGHEST ATTENDED') top1_but_not_matching = kb_trv[hi_scores[0]] post_proc_hyp.append(top1_but_not_matching) print(f''' pp: {('+' * 10)} DECIDED REPLACEMENT FOR CANONICAL: {str_tok}: {trvSent([post_proc_hyp[(- 1)]])} {('+' * 10)} ''') else: post_proc_hyp.append(token) print(f'pp: finished hyp: {trvSent(post_proc_hyp)}, hyp past first <EOS> would be: {trvSent(post_proc_hyp, cut_at_eos=False)}') post_proc_stacked_output.append(post_proc_hyp) print() print(f'''pp: raw hyps: {self.trg_vocab.arrays_to_sentences(outputs)}''') print() print(f'''pp: post processed hyps: {self.trv_vocab.arrays_to_sentences(post_proc_stacked_output)}''') print() print(f'pp: knowledgebase: {trvSent(kb_trv.tolist()[:40])}') print() print('[[[[[[[[[[[[[[ END POSTPROC VALID/TEST BATCH ]]]]]]]]]]]]]]') post_proc_stacked_output = np.array(post_proc_stacked_output) return post_proc_stacked_output
called in self.run_batch() during knowledgebase task postprocesses batch hypotheses replaces kb value tokens such as @meeting_time with 7pm Arguments: :param stacked_output: array :param stacked_kb_att_scores: array :param kb_values: Tensor :param kb_truval: Tensor :param attention_pad_value: float indicating what parts of each attention matrix in the stacked_kb_att_score array to cut off in case of beam search :return: post_proc_stacked_output
joeynmt/model.py
postprocess_batch_hypotheses
marvosyntactical/joeynmt
3
python
def postprocess_batch_hypotheses(self, stacked_output, stacked_kb_att_scores, kb_values, kb_truval, pad_value=float('-inf')) -> np.array: '\n called in self.run_batch() during knowledgebase task\n\n postprocesses batch hypotheses\n replaces kb value tokens such as @meeting_time with 7pm\n\n Arguments:\n :param stacked_output: array\n :param stacked_kb_att_scores: array\n :param kb_values: Tensor\n :param kb_truval: Tensor\n :param attention_pad_value: float indicating what parts of each attention matrix in the stacked_kb_att_score array to cut off in case of beam search\n :return: post_proc_stacked_output\n ' print(stacked_kb_att_scores.shape, kb_values.shape, kb_truval.shape) kb_trv = kb_truval.cpu().numpy()[(0, :)] kb_val = kb_values.cpu().numpy()[(0, :)] kb_att = stacked_kb_att_scores print('[[[[[[[[[[[[[[ START POSTPROC VALID/TEST BATCH ]]]]]]]]]]]]]]') trvSent = self.trv_vocab.array_to_sentence post_proc_stacked_output = [] outputs = stacked_output.tolist() for (i, hyp) in enumerate(outputs): post_proc_hyp = [] for (step, token) in enumerate(hyp): if (token == self.eos_index): break if (token >= self.trg_vocab.canon_onwards): kb_att_i = kb_att[i] '\n if (kb_att_i == pad_value).any():\n # try to remove padded columns\n\n idx_first_pad_ = (kb_att_i==pad_val).non_zero(as_tuple)[:,0].min().item()\n kb_att_i = kb_att_i[:idx_first_pad_,:]\n ' str_tok = trvSent([token]) hypotSent = self.trg_vocab.array_to_sentence(hyp) print(f''' pp: {('=' * 10)} DECIDING REPLACEMENT FOR CANONICAL: {str_tok} {('=' * 10)} ''') print(f'''pp: while deciding for hypothesis: {hypotSent}''') print(f'''pp: decoded hypothesis thus far: {trvSent(post_proc_hyp)}''') assert (str_tok[0] in hypotSent), (hyp, str_tok, hypotSent) matching_trv_candidates = np.where((kb_val == token), kb_trv, (- 1)) print(f'''pp: matching_trv_candidates tokens (should belong to same canonical): {trvSent(matching_trv_candidates[(matching_trv_candidates != (- 1))].tolist())}''') if matching_trv_candidates[(matching_trv_candidates != (- 1))].shape[0]: print(f'pp: SUCCESS! Found matches for canonical: {str_tok}') try: matching_scores = np.where((matching_trv_candidates != (- 1)), kb_att_i[(step, :)], float('-inf')) except Exception as e: print(stacked_kb_att_scores.shape) raise e print(f'''pp: matching_scores (should have no '-1's): {matching_scores}''') top_matching = np.argsort(matching_scores)[::(- 1)].copy() top_match_candids = matching_trv_candidates[top_matching] print(f'''pp: matching_trv_candidates in descending order of attention: {trvSent(top_match_candids[(top_match_candids != (- 1))].tolist())}''') top1_match = matching_trv_candidates[top_matching[0]] print(f'''pp: top1_match: {trvSent([top1_match])}''') assert (top1_match != (- 1)), 'somehow selected true value with non matching canonical category, shouldnt happen' post_proc_hyp.append(int(top1_match)) else: print(f'pp: FAILURE! \n Found no matches for canonical: {str_tok}') scores = kb_att_i[(step, :)] hi_scores = np.argsort(scores)[::(- 1)].copy() print(f'''pp: failure debug: highest attended tokens overall: {trvSent(kb_trv[hi_scores].tolist())}''') print(f'pp: CURRENT POLICY: REPLACING FOUND CANONICAL {str_tok} WITH NON-MATCHING HIGHEST ATTENDED') top1_but_not_matching = kb_trv[hi_scores[0]] post_proc_hyp.append(top1_but_not_matching) print(f''' pp: {('+' * 10)} DECIDED REPLACEMENT FOR CANONICAL: {str_tok}: {trvSent([post_proc_hyp[(- 1)]])} {('+' * 10)} ''') else: post_proc_hyp.append(token) print(f'pp: finished hyp: {trvSent(post_proc_hyp)}, hyp past first <EOS> would be: {trvSent(post_proc_hyp, cut_at_eos=False)}') post_proc_stacked_output.append(post_proc_hyp) print() print(f'''pp: raw hyps: {self.trg_vocab.arrays_to_sentences(outputs)}''') print() print(f'''pp: post processed hyps: {self.trv_vocab.arrays_to_sentences(post_proc_stacked_output)}''') print() print(f'pp: knowledgebase: {trvSent(kb_trv.tolist()[:40])}') print() print('[[[[[[[[[[[[[[ END POSTPROC VALID/TEST BATCH ]]]]]]]]]]]]]]') post_proc_stacked_output = np.array(post_proc_stacked_output) return post_proc_stacked_output
def postprocess_batch_hypotheses(self, stacked_output, stacked_kb_att_scores, kb_values, kb_truval, pad_value=float('-inf')) -> np.array: '\n called in self.run_batch() during knowledgebase task\n\n postprocesses batch hypotheses\n replaces kb value tokens such as @meeting_time with 7pm\n\n Arguments:\n :param stacked_output: array\n :param stacked_kb_att_scores: array\n :param kb_values: Tensor\n :param kb_truval: Tensor\n :param attention_pad_value: float indicating what parts of each attention matrix in the stacked_kb_att_score array to cut off in case of beam search\n :return: post_proc_stacked_output\n ' print(stacked_kb_att_scores.shape, kb_values.shape, kb_truval.shape) kb_trv = kb_truval.cpu().numpy()[(0, :)] kb_val = kb_values.cpu().numpy()[(0, :)] kb_att = stacked_kb_att_scores print('[[[[[[[[[[[[[[ START POSTPROC VALID/TEST BATCH ]]]]]]]]]]]]]]') trvSent = self.trv_vocab.array_to_sentence post_proc_stacked_output = [] outputs = stacked_output.tolist() for (i, hyp) in enumerate(outputs): post_proc_hyp = [] for (step, token) in enumerate(hyp): if (token == self.eos_index): break if (token >= self.trg_vocab.canon_onwards): kb_att_i = kb_att[i] '\n if (kb_att_i == pad_value).any():\n # try to remove padded columns\n\n idx_first_pad_ = (kb_att_i==pad_val).non_zero(as_tuple)[:,0].min().item()\n kb_att_i = kb_att_i[:idx_first_pad_,:]\n ' str_tok = trvSent([token]) hypotSent = self.trg_vocab.array_to_sentence(hyp) print(f''' pp: {('=' * 10)} DECIDING REPLACEMENT FOR CANONICAL: {str_tok} {('=' * 10)} ''') print(f'''pp: while deciding for hypothesis: {hypotSent}''') print(f'''pp: decoded hypothesis thus far: {trvSent(post_proc_hyp)}''') assert (str_tok[0] in hypotSent), (hyp, str_tok, hypotSent) matching_trv_candidates = np.where((kb_val == token), kb_trv, (- 1)) print(f'''pp: matching_trv_candidates tokens (should belong to same canonical): {trvSent(matching_trv_candidates[(matching_trv_candidates != (- 1))].tolist())}''') if matching_trv_candidates[(matching_trv_candidates != (- 1))].shape[0]: print(f'pp: SUCCESS! Found matches for canonical: {str_tok}') try: matching_scores = np.where((matching_trv_candidates != (- 1)), kb_att_i[(step, :)], float('-inf')) except Exception as e: print(stacked_kb_att_scores.shape) raise e print(f'''pp: matching_scores (should have no '-1's): {matching_scores}''') top_matching = np.argsort(matching_scores)[::(- 1)].copy() top_match_candids = matching_trv_candidates[top_matching] print(f'''pp: matching_trv_candidates in descending order of attention: {trvSent(top_match_candids[(top_match_candids != (- 1))].tolist())}''') top1_match = matching_trv_candidates[top_matching[0]] print(f'''pp: top1_match: {trvSent([top1_match])}''') assert (top1_match != (- 1)), 'somehow selected true value with non matching canonical category, shouldnt happen' post_proc_hyp.append(int(top1_match)) else: print(f'pp: FAILURE! \n Found no matches for canonical: {str_tok}') scores = kb_att_i[(step, :)] hi_scores = np.argsort(scores)[::(- 1)].copy() print(f'''pp: failure debug: highest attended tokens overall: {trvSent(kb_trv[hi_scores].tolist())}''') print(f'pp: CURRENT POLICY: REPLACING FOUND CANONICAL {str_tok} WITH NON-MATCHING HIGHEST ATTENDED') top1_but_not_matching = kb_trv[hi_scores[0]] post_proc_hyp.append(top1_but_not_matching) print(f''' pp: {('+' * 10)} DECIDED REPLACEMENT FOR CANONICAL: {str_tok}: {trvSent([post_proc_hyp[(- 1)]])} {('+' * 10)} ''') else: post_proc_hyp.append(token) print(f'pp: finished hyp: {trvSent(post_proc_hyp)}, hyp past first <EOS> would be: {trvSent(post_proc_hyp, cut_at_eos=False)}') post_proc_stacked_output.append(post_proc_hyp) print() print(f'''pp: raw hyps: {self.trg_vocab.arrays_to_sentences(outputs)}''') print() print(f'''pp: post processed hyps: {self.trv_vocab.arrays_to_sentences(post_proc_stacked_output)}''') print() print(f'pp: knowledgebase: {trvSent(kb_trv.tolist()[:40])}') print() print('[[[[[[[[[[[[[[ END POSTPROC VALID/TEST BATCH ]]]]]]]]]]]]]]') post_proc_stacked_output = np.array(post_proc_stacked_output) return post_proc_stacked_output<|docstring|>called in self.run_batch() during knowledgebase task postprocesses batch hypotheses replaces kb value tokens such as @meeting_time with 7pm Arguments: :param stacked_output: array :param stacked_kb_att_scores: array :param kb_values: Tensor :param kb_truval: Tensor :param attention_pad_value: float indicating what parts of each attention matrix in the stacked_kb_att_score array to cut off in case of beam search :return: post_proc_stacked_output<|endoftext|>
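The replacement policy in postprocess_batch_hypotheses boils down to masking the KB with np.where and taking the highest-attended surviving entry. A self-contained sketch of that selection step, with hypothetical toy values:

import numpy as np

kb_val = np.array([40, 41, 40, 42, 40])        # canonical token id per KB entry (toy data)
kb_trv = np.array([7, 8, 9, 10, 11])           # true-value token id per KB entry
att = np.array([0.1, 0.5, 0.7, 0.05, 0.2])     # attention over KB entries at one decoding step

token = 40                                     # canonical token emitted by the decoder
candidates = np.where(kb_val == token, kb_trv, -1)        # keep matching entries only
scores = np.where(candidates != -1, att, float('-inf'))   # mask out non-matching scores
best = candidates[np.argsort(scores)[::-1][0]]            # highest-attended match
assert best == 9                               # entry 2 matches token 40 with attention 0.7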
f8f84c47fbd654c028d7fa592970da3ed85315cd4bbcbe3eb31d134e78ec5677
def __repr__(self) -> str: '\n String representation: a description of encoder, decoder and embeddings\n\n :return: string representation\n ' return ('%s(\n\tencoder=%s,\n\tdecoder=%s,\n\tsrc_embed=%s,\n\ttrg_embed=%s)' % (self.__class__.__name__, self.encoder, self.decoder, self.src_embed, self.trg_embed))
String representation: a description of encoder, decoder and embeddings :return: string representation
joeynmt/model.py
__repr__
marvosyntactical/joeynmt
3
python
def __repr__(self) -> str: '\n String representation: a description of encoder, decoder and embeddings\n\n :return: string representation\n ' return ('%s(\n\tencoder=%s,\n\tdecoder=%s,\n\tsrc_embed=%s,\n\ttrg_embed=%s)' % (self.__class__.__name__, self.encoder, self.decoder, self.src_embed, self.trg_embed))
def __repr__(self) -> str: '\n String representation: a description of encoder, decoder and embeddings\n\n :return: string representation\n ' return ('%s(\n\tencoder=%s,\n\tdecoder=%s,\n\tsrc_embed=%s,\n\ttrg_embed=%s)' % (self.__class__.__name__, self.encoder, self.decoder, self.src_embed, self.trg_embed))<|docstring|>String representation: a description of encoder, decoder and embeddings :return: string representation<|endoftext|>
cce786519a4d3f03af9aeb092282ab48e9a3bf094288f4d17bfbb0ab614c6a15
def add_message(request, msg, type='info'): ' Add status message\n\n Predefined message types\n\n * info\n\n * success\n\n * warning\n\n * error\n\n ' if (':' not in type): type = ('message:%s' % type) request.session.flash(render(request, type, msg), 'status')
Add status message Predefined message types * info * success * warning * error
venv/Lib/site-packages/player/message.py
add_message
mandamg/Exercicios-de-Python-do-Curso-em-Video
0
python
def add_message(request, msg, type='info'): ' Add status message\n\n Predefined message types\n\n * info\n\n * success\n\n * warning\n\n * error\n\n ' if (':' not in type): type = ('message:%s' % type) request.session.flash(render(request, type, msg), 'status')
def add_message(request, msg, type='info'): ' Add status message\n\n Predefined message types\n\n * info\n\n * success\n\n * warning\n\n * error\n\n ' if (':' not in type): type = ('message:%s' % type) request.session.flash(render(request, type, msg), 'status')<|docstring|>Add status message Predefined message types * info * success * warning * error<|endoftext|>
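The only branching in add_message is the namespace rule for the message type; a runnable distillation of just that rule (qualify is a hypothetical helper name, not part of the record):

def qualify(type='info'):
    # Bare type names get the 'message:' namespace; qualified names pass through.
    return type if ':' in type else 'message:%s' % type

assert qualify('error') == 'message:error'
assert qualify('myapp:custom') == 'myapp:custom'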
2eab19907938c4790a9087cc776b7aeb1ff3d68479b2c20103440e1229150771
def render_messages(request): ' Render previously added messages ' return ''.join(request.session.pop_flash('status'))
Render previously added messages
venv/Lib/site-packages/player/message.py
render_messages
mandamg/Exercicios-de-Python-do-Curso-em-Video
0
python
def render_messages(request): ' ' return ''.join(request.session.pop_flash('status'))
def render_messages(request): ' ' return ''.join(request.session.pop_flash('status'))<|docstring|>Render previously added messages<|endoftext|>
b3666e37bdd7c1c082daa42cc836b31d9be744b9160b560241aeac0dff49f428
@tmpl_filter('message:error') def error_message(context, request): ' Error message filter ' if (not isinstance(context, (set, list, tuple))): context = (context,) errors = [] for err in context: if isinstance(err, Exception): err = ('%s: %s' % (err.__class__.__name__, escape(str(err), True))) errors.append(err) return {'errors': errors}
Error message filter
venv/Lib/site-packages/player/message.py
error_message
mandamg/Exercicios-de-Python-do-Curso-em-Video
0
python
@tmpl_filter('message:error') def error_message(context, request): ' ' if (not isinstance(context, (set, list, tuple))): context = (context,) errors = [] for err in context: if isinstance(err, Exception): err = ('%s: %s' % (err.__class__.__name__, escape(str(err), True))) errors.append(err) return {'errors': errors}
@tmpl_filter('message:error') def error_message(context, request): ' ' if (not isinstance(context, (set, list, tuple))): context = (context,) errors = [] for err in context: if isinstance(err, Exception): err = ('%s: %s' % (err.__class__.__name__, escape(str(err), True))) errors.append(err) return {'errors': errors}<|docstring|>Error message filter<|endoftext|>
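error_message renders each exception as 'ClassName: escaped message'. Assuming the module's imported escape behaves like html.escape with quoting enabled (an assumption, since the import is outside this record), the formatting step looks like:

from html import escape  # assumption: stands in for the module's escape(s, True)

err = ValueError('x < 0 & y > "1"')
formatted = '%s: %s' % (err.__class__.__name__, escape(str(err), True))
assert formatted == 'ValueError: x &lt; 0 &amp; y &gt; &quot;1&quot;'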
d5e93d53d2ad6717a8b1d05f6a2dd96883669af9cdc8dd7dfc2b596ed95f64a5
def activate(self, kalman_filter, frame_id): 'Start a new tracklet' self.kalman_filter = kalman_filter self.track_id = self.next_id() (self.mean, self.covariance) = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh)) self.tracklet_len = 0 self.state = TrackState.Tracked self.frame_id = frame_id self.start_frame = frame_id
Start a new tracklet
trackers/tracker_api.py
activate
jjandnn/AlphaPose
6,306
python
def activate(self, kalman_filter, frame_id): self.kalman_filter = kalman_filter self.track_id = self.next_id() (self.mean, self.covariance) = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh)) self.tracklet_len = 0 self.state = TrackState.Tracked self.frame_id = frame_id self.start_frame = frame_id
def activate(self, kalman_filter, frame_id): self.kalman_filter = kalman_filter self.track_id = self.next_id() (self.mean, self.covariance) = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh)) self.tracklet_len = 0 self.state = TrackState.Tracked self.frame_id = frame_id self.start_frame = frame_id<|docstring|>Start a new tracklet<|endoftext|>
9effb05ca1893440f78d0273d0dc6d05e09ec4e9b67498ce96a95fd4d7012931
def update(self, new_track, frame_id, update_feature=True): '\n Update a matched track\n :type new_track: STrack\n :type frame_id: int\n :type update_feature: bool\n :return:\n ' self.frame_id = frame_id self.tracklet_len += 1 self.pose = new_track.pose self.detscore = new_track.detscore self.crop_box = new_track.crop_box self.file_name = new_track.file_name new_tlwh = new_track.tlwh (self.mean, self.covariance) = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)) self.state = TrackState.Tracked self.is_activated = True self.score = new_track.score if update_feature: self.update_features(new_track.curr_feat)
Update a matched track :type new_track: STrack :type frame_id: int :type update_feature: bool :return:
trackers/tracker_api.py
update
jjandnn/AlphaPose
6,306
python
def update(self, new_track, frame_id, update_feature=True): '\n Update a matched track\n :type new_track: STrack\n :type frame_id: int\n :type update_feature: bool\n :return:\n ' self.frame_id = frame_id self.tracklet_len += 1 self.pose = new_track.pose self.detscore = new_track.detscore self.crop_box = new_track.crop_box self.file_name = new_track.file_name new_tlwh = new_track.tlwh (self.mean, self.covariance) = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)) self.state = TrackState.Tracked self.is_activated = True self.score = new_track.score if update_feature: self.update_features(new_track.curr_feat)
def update(self, new_track, frame_id, update_feature=True): '\n Update a matched track\n :type new_track: STrack\n :type frame_id: int\n :type update_feature: bool\n :return:\n ' self.frame_id = frame_id self.tracklet_len += 1 self.pose = new_track.pose self.detscore = new_track.detscore self.crop_box = new_track.crop_box self.file_name = new_track.file_name new_tlwh = new_track.tlwh (self.mean, self.covariance) = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)) self.state = TrackState.Tracked self.is_activated = True self.score = new_track.score if update_feature: self.update_features(new_track.curr_feat)<|docstring|>Update a matched track :type new_track: STrack :type frame_id: int :type update_feature: bool :return:<|endoftext|>
0eaa46df82d38b1c1b6a31fb41f8d026f5e70725602bda05c00aa65f5f91ca4b
@property def tlwh(self): 'Get current position in bounding box format `(top left x, top left y,\n width, height)`.\n ' if (self.mean is None): return self._tlwh.copy() ret = self.mean[:4].copy() ret[2] *= ret[3] ret[:2] -= (ret[2:] / 2) return ret
Get current position in bounding box format `(top left x, top left y, width, height)`.
trackers/tracker_api.py
tlwh
jjandnn/AlphaPose
6,306
python
@property def tlwh(self): 'Get current position in bounding box format `(top left x, top left y,\n width, height)`.\n ' if (self.mean is None): return self._tlwh.copy() ret = self.mean[:4].copy() ret[2] *= ret[3] ret[:2] -= (ret[2:] / 2) return ret
@property def tlwh(self): 'Get current position in bounding box format `(top left x, top left y,\n width, height)`.\n ' if (self.mean is None): return self._tlwh.copy() ret = self.mean[:4].copy() ret[2] *= ret[3] ret[:2] -= (ret[2:] / 2) return ret<|docstring|>Get current position in bounding box format `(top left x, top left y, width, height)`.<|endoftext|>
e3273e24b56241b12ef1783840c1a1e7ca455535f0a6ada7d7ab743b983040ea
@property def tlbr(self): 'Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,\n `(top left, bottom right)`.\n ' ret = self.tlwh.copy() ret[2:] += ret[:2] return ret
Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`.
trackers/tracker_api.py
tlbr
jjandnn/AlphaPose
6,306
python
@property def tlbr(self): 'Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,\n `(top left, bottom right)`.\n ' ret = self.tlwh.copy() ret[2:] += ret[:2] return ret
@property def tlbr(self): 'Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,\n `(top left, bottom right)`.\n ' ret = self.tlwh.copy() ret[2:] += ret[:2] return ret<|docstring|>Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`.<|endoftext|>
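A quick numeric check of the tlwh -> tlbr conversion above (toy box, outside any tracker state):

import numpy as np

tlwh = np.array([10., 20., 40., 80.])   # top-left x/y, width, height
tlbr = tlwh.copy()
tlbr[2:] += tlbr[:2]                    # same in-place update as the tlbr property
assert np.allclose(tlbr, [10., 20., 50., 100.])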
27823033c4fc5a97a08046241da17d9a07bf7a105f3d485b2f1fff55e451f5a5
@staticmethod def tlwh_to_xyah(tlwh): 'Convert bounding box to format `(center x, center y, aspect ratio,\n height)`, where the aspect ratio is `width / height`.\n ' ret = np.asarray(tlwh).copy() ret[:2] += (ret[2:] / 2) ret[2] /= ret[3] return ret
Convert bounding box to format `(center x, center y, aspect ratio, height)`, where the aspect ratio is `width / height`.
trackers/tracker_api.py
tlwh_to_xyah
jjandnn/AlphaPose
6,306
python
@staticmethod def tlwh_to_xyah(tlwh): 'Convert bounding box to format `(center x, center y, aspect ratio,\n height)`, where the aspect ratio is `width / height`.\n ' ret = np.asarray(tlwh).copy() ret[:2] += (ret[2:] / 2) ret[2] /= ret[3] return ret
@staticmethod def tlwh_to_xyah(tlwh): 'Convert bounding box to format `(center x, center y, aspect ratio,\n height)`, where the aspect ratio is `width / height`.\n ' ret = np.asarray(tlwh).copy() ret[:2] += (ret[2:] / 2) ret[2] /= ret[3] return ret<|docstring|>Convert bounding box to format `(center x, center y, aspect ratio, height)`, where the aspect ratio is `width / height`.<|endoftext|>
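tlwh_to_xyah and the tlwh property above are inverses of each other; a standalone round-trip check on a toy box:

import numpy as np

tlwh = np.array([10., 20., 40., 80.])
xyah = tlwh.copy()
xyah[:2] += xyah[2:] / 2                # center x, center y  (as in tlwh_to_xyah)
xyah[2] /= xyah[3]                      # aspect ratio = width / height
assert np.allclose(xyah, [30., 60., 0.5, 80.])

back = xyah.copy()
back[2] *= back[3]                      # width = aspect * height (as in the tlwh property)
back[:2] -= back[2:] / 2                # back to the top-left corner
assert np.allclose(back, tlwh)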
fac41b10474139c6b3836cab025ec20ff66fadbab8c9d3e04f76428082bd9ac6
def fetch_texture1D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (W,) or (W,C,) or (B,W,C,)\n uv : Tensor\n the input parameter tensor with shape (N,) or (B,N,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the (N,C,) or (B,N,C,) fetched data from the input texture\n " return torch.t(torch.reshape(grid_sample(torch.reshape(torch.t(texture), (1, 3, (- 1), 1)), torch.reshape(torch.cat((torch.zeros(numel(uv), 1, dtype=torch.float, device=texture.device), to_column(((uv * 2) - 1))), dim=1), (1, 1, (- 1), 2)), mode=mode, padding_mode='border'), (3, numel(uv))))
Fetches an input texture using the given UVs in range [0,1] Parameters ---------- texture : Tensor the input texture tensor with shape (W,) or (W,C,) or (B,W,C,) uv : Tensor the input parameter tensor with shape (N,) or (B,N,) mode : str (optional) interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient. Returns ------- Tensor the (N,C,) or (B,N,C,) fetched data from the input texture
ACME/color/fetch_texture.py
fetch_texture1D
mauriziokovacic/ACME
3
python
def fetch_texture1D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (W,) or (W,C,) or (B,W,C,)\n uv : Tensor\n the input parameter tensor with shape (N,) or (B,N,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the (N,C,) or (B,N,C,) fetched data from the input texture\n " return torch.t(torch.reshape(grid_sample(torch.reshape(torch.t(texture), (1, 3, (- 1), 1)), torch.reshape(torch.cat((torch.zeros(numel(uv), 1, dtype=torch.float, device=texture.device), to_column(((uv * 2) - 1))), dim=1), (1, 1, (- 1), 2)), mode=mode, padding_mode='border'), (3, numel(uv))))
def fetch_texture1D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (W,) or (W,C,) or (B,W,C,)\n uv : Tensor\n the input parameter tensor with shape (N,) or (B,N,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the (N,C,) or (B,N,C,) fetched data from the input texture\n " return torch.t(torch.reshape(grid_sample(torch.reshape(torch.t(texture), (1, 3, (- 1), 1)), torch.reshape(torch.cat((torch.zeros(numel(uv), 1, dtype=torch.float, device=texture.device), to_column(((uv * 2) - 1))), dim=1), (1, 1, (- 1), 2)), mode=mode, padding_mode='border'), (3, numel(uv))))<|docstring|>Fetches an input texture using the given UVs in range [0,1] Parameters ---------- texture : Tensor the input texture tensor with shape (W,) or (W,C,) or (B,W,C,) uv : Tensor the input parameter tensor with shape (N,) or (B,N,) mode : str (optional) interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient. Returns ------- Tensor the (N,C,) or (B,N,C,) fetched data from the input texture<|endoftext|>
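fetch_texture1D piggybacks on torch's 2-D grid_sample by viewing the texture as a (1, C, W, 1) image and mapping uv in [0, 1] to grid y-coordinates in [-1, 1]. A self-contained sketch of that trick, independent of the ACME helpers (align_corners is pinned explicitly here; the record's call relies on the older torch default):

import torch
import torch.nn.functional as F

texture = torch.tensor([[0.], [1.], [2.], [3.]])        # (W=4, C=1)
uv = torch.tensor([0.0, 0.5, 1.0])
img = texture.t().reshape(1, 1, -1, 1)                  # treat texels as image rows: (1, C, W, 1)
grid = torch.stack([torch.zeros_like(uv), uv * 2 - 1], dim=-1).view(1, 1, -1, 2)
out = F.grid_sample(img, grid, mode='bilinear', padding_mode='border', align_corners=True)
assert out.view(-1).tolist() == [0.0, 1.5, 3.0]         # endpoints plus the linear midpoint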
da4b43d1b0e984d494dc55472c5f7db340930c48068dc62d6323892ed9081273
def fetch_texture2D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (W,H,) or (C,W,H,) or (B,C,W,H,)\n uv : Tensor\n the input UV tensor with shape (N,2,) or (B,N,2,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the (N,C,) or (B,N,C,) fetched data from the input texture\n " t = texture if (t.ndimension() < 3): t = t.view(1, *t.shape) if (t.ndimension() < 4): t = t.view(1, *t.shape) c = ((uv * 2) - 1) if (c.ndimension() < 3): c = c.expand(t.shape[0], *c.shape) if (c.ndimension() < 4): c = c.view(c.shape[0], 1, *c.shape[1:]) if (t.shape[0] < c.shape[0]): t = t.expand(c.shape[0], *t.shape[1:]) return torch.transpose(grid_sample(t, c, mode=mode, padding_mode='border').squeeze(), (- 1), (- 2))
Fetches an input texture using the given UVs in range [0,1] Parameters ---------- texture : Tensor the input texture tensor with shape (W,H,) or (C,W,H,) or (B,C,W,H,) uv : Tensor the input UV tensor with shape (N,2,) or (B,N,2,) mode : str (optional) interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient. Returns ------- Tensor the (N,C,) or (B,N,C,) fetched data from the input texture
ACME/color/fetch_texture.py
fetch_texture2D
mauriziokovacic/ACME
3
python
def fetch_texture2D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (W,H,) or (C,W,H,) or (B,C,W,H,)\n uv : Tensor\n the input UV tensor with shape (N,2,) or (B,N,2,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the (N,C,) or (B,N,C,) fetched data from the input texture\n " t = texture if (t.ndimension() < 3): t = t.view(1, *t.shape) if (t.ndimension() < 4): t = t.view(1, *t.shape) c = ((uv * 2) - 1) if (c.ndimension() < 3): c = c.expand(t.shape[0], *c.shape) if (c.ndimension() < 4): c = c.view(c.shape[0], 1, *c.shape[1:]) if (t.shape[0] < c.shape[0]): t = t.expand(c.shape[0], *t.shape[1:]) return torch.transpose(grid_sample(t, c, mode=mode, padding_mode='border').squeeze(), (- 1), (- 2))
def fetch_texture2D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (W,H,) or (C,W,H,) or (B,C,W,H,)\n uv : Tensor\n the input UV tensor with shape (N,2,) or (B,N,2,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the (N,C,) or (B,N,C,) fetched data from the input texture\n " t = texture if (t.ndimension() < 3): t = t.view(1, *t.shape) if (t.ndimension() < 4): t = t.view(1, *t.shape) c = ((uv * 2) - 1) if (c.ndimension() < 3): c = c.expand(t.shape[0], *c.shape) if (c.ndimension() < 4): c = c.view(c.shape[0], 1, *c.shape[1:]) if (t.shape[0] < c.shape[0]): t = t.expand(c.shape[0], *t.shape[1:]) return torch.transpose(grid_sample(t, c, mode=mode, padding_mode='border').squeeze(), (- 1), (- 2))<|docstring|>Fetches an input texture using the given UVs in range [0,1] Parameters ---------- texture : Tensor the input texture tensor with shape (W,H,) or (C,W,H,) or (B,C,W,H,) uv : Tensor the input UV tensor with shape (N,2,) or (B,N,2,) mode : str (optional) interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient. Returns ------- Tensor the (N,C,) or (B,N,C,) fetched data from the input texture<|endoftext|>
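The same [0,1] -> [-1,1] normalization drives fetch_texture2D; here is a direct grid_sample call on a 2x2 single-channel texture sampling the two diagonal corners (again bypassing the ACME wrapper):

import torch
import torch.nn.functional as F

tex = torch.tensor([[[[0., 1.], [2., 3.]]]])     # (B=1, C=1, H=2, W=2)
grid = torch.tensor([[[[-1., -1.], [1., 1.]]]])  # already in [-1, 1]: top-left and bottom-right
out = F.grid_sample(tex, grid, mode='bilinear', padding_mode='border', align_corners=True)
assert out.view(-1).tolist() == [0.0, 3.0]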
2853ce3f7533a65488e13b1f755bc5bdc6224f18f4345edd3b7704a03f452ecb
def fetch_texture3D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (C,W,H,D)\n uv : Tensor\n the input UV tensor with shape (N,3,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the fetched data from the input texture\n " return torch.reshape(grid_sample(texture.unsqueeze(0), torch.reshape(((uv * 2) - 1), (1, 1, 1, (- 1), 3)), mode=mode, padding_mode='border').squeeze(0), ((- 1), texture.shape[0]))
Fetches an input texture using the given UVs in range [0,1] Parameters ---------- texture : Tensor the input texture tensor with shape (C,W,H,D) uv : Tensor the input UV tensor with shape (N,3,) mode : str (optional) interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient. Returns ------- Tensor the fetched data from the input texture
ACME/color/fetch_texture.py
fetch_texture3D
mauriziokovacic/ACME
3
python
def fetch_texture3D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (C,W,H,D)\n uv : Tensor\n the input UV tensor with shape (N,3,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the fetched data from the input texture\n " return torch.reshape(grid_sample(texture.unsqueeze(0), torch.reshape(((uv * 2) - 1), (1, 1, 1, (- 1), 3)), mode=mode, padding_mode='border').squeeze(0), ((- 1), texture.shape[0]))
def fetch_texture3D(texture, uv, mode='bilinear'): "\n Fetches an input texture using the given UVs in range [0,1]\n\n Parameters\n ----------\n texture : Tensor\n the input texture tensor with shape (C,W,H,D)\n uv : Tensor\n the input UV tensor with shape (N,3,)\n mode : str (optional)\n interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient.\n\n Returns\n -------\n Tensor\n the fetched data from the input texture\n " return torch.reshape(grid_sample(texture.unsqueeze(0), torch.reshape(((uv * 2) - 1), (1, 1, 1, (- 1), 3)), mode=mode, padding_mode='border').squeeze(0), ((- 1), texture.shape[0]))<|docstring|>Fetches an input texture using the given UVs in range [0,1] Parameters ---------- texture : Tensor the input texture tensor with shape (C,W,H,D) uv : Tensor the input UV tensor with shape (N,3,) mode : str (optional) interpolation method. Only 'bilinear' or 'nearest' are accepted. Only 'bilinear' retains the gradient. Returns ------- Tensor the fetched data from the input texture<|endoftext|>
b4c63ff939b5e44596dac9c60f2d03a96486886793dcdd5ae07e27c16773d2ec
def tree(self, path): '\n .. versionadded:: 2014.7.0\n\n Recurse through etcd and return all values\n ' ret = {} try: items = self.read(path) except (etcd.EtcdKeyNotFound, ValueError): return None except etcd.EtcdConnectionFailed: log.error("etcd: failed to perform 'tree' operation on path %s due to connection error", path) return None for item in items.children: comps = str(item.key).split('/') if (item.dir is True): if (item.key == path): continue ret[comps[(- 1)]] = self.tree(item.key) else: ret[comps[(- 1)]] = item.value return ret
.. versionadded:: 2014.7.0 Recurse through etcd and return all values
salt/utils/etcd_util.py
tree
Flowdalic/salt
9,425
python
def tree(self, path): '\n .. versionadded:: 2014.7.0\n\n Recurse through etcd and return all values\n ' ret = {} try: items = self.read(path) except (etcd.EtcdKeyNotFound, ValueError): return None except etcd.EtcdConnectionFailed: log.error("etcd: failed to perform 'tree' operation on path %s due to connection error", path) return None for item in items.children: comps = str(item.key).split('/') if (item.dir is True): if (item.key == path): continue ret[comps[(- 1)]] = self.tree(item.key) else: ret[comps[(- 1)]] = item.value return ret
def tree(self, path): '\n .. versionadded:: 2014.7.0\n\n Recurse through etcd and return all values\n ' ret = {} try: items = self.read(path) except (etcd.EtcdKeyNotFound, ValueError): return None except etcd.EtcdConnectionFailed: log.error("etcd: failed to perform 'tree' operation on path %s due to connection error", path) return None for item in items.children: comps = str(item.key).split('/') if (item.dir is True): if (item.key == path): continue ret[comps[(- 1)]] = self.tree(item.key) else: ret[comps[(- 1)]] = item.value return ret<|docstring|>.. versionadded:: 2014.7.0 Recurse through etcd and return all values<|endoftext|>
a93ee4fa2613e4f046e25066c681e4e186eee9bab78c8e58031d8f914838705e
def save_file(notif_file, data): '\n Overwrite the file with the new information\n ' notif_file.seek(0) header = data[0].keys() writer = csv.DictWriter(notif_file, fieldnames=header) writer.writeheader() writer.writerows(data) notif_file.flush()
Overwrite the file with the new information
Chapter10/send_notifications.py
save_file
PacktPublishing/Python-Automation-Cookbook--Second-Edition
155
python
def save_file(notif_file, data): '\n \n ' notif_file.seek(0) header = data[0].keys() writer = csv.DictWriter(notif_file, fieldnames=header) writer.writeheader() writer.writerows(data) notif_file.flush()
def save_file(notif_file, data): '\n \n ' notif_file.seek(0) header = data[0].keys() writer = csv.DictWriter(notif_file, fieldnames=header) writer.writeheader() writer.writerows(data) notif_file.flush()<|docstring|>Overwrite the file with the new information<|endoftext|>
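One caveat on save_file: it rewinds with seek(0) but never calls truncate(), so writing fewer bytes than the file previously held leaves stale data at the tail. A defensive variant (the truncate() line is an addition, not part of the record):

import csv

def save_file_truncating(notif_file, data):
    notif_file.seek(0)
    notif_file.truncate()                # added: drop any leftover tail from a longer old file
    writer = csv.DictWriter(notif_file, fieldnames=data[0].keys())
    writer.writeheader()
    writer.writerows(data)
    notif_file.flush()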
5a4193f00c1bdfc356ae3094f8aaab3f0f15c42a19d9245111bd0d1e6aae8358
def int_ceil(x: int, y: int) -> int: '\n equivalent to math.ceil(x / y)\n :param x:\n :param y:\n :return:\n ' (q, r) = divmod(x, y) if r: q += 1 return q
equivalent to math.ceil(x / y) :param x: :param y: :return:
paginatify/__init__.py
int_ceil
coint-hub/paginatify-sqlalchemy
0
python
def int_ceil(x: int, y: int) -> int: '\n equivalent to math.ceil(x / y)\n :param x:\n :param y:\n :return:\n ' (q, r) = divmod(x, y) if r: q += 1 return q
def int_ceil(x: int, y: int) -> int: '\n equivalent to math.ceil(x / y)\n :param x:\n :param y:\n :return:\n ' (q, r) = divmod(x, y) if r: q += 1 return q<|docstring|>equivalent to math.ceil(x / y) :param x: :param y: :return:<|endoftext|>
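The divmod trick in int_ceil matches math.ceil(x / y) for ints, including negatives; a quick standalone check (the function body is restated from the record above):

import math

def int_ceil(x, y):
    q, r = divmod(x, y)
    return q + 1 if r else q

assert int_ceil(7, 2) == math.ceil(7 / 2) == 4
assert int_ceil(8, 2) == math.ceil(8 / 2) == 4
assert int_ceil(-7, 2) == math.ceil(-7 / 2) == -3   # divmod floors, so the +1 still lands right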
ff51d48df9b9839a9deaaa34091a17a67ebba05e8ddf5272ac8916e27cae9e9e
def fetch_resolved_tokens(path, pattern, default_expression=DEFAULT_EXPRESSION, match_start=True, match_end=True): 'Return resolved tokens from *path* and *pattern* if compatible.\n\n For instance::\n\n >>> fetch_resolved_tokens(\n ... "/path/my_project/ep002/sh003/scripts",\n ... "/path/{project}/{episode:ep\\d+}/{shot:sh\\d+}/scripts"\n ... )\n {\n "project": "my_project",\n "episode": "ep002",\n "shot": "sh003"\n }\n\n If the *path* and *pattern* are compatible, but the *pattern* does not\n specify any tokens, an empty mapping will be returned.\n\n >>> fetch_resolved_tokens(\n ... "/path/project/scripts",\n ... "/path/project/scripts"\n ... )\n {}\n\n If the *path* and *pattern* are not compatible, None is returned.\n\n >>> fetch_resolved_tokens(\n ... "/path/my_project/build/character/scripts",\n ... "/path/{project}/{episode:ep\\d+}/{shot:sh\\d+}/scripts"\n ... )\n None\n\n :param path: Path to compare template pattern to.\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :param default_expression: Regular expression pattern to use for tokens\n when no expression is specified. Default is\n :data:`nomanclator.symbol.DEFAULT_EXPRESSION`.\n\n :param match_start: Indicate whether the *path* should match against the\n start of the *pattern*. Default is True.\n\n :param match_end: Indicate whether the *path* should match against the\n end of the *pattern*. Default is True.\n\n :return: Mapping regrouping resolved token value associated with\n their name, or None if the *path* and *pattern* are not compatible.\n\n ' regex = construct_regexp(pattern, default_expression=default_expression, match_start=match_start, match_end=match_end) match = regex.search(path) if match: return match.groupdict()
Return resolved tokens from *path* and *pattern* if compatible. For instance:: >>> fetch_resolved_tokens( ... "/path/my_project/ep002/sh003/scripts", ... "/path/{project}/{episode:ep\d+}/{shot:sh\d+}/scripts" ... ) { "project": "my_project", "episode": "ep002", "shot": "sh003" } If the *path* and *pattern* are compatible, but the *pattern* does not specify any tokens, an empty mapping will be returned. >>> fetch_resolved_tokens( ... "/path/project/scripts", ... "/path/project/scripts" ... ) {} If the *path* and *pattern* are not compatible, None is returned. >>> fetch_resolved_tokens( ... "/path/my_project/build/character/scripts", ... "/path/{project}/{episode:ep\d+}/{shot:sh\d+}/scripts" ... ) None :param path: Path to compare template pattern to. :param pattern: String representing a template pattern path, with or without tokens. :param default_expression: Regular expression pattern to use for tokens when no expression is specified. Default is :data:`nomanclator.symbol.DEFAULT_EXPRESSION`. :param match_start: Indicate whether the *path* should match against the start of the *pattern*. Default is True. :param match_end: Indicate whether the *path* should match against the end of the *pattern*. Default is True. :return: Mapping regrouping resolved token value associated with their name, or None if the *path* and *pattern* are not compatible.
source/nomenclator/template.py
fetch_resolved_tokens
buddly27/nomenclator-nuke
11
python
def fetch_resolved_tokens(path, pattern, default_expression=DEFAULT_EXPRESSION, match_start=True, match_end=True): 'Return resolved tokens from *path* and *pattern* if compatible.\n\n For instance::\n\n >>> fetch_resolved_tokens(\n ... "/path/my_project/ep002/sh003/scripts",\n ... "/path/{project}/{episode:ep\\d+}/{shot:sh\\d+}/scripts"\n ... )\n {\n "project": "my_project",\n "episode": "ep002",\n "shot": "sh003"\n }\n\n If the *path* and *pattern* are compatible, but the *pattern* does not\n specify any tokens, an empty mapping will be returned.\n\n >>> fetch_resolved_tokens(\n ... "/path/project/scripts",\n ... "/path/project/scripts"\n ... )\n {}\n\n If the *path* and *pattern* are not compatible, None is returned.\n\n >>> fetch_resolved_tokens(\n ... "/path/my_project/build/character/scripts",\n ... "/path/{project}/{episode:ep\\d+}/{shot:sh\\d+}/scripts"\n ... )\n None\n\n :param path: Path to compare template pattern to.\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :param default_expression: Regular expression pattern to use for tokens\n when no expression is specified. Default is\n :data:`nomanclator.symbol.DEFAULT_EXPRESSION`.\n\n :param match_start: Indicate whether the *path* should match against the\n start of the *pattern*. Default is True.\n\n :param match_end: Indicate whether the *path* should match against the\n end of the *pattern*. Default is True.\n\n :return: Mapping regrouping resolved token value associated with\n their name, or None if the *path* and *pattern* are not compatible.\n\n ' regex = construct_regexp(pattern, default_expression=default_expression, match_start=match_start, match_end=match_end) match = regex.search(path) if match: return match.groupdict()
def fetch_resolved_tokens(path, pattern, default_expression=DEFAULT_EXPRESSION, match_start=True, match_end=True): 'Return resolved tokens from *path* and *pattern* if compatible.\n\n For instance::\n\n >>> fetch_resolved_tokens(\n ... "/path/my_project/ep002/sh003/scripts",\n ... "/path/{project}/{episode:ep\\d+}/{shot:sh\\d+}/scripts"\n ... )\n {\n "project": "my_project",\n "episode": "ep002",\n "shot": "sh003"\n }\n\n If the *path* and *pattern* are compatible, but the *pattern* does not\n specify any tokens, an empty mapping will be returned.\n\n >>> fetch_resolved_tokens(\n ... "/path/project/scripts",\n ... "/path/project/scripts"\n ... )\n {}\n\n If the *path* and *pattern* are not compatible, None is returned.\n\n >>> fetch_resolved_tokens(\n ... "/path/my_project/build/character/scripts",\n ... "/path/{project}/{episode:ep\\d+}/{shot:sh\\d+}/scripts"\n ... )\n None\n\n :param path: Path to compare template pattern to.\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :param default_expression: Regular expression pattern to use for tokens\n when no expression is specified. Default is\n :data:`nomanclator.symbol.DEFAULT_EXPRESSION`.\n\n :param match_start: Indicate whether the *path* should match against the\n start of the *pattern*. Default is True.\n\n :param match_end: Indicate whether the *path* should match against the\n end of the *pattern*. Default is True.\n\n :return: Mapping regrouping resolved token value associated with\n their name, or None if the *path* and *pattern* are not compatible.\n\n ' regex = construct_regexp(pattern, default_expression=default_expression, match_start=match_start, match_end=match_end) match = regex.search(path) if match: return match.groupdict()<|docstring|>Return resolved tokens from *path* and *pattern* if compatible. For instance:: >>> fetch_resolved_tokens( ... "/path/my_project/ep002/sh003/scripts", ... "/path/{project}/{episode:ep\d+}/{shot:sh\d+}/scripts" ... ) { "project": "my_project", "episode": "ep002", "shot": "sh003" } If the *path* and *pattern* are compatible, but the *pattern* does not specify any tokens, an empty mapping will be returned. >>> fetch_resolved_tokens( ... "/path/project/scripts", ... "/path/project/scripts" ... ) {} If the *path* and *pattern* are not compatible, None is returned. >>> fetch_resolved_tokens( ... "/path/my_project/build/character/scripts", ... "/path/{project}/{episode:ep\d+}/{shot:sh\d+}/scripts" ... ) None :param path: Path to compare template pattern to. :param pattern: String representing a template pattern path, with or without tokens. :param default_expression: Regular expression pattern to use for tokens when no expression is specified. Default is :data:`nomanclator.symbol.DEFAULT_EXPRESSION`. :param match_start: Indicate whether the *path* should match against the start of the *pattern*. Default is True. :param match_end: Indicate whether the *path* should match against the end of the *pattern*. Default is True. :return: Mapping regrouping resolved token value associated with their name, or None if the *path* and *pattern* are not compatible.<|endoftext|>
796697e69b6cb57a40fa75ac482a1a0cf1f84a7ec6207b8a48930fd6b09dcaa0
def construct_regexp(pattern, default_expression=DEFAULT_EXPRESSION, match_start=True, match_end=True): 'Return template pattern converted into a regular expression.\n\n For instance::\n\n >>> construct_regexp("/path/{project}/{episode:ep\\d+}")\n re.compile(r"^/path/(?P<project>[\\w_.-]+)/(?P<episode>ep\\d+)$")\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :param default_expression: Regular expression pattern to use for tokens\n when no expression is specified. Default is\n :data:`nomanclator.symbol.DEFAULT_TOKEN_EXPRESSION`.\n\n :param match_start: Indicate whether the regular expression returned\n should match against the start of an input. Default is True.\n\n :param match_end: Indicate whether the regular expression returned\n should match against the end of an input. Default is True.\n\n :return: Compiled regular expression.\n\n ' pattern = sanitize_pattern(pattern) def _convert(match): 'Return corresponding regular expression.' name = match.group('name') expression = (match.group('expression') or default_expression) return '(?P<{0}>{1})'.format(name, expression) sub_pattern = '{(?P<name>.+?)(:(?P<expression>.+?))?}' pattern = re.sub(sub_pattern, _convert, pattern) if match_start: pattern = ('^' + pattern) if match_end: pattern += '$' return re.compile(pattern)
Return template pattern converted into a regular expression. For instance:: >>> construct_regexp("/path/{project}/{episode:ep\d+}") re.compile(r"^/path/(?P<project>[\w_.-]+)/(?P<episode>ep\d+)$") :param pattern: String representing a template pattern path, with or without tokens. :param default_expression: Regular expression pattern to use for tokens when no expression is specified. Default is :data:`nomanclator.symbol.DEFAULT_TOKEN_EXPRESSION`. :param match_start: Indicate whether the regular expression returned should match against the start of an input. Default is True. :param match_end: Indicate whether the regular expression returned should match against the end of an input. Default is True. :return: Compiled regular expression.
source/nomenclator/template.py
construct_regexp
buddly27/nomenclator-nuke
11
python
def construct_regexp(pattern, default_expression=DEFAULT_EXPRESSION, match_start=True, match_end=True): 'Return template pattern converted into a regular expression.\n\n For instance::\n\n >>> construct_regexp("/path/{project}/{episode:ep\\d+}")\n re.compile(r"^/path/(?P<project>[\\w_.-]+)/(?P<episode>ep\\d+)$")\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :param default_expression: Regular expression pattern to use for tokens\n when no expression is specified. Default is\n :data:`nomanclator.symbol.DEFAULT_TOKEN_EXPRESSION`.\n\n :param match_start: Indicate whether the regular expression returned\n should match against the start of an input. Default is True.\n\n :param match_end: Indicate whether the regular expression returned\n should match against the end of an input. Default is True.\n\n :return: Compiled regular expression.\n\n ' pattern = sanitize_pattern(pattern) def _convert(match): 'Return corresponding regular expression.' name = match.group('name') expression = (match.group('expression') or default_expression) return '(?P<{0}>{1})'.format(name, expression) sub_pattern = '{(?P<name>.+?)(:(?P<expression>.+?))?}' pattern = re.sub(sub_pattern, _convert, pattern) if match_start: pattern = ('^' + pattern) if match_end: pattern += '$' return re.compile(pattern)
def construct_regexp(pattern, default_expression=DEFAULT_EXPRESSION, match_start=True, match_end=True): 'Return template pattern converted into a regular expression.\n\n For instance::\n\n >>> construct_regexp("/path/{project}/{episode:ep\\d+}")\n re.compile(r"^/path/(?P<project>[\\w_.-]+)/(?P<episode>ep\\d+)$")\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :param default_expression: Regular expression pattern to use for tokens\n when no expression is specified. Default is\n :data:`nomanclator.symbol.DEFAULT_TOKEN_EXPRESSION`.\n\n :param match_start: Indicate whether the regular expression returned\n should match against the start of an input. Default is True.\n\n :param match_end: Indicate whether the regular expression returned\n should match against the end of an input. Default is True.\n\n :return: Compiled regular expression.\n\n ' pattern = sanitize_pattern(pattern) def _convert(match): 'Return corresponding regular expression.' name = match.group('name') expression = (match.group('expression') or default_expression) return '(?P<{0}>{1})'.format(name, expression) sub_pattern = '{(?P<name>.+?)(:(?P<expression>.+?))?}' pattern = re.sub(sub_pattern, _convert, pattern) if match_start: pattern = ('^' + pattern) if match_end: pattern += '$' return re.compile(pattern)<|docstring|>Return template pattern converted into a regular expression. For instance:: >>> construct_regexp("/path/{project}/{episode:ep\d+}") re.compile(r"^/path/(?P<project>[\w_.-]+)/(?P<episode>ep\d+)$") :param pattern: String representing a template pattern path, with or without tokens. :param default_expression: Regular expression pattern to use for tokens when no expression is specified. Default is :data:`nomanclator.symbol.DEFAULT_TOKEN_EXPRESSION`. :param match_start: Indicate whether the regular expression returned should match against the start of an input. Default is True. :param match_end: Indicate whether the regular expression returned should match against the end of an input. Default is True. :return: Compiled regular expression.<|endoftext|>
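Writing out the compiled expression from the construct_regexp docstring example and matching it by hand shows the named groups doing the work (the [\w_.-]+ class is the assumed default expression):

import re

regexp = re.compile(r"^/path/(?P<project>[\w_.-]+)/(?P<episode>ep\d+)$")
match = regexp.search("/path/my_project/ep002")
assert match.groupdict() == {"project": "my_project", "episode": "ep002"}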
082e0e67cfd3a9d3e8f127a42c6ce15297c153e721b765d90f3436163cfc7ad2
def sanitize_pattern(pattern): 'Return template pattern with all special characters escaped.\n\n Tokens name and expressions are returned unchanged.\n\n For instance::\n\n >>> sanitize_pattern("/path*/{job:J_.*}")\n "/path\\*/{job:J_.*}"\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :return: Sanitized string template pattern.\n\n ' def _escape(match): "Escape 'other' group value if required." groups = match.groupdict() if (groups['other'] is not None): return re.escape(groups['other']) return groups['token'] sub_pattern = '(?P<token>{(.+?)(:.+?)?})|(?P<other>.+?)' return re.sub(sub_pattern, _escape, pattern)
Return template pattern with all special characters escaped. Tokens name and expressions are returned unchanged. For instance:: >>> sanitize_pattern("/path*/{job:J_.*}") "/path\*/{job:J_.*}" :param pattern: String representing a template pattern path, with or without tokens. :return: Sanitized string template pattern.
source/nomenclator/template.py
sanitize_pattern
buddly27/nomenclator-nuke
11
python
def sanitize_pattern(pattern): 'Return template pattern with all special characters escaped.\n\n Tokens name and expressions are returned unchanged.\n\n For instance::\n\n >>> sanitize_pattern("/path*/{job:J_.*}")\n "/path\\*/{job:J_.*}"\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :return: Sanitized string template pattern.\n\n ' def _escape(match): "Escape 'other' group value if required." groups = match.groupdict() if (groups['other'] is not None): return re.escape(groups['other']) return groups['token'] sub_pattern = '(?P<token>{(.+?)(:.+?)?})|(?P<other>.+?)' return re.sub(sub_pattern, _escape, pattern)
def sanitize_pattern(pattern): 'Return template pattern with all special characters escaped.\n\n Tokens name and expressions are returned unchanged.\n\n For instance::\n\n >>> sanitize_pattern("/path*/{job:J_.*}")\n "/path\\*/{job:J_.*}"\n\n :param pattern: String representing a template pattern path,\n with or without tokens.\n\n :return: Sanitized string template pattern.\n\n ' def _escape(match): "Escape 'other' group value if required." groups = match.groupdict() if (groups['other'] is not None): return re.escape(groups['other']) return groups['token'] sub_pattern = '(?P<token>{(.+?)(:.+?)?})|(?P<other>.+?)' return re.sub(sub_pattern, _escape, pattern)<|docstring|>Return template pattern with all special characters escaped. Tokens name and expressions are returned unchanged. For instance:: >>> sanitize_pattern("/path*/{job:J_.*}") "/path\*/{job:J_.*}" :param pattern: String representing a template pattern path, with or without tokens. :return: Sanitized string template pattern.<|endoftext|>
9e458897c5aa8afc4e6a1fe0b2da3c2996b543884044d02153df9d231a89e69d
def generate_scene_name(pattern, suffix, append_username=False, token_mapping=None): 'Generate scene name from *pattern* using a mapping of resolved tokens.\n\n :param pattern: String representing a template base,\n with or without tokens.\n\n :param suffix: Suffix to apply for the generated name (e.g. "nk" or "hrox").\n\n :param append_username: Indicate whether username should be appended to base name.\n Default is False.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name. Default is None.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' if append_username: pattern += '_{username}' pattern += '.{}'.format(suffix) return resolve(pattern, (token_mapping or {}))
Generate scene name from *pattern* using a mapping of resolved tokens. :param pattern: String representing a template base, with or without tokens. :param suffix: Suffix to apply for the generated name (e.g. "nk" or "hrox"). :param append_username: Indicate whether username should be appended to base name. Default is False. :param token_mapping: Mapping regrouping resolved token values associated with their name. Default is None. :return: String name. :raise: exc:`ValueError` if a token within the *pattern* does not have any value within the token map.
source/nomenclator/template.py
generate_scene_name
buddly27/nomenclator-nuke
11
python
def generate_scene_name(pattern, suffix, append_username=False, token_mapping=None): 'Generate scene name from *pattern* using a mapping of resolved tokens.\n\n :param pattern: String representing a template base,\n with or without tokens.\n\n :param suffix: Suffix to apply for the generated name (e.g. "nk" or "hrox").\n\n :param append_username: Indicate whether username should be appended to base name.\n Default is False.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name. Default is None.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' if append_username: pattern += '_{username}' pattern += '.{}'.format(suffix) return resolve(pattern, (token_mapping or {}))
def generate_scene_name(pattern, suffix, append_username=False, token_mapping=None): 'Generate scene name from *pattern* using a mapping of resolved tokens.\n\n :param pattern: String representing a template base,\n with or without tokens.\n\n :param suffix: Suffix to apply for the generated name (e.g. "nk" or "hrox").\n\n :param append_username: Indicate whether username should be appended to base name.\n Default is False.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name. Default is None.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' if append_username: pattern += '_{username}' pattern += '.{}'.format(suffix) return resolve(pattern, (token_mapping or {}))<|docstring|>Generate scene name from *pattern* using a mapping of resolved tokens. :param pattern: String representing a template base, with or without tokens. :param suffix: Suffix to apply for the generated name (e.g. "nk" or "hrox"). :param append_username: Indicate whether username should be appended to base name. Default is False. :param token_mapping: Mapping regrouping resolved token values associated with their name. Default is None. :return: String name. :raise: exc:`ValueError` if a token within the *pattern* does not have any value within the token map.<|endoftext|>
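generate_scene_name just grows the pattern before delegating to resolve(); with str.format standing in for resolve (an assumption about its substitution behavior) and hypothetical token values, the composition for suffix 'nk' with append_username=True is:

pattern = '{project}_{shot}_v{version}'
pattern += '_{username}'                 # append_username=True
pattern += '.nk'                         # suffix
name = pattern.format(project='my_project', shot='sh003', version='002', username='jdoe')
assert name == 'my_project_sh003_v002_jdoe.nk'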
f4d35876bbef025cc4c1e4f80c6e014820f640abff743682e6fa936a1e1adeec
def generate_output_name(pattern, suffix, append_passname_to_subfolder=False, append_passname=False, append_colorspace=False, append_username=False, multi_views=False, token_mapping=None): 'Generate output name from *pattern* using a mapping of resolved tokens.\n\n :param pattern: String representing a template base,\n with or without tokens.\n\n :param suffix: Suffix to apply for the generated name (e.g. "exr").\n\n :param append_passname_to_subfolder: Indicate whether passname should be appended to sub-folder.\n Default is False.\n\n :param append_passname: Indicate whether passname should be appended to base name.\n Default is False.\n\n :param append_colorspace: Indicate whether colorspace should be appended to base name.\n Default is False.\n\n :param append_username: Indicate whether username should be appended to base name.\n Default is False.\n\n :param multi_views: Indicate whether the view should be appended to base name\n with the pattern \'%V\'. Default is False.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name. Default is None.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' elements = pattern.split(os.sep) if ((len(elements) > 1) and append_passname_to_subfolder): elements[(- 2)] += '_{passname}' if append_colorspace: elements[(- 1)] += '_{colorspace}' if append_username: elements[(- 1)] += '_{username}' if append_passname: elements[(- 1)] += '_{passname}' if multi_views: elements[(- 1)] += '_%V' if (suffix not in VIDEO_TYPES): elements[(- 1)] += '.{padding}' elements[(- 1)] += '.{}'.format(suffix) return resolve(os.sep.join(elements), (token_mapping or {}))
Generate output name from *pattern* using a mapping of resolved tokens. :param pattern: String representing a template base, with or without tokens. :param suffix: Suffix to apply for the generated name (e.g. "exr"). :param append_passname_to_subfolder: Indicate whether passname should be appended to sub-folder. Default is False. :param append_passname: Indicate whether passname should be appended to base name. Default is False. :param append_colorspace: Indicate whether colorspace should be appended to base name. Default is False. :param append_username: Indicate whether username should be appended to base name. Default is False. :param multi_views: Indicate whether the view should be appended to base name with the pattern '%V'. Default is False. :param token_mapping: Mapping regrouping resolved token values associated with their name. Default is None. :return: String name. :raise: exc:`ValueError` if a token within the *pattern* does not have any value within the token map.
source/nomenclator/template.py
generate_output_name
buddly27/nomenclator-nuke
11
python
def generate_output_name(pattern, suffix, append_passname_to_subfolder=False, append_passname=False, append_colorspace=False, append_username=False, multi_views=False, token_mapping=None): 'Generate output name from *pattern* using a mapping of resolved tokens.\n\n :param pattern: String representing a template base,\n with or without tokens.\n\n :param suffix: Suffix to apply for the generated name (e.g. "exr").\n\n :param append_passname_to_subfolder: Indicate whether passname should be appended to sub-folder.\n Default is False.\n\n :param append_passname: Indicate whether passname should be appended to base name.\n Default is False.\n\n :param append_colorspace: Indicate whether colorspace should be appended to base name.\n Default is False.\n\n :param append_username: Indicate whether username should be appended to base name.\n Default is False.\n\n :param multi_views: Indicate whether the view should be appended to base name\n with the pattern \'%V\'. Default is False.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name. Default is None.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' elements = pattern.split(os.sep) if ((len(elements) > 1) and append_passname_to_subfolder): elements[(- 2)] += '_{passname}' if append_colorspace: elements[(- 1)] += '_{colorspace}' if append_username: elements[(- 1)] += '_{username}' if append_passname: elements[(- 1)] += '_{passname}' if multi_views: elements[(- 1)] += '_%V' if (suffix not in VIDEO_TYPES): elements[(- 1)] += '.{padding}' elements[(- 1)] += '.{}'.format(suffix) return resolve(os.sep.join(elements), (token_mapping or {}))
def generate_output_name(pattern, suffix, append_passname_to_subfolder=False, append_passname=False, append_colorspace=False, append_username=False, multi_views=False, token_mapping=None): 'Generate output name from *pattern* using a mapping of resolved tokens.\n\n :param pattern: String representing a template base,\n with or without tokens.\n\n :param suffix: Suffix to apply for the generated name (e.g. "exr").\n\n :param append_passname_to_subfolder: Indicate whether passname should be appended to sub-folder.\n Default is False.\n\n :param append_passname: Indicate whether passname should be appended to base name.\n Default is False.\n\n :param append_colorspace: Indicate whether colorspace should be appended to base name.\n Default is False.\n\n :param append_username: Indicate whether username should be appended to base name.\n Default is False.\n\n :param multi_views: Indicate whether the view should be appended to base name\n with the pattern \'%V\'. Default is False.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name. Default is None.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' elements = pattern.split(os.sep) if ((len(elements) > 1) and append_passname_to_subfolder): elements[(- 2)] += '_{passname}' if append_colorspace: elements[(- 1)] += '_{colorspace}' if append_username: elements[(- 1)] += '_{username}' if append_passname: elements[(- 1)] += '_{passname}' if multi_views: elements[(- 1)] += '_%V' if (suffix not in VIDEO_TYPES): elements[(- 1)] += '.{padding}' elements[(- 1)] += '.{}'.format(suffix) return resolve(os.sep.join(elements), (token_mapping or {}))<|docstring|>Generate output name from *pattern* using a mapping of resolved tokens. :param pattern: String representing a template base, with or without tokens. :param suffix: Suffix to apply for the generated name (e.g. "exr"). :param append_passname_to_subfolder: Indicate whether passname should be appended to sub-folder. Default is False. :param append_passname: Indicate whether passname should be appended to base name. Default is False. :param append_colorspace: Indicate whether colorspace should be appended to base name. Default is False. :param append_username: Indicate whether username should be appended to base name. Default is False. :param multi_views: Indicate whether the view should be appended to base name with the pattern '%V'. Default is False. :param token_mapping: Mapping regrouping resolved token values associated with their name. Default is None. :return: String name. :raise: exc:`ValueError` if a token within the *pattern* does not have any value within the token map.<|endoftext|>
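A hedged sketch of generate_output_name, assuming "exr" is not in VIDEO_TYPES (so the ".{padding}" token is kept); the expected output below uses the POSIX os.sep.

import os
from nomenclator.template import generate_output_name

# With a multi-element pattern, append_passname_to_subfolder edits the
# second-to-last path element; the other flags extend the file base name
# in the order colorspace, username, passname, %V, padding, suffix.
name = generate_output_name(
    os.path.join('{shot}', 'renders', '{shot}_beauty'), 'exr',
    append_passname_to_subfolder=True,
    append_colorspace=True,
    token_mapping={'shot': 'sh010', 'passname': 'main',
                   'colorspace': 'acescg', 'padding': '%04d'},
)
print(name)  # sh010/renders_main/sh010_beauty_acescg.%04d.exr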
092de97fa268c2164569d1a8a75fa70c779cb552c96df8d034edc52c8d455286
def resolve(pattern, token_mapping): 'Return the resolved name for *pattern*.\n\n :param pattern: String representing a template pattern,\n with or without tokens.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' def _remove_expression(match): 'Return corresponding pattern without expression.' return '{{{0}}}'.format(match.group('name').split(':', 1)[0]) sub_pattern = '{(?P<name>.+?)}' pattern = re.sub(sub_pattern, _remove_expression, pattern) try: return pattern.format(**token_mapping) except KeyError as error: raise ValueError('Missing token value: {}'.format(error))
Return the resolved name for *pattern*. :param pattern: String representing a template pattern, with or without tokens. :param token_mapping: Mapping regrouping resolved token values associated with their name. :return: String name. :raise: exc:`ValueError` if a token within the *pattern* does not have any value within the token map.
source/nomenclator/template.py
resolve
buddly27/nomenclator-nuke
11
python
def resolve(pattern, token_mapping): 'Return the resolved name for *pattern*.\n\n :param pattern: String representing a template pattern,\n with or without tokens.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' def _remove_expression(match): 'Return corresponding pattern without expression.' return '{{{0}}}'.format(match.group('name').split(':', 1)[0]) sub_pattern = '{(?P<name>.+?)}' pattern = re.sub(sub_pattern, _remove_expression, pattern) try: return pattern.format(**token_mapping) except KeyError as error: raise ValueError('Missing token value: {}'.format(error))
def resolve(pattern, token_mapping): 'Return the resolved name for *pattern*.\n\n :param pattern: String representing a template pattern,\n with or without tokens.\n\n :param token_mapping: Mapping regrouping resolved token values associated\n with their name.\n\n :return: String name.\n\n :raise: exc:`ValueError` if a token within the *pattern* does not have any\n value within the token map.\n\n ' def _remove_expression(match): 'Return corresponding pattern without expression.' return '{{{0}}}'.format(match.group('name').split(':', 1)[0]) sub_pattern = '{(?P<name>.+?)}' pattern = re.sub(sub_pattern, _remove_expression, pattern) try: return pattern.format(**token_mapping) except KeyError as error: raise ValueError('Missing token value: {}'.format(error))<|docstring|>Return the resolved name for *pattern*. :param pattern: String representing a template pattern, with or without tokens. :param token_mapping: Mapping regrouping resolved token values associated with their name. :return: String name. :raise: exc:`ValueError` if a token within the *pattern* does not have any value within the token map.<|endoftext|>
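Two small checks of resolve, assuming it is importable: an embedded expression such as '{shot:\w+}' is reduced to '{shot}' before formatting, and a missing token surfaces as ValueError rather than KeyError.

from nomenclator.template import resolve

print(resolve('{project}_{shot:\\w+}', {'project': 'prj', 'shot': 'sh010'}))
# prj_sh010

try:
    resolve('{project}_{shot}', {'project': 'prj'})
except ValueError as error:
    print(error)  # Missing token value: 'shot'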
d7acdca05e5bbe8a5716fe3a92f52c58c42ebe43f48b8e92fab2df316485ec89
def _convert(match): 'Return corresponding regular expression.' name = match.group('name') expression = (match.group('expression') or default_expression) return '(?P<{0}>{1})'.format(name, expression)
Return corresponding regular expression.
source/nomenclator/template.py
_convert
buddly27/nomenclator-nuke
11
python
def _convert(match): name = match.group('name') expression = (match.group('expression') or default_expression) return '(?P<{0}>{1})'.format(name, expression)
def _convert(match): name = match.group('name') expression = (match.group('expression') or default_expression) return '(?P<{0}>{1})'.format(name, expression)<|docstring|>Return corresponding regular expression.<|endoftext|>
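_convert is a re.sub callback, so it needs the enclosing substitution to be useful; that call site is not among these records, and the pattern and default_expression below are assumptions chosen to exercise the callback, not the module's actual values.

import re

default_expression = '[\\w_.\\-]+'  # assumed default; the real value is not shown

def _convert(match):
    name = match.group('name')
    expression = (match.group('expression') or default_expression)
    return '(?P<{0}>{1})'.format(name, expression)

# "{name}" and "{name:expression}" tokens become named capture groups.
template_regex = re.sub('{(?P<name>\\w+)(:(?P<expression>[^}]+))?}',
                        _convert, '{project}_{index:\\d+}')
print(template_regex)  # (?P<project>[\w_.\-]+)_(?P<index>\d+)
print(re.match(template_regex, 'prj_042').groupdict())
# {'project': 'prj', 'index': '042'}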
a712f850e283448872c07f9c4e4a49050181985be81937ee48adb52f32346f38
def _escape(match): "Escape 'other' group value if required." groups = match.groupdict() if (groups['other'] is not None): return re.escape(groups['other']) return groups['token']
Escape 'other' group value if required.
source/nomenclator/template.py
_escape
buddly27/nomenclator-nuke
11
python
def _escape(match): groups = match.groupdict() if (groups['other'] is not None): return re.escape(groups['other']) return groups['token']
def _escape(match): groups = match.groupdict() if (groups['other'] is not None): return re.escape(groups['other']) return groups['token']<|docstring|>Escape 'other' group value if required.<|endoftext|>
ac3c727abcfaf0e9666c2bb668b0288aa596c8eaaf3fb69844626b581d4f889e
def _remove_expression(match): 'Return corresponding pattern without expression.' return '{{{0}}}'.format(match.group('name').split(':', 1)[0])
Return corresponding pattern without expression.
source/nomenclator/template.py
_remove_expression
buddly27/nomenclator-nuke
11
python
def _remove_expression(match): return '{{{0}}}'.format(match.group('name').split(':', 1)[0])
def _remove_expression(match): return '{{{0}}}'.format(match.group('name').split(':', 1)[0])<|docstring|>Return corresponding pattern without expression.<|endoftext|>
72813f49de6950a9568ad2751f65fecd8d525f78321a22012914cb9fdc1c7f39
def __init__(self, name, price=10, weight=20, flammability=0.5, identifier=random.randint(1000000, 9999999)): '\n Part 1: Constructor initializes Product class\n ' self.name = str(name) self.price = int(price) self.weight = int(weight) self.flammability = float(flammability) self.identifier = int(identifier)
Part 1: Constructor initializes Product class
acme.py
__init__
andrewlee977/lambdata
0
python
def __init__(self, name, price=10, weight=20, flammability=0.5, identifier=random.randint(1000000, 9999999)): '\n \n ' self.name = str(name) self.price = int(price) self.weight = int(weight) self.flammability = float(flammability) self.identifier = int(identifier)
def __init__(self, name, price=10, weight=20, flammability=0.5, identifier=random.randint(1000000, 9999999)): '\n \n ' self.name = str(name) self.price = int(price) self.weight = int(weight) self.flammability = float(flammability) self.identifier = int(identifier)<|docstring|>Part 1: Constructor initializes Product class<|endoftext|>
b01eb46be0ad74880356e1deb8052d077b70be1f57c6970815355f3a50f38b1a
def stealability(self): '\n Part 2: function calculates stealability of an instance\n ' ppw = (self.price / self.weight) if (ppw < 0.5): return 'Not so stealable...' elif ((ppw >= 0.5) and (ppw < 1.0)): return 'Kinda stealable...' else: return 'Very stealable!'
Part 2: function calculates stealability of an instance
acme.py
stealability
andrewlee977/lambdata
0
python
def stealability(self): '\n \n ' ppw = (self.price / self.weight) if (ppw < 0.5): return 'Not so stealable...' elif ((ppw >= 0.5) and (ppw < 1.0)): return 'Kinda stealable...' else: return 'Very stealable!'
def stealability(self): '\n \n ' ppw = (self.price / self.weight) if (ppw < 0.5): return 'Not so stealable...' elif ((ppw >= 0.5) and (ppw < 1.0)): return 'Kinda stealable...' else: return 'Very stealable!'<|docstring|>Part 2: function calculates stealability of an instance<|endoftext|>
e1524837a446a9240ecb173e6573d121cd0b6976dc60da3df3cee6f8fffb1dc8
def explode(self): '\n Part 2: function calculates explodability of an instance\n ' ftw = (self.flammability * self.weight) if (ftw < 10): return '...fizzle' elif ((ftw >= 10) and (ftw < 50)): return '...boom!' else: return '...BABOOM!!'
Part 2: function calculates explodability of an instance
acme.py
explode
andrewlee977/lambdata
0
python
def explode(self): '\n \n ' ftw = (self.flammability * self.weight) if (ftw < 10): return '...fizzle' elif ((ftw >= 10) and (ftw < 50)): return '...boom!' else: return '...BABOOM!!'
def explode(self): '\n \n ' ftw = (self.flammability * self.weight) if (ftw < 10): return '...fizzle' elif ((ftw >= 10) and (ftw < 50)): return '...boom!' else: return '...BABOOM!!'<|docstring|>Part 2: function calculates explodability of an instance<|endoftext|>
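The three acme.py records above (constructor, stealability, explode) are re-assembled here so the thresholds can be exercised directly; nothing beyond what those records show is added.

import random

class Product:
    # Note: the identifier default is evaluated once at import time, a
    # quirk kept from the original constructor.
    def __init__(self, name, price=10, weight=20, flammability=0.5,
                 identifier=random.randint(1000000, 9999999)):
        self.name = str(name)
        self.price = int(price)
        self.weight = int(weight)
        self.flammability = float(flammability)
        self.identifier = int(identifier)

    def stealability(self):
        ppw = self.price / self.weight  # price per weight
        if ppw < 0.5:
            return 'Not so stealable...'
        elif ppw < 1.0:
            return 'Kinda stealable...'
        return 'Very stealable!'

    def explode(self):
        ftw = self.flammability * self.weight
        if ftw < 10:
            return '...fizzle'
        elif ftw < 50:
            return '...boom!'
        return '...BABOOM!!'

prod = Product('anvil', price=25, weight=20, flammability=0.8)
print(prod.stealability())  # 25/20 = 1.25 -> Very stealable!
print(prod.explode())       # 0.8*20 = 16.0 -> ...boom!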
6909ff8c8050772b5bf0d7602a3b83523365c101713043206b0c17293314485b
def __init___(self, name, price, flammability, identifier, weight=10): '\n Part 3: Constructor initializes BoxingGlove class\n ' super().__init__(name, price, weight, flammability, identifier)
Part 3: Constructor initializes BoxingGlove class
acme.py
__init___
andrewlee977/lambdata
0
python
def __init___(self, name, price, flammability, identifier, weight=10): '\n \n ' super().__init__(name, price, weight, flammability, identifier)
def __init___(self, name, price, flammability, identifier, weight=10): '\n \n ' super().__init__(name, price, weight, flammability, identifier)<|docstring|>Part 3: Constructor initializes BoxingGlove class<|endoftext|>
1dc80e1fd6386190ff8f53d8ea5baa98bcb19323f610373b6f89e06b0d3c671f
def explode(self): '\n Part 3: function calculates explodability of an instance\n ' return "...it's a glove."
Part 3: function calculates explodability of an instance
acme.py
explode
andrewlee977/lambdata
0
python
def explode(self): '\n \n ' return "...it's a glove."
def explode(self): '\n \n ' return "...it's a glove."<|docstring|>Part 3: function calculates explodability of an instance<|endoftext|>
d94c0ea2cb56757610577b6cd948c1949518e44818bf4a91445e2c37c2580850
def add_char(store, char): 'add a character to the count store' already_there = False for x in store: if (x[0] == char): x[1] = (x[1] + 1) already_there = True if (not already_there): store.append([char, 1])
add a character to the count store
code/char_histo_vert.py
add_char
tumuum/prog-book
0
python
def add_char(store, char): already_there = False for x in store: if (x[0] == char): x[1] = (x[1] + 1) already_there = True if (not already_there): store.append([char, 1])
def add_char(store, char): already_there = False for x in store: if (x[0] == char): x[1] = (x[1] + 1) already_there = True if (not already_there): store.append([char, 1])<|docstring|>add a character to the count store<|endoftext|>
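A direct run of add_char as defined above; the store is a list of [character, count] pairs, so each insert scans the list linearly.

store = []
for char in 'banana':
    add_char(store, char)
print(store)  # [['b', 1], ['a', 3], ['n', 2]]

For anything beyond toy input, collections.Counter('banana') produces the same counts with a dict-backed, single-pass implementation.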
5b80f7fdd96dec5c8e3e0bb7c7cc7d27e3c8821f72acd8b3dc986c6a3aab5fdc
def mk_py(names): '\n Generate .py file with a dict of default (english) values\n ' log('> Generating __maps.py with default (english) values') py_file = codecs.open(PY_PATH, 'w', ' utf8') py_file.write('# -*- coding: utf-8 -*-\n\n# AUTOMATICALLY GENERATED FILE, DO NOT EDIT') def write(key): py_file.write(('\n\n%s = {\n' % key)) for (k, v) in names[key].items(): py_file.write((u" '%s': u'%s',\n" % (k, v.replace(u"'", u"\\'")))) py_file.write('}') for name in ITEMS: write(name) py_file.close()
Generate .py file with a dict of default (english) values
maker/l18n_maker/mktrans.py
mk_py
tkhyn/l18n
3
python
def mk_py(names): '\n \n ' log('> Generating __maps.py with default (english) values') py_file = codecs.open(PY_PATH, 'w', ' utf8') py_file.write('# -*- coding: utf-8 -*-\n\n# AUTOMATICALLY GENERATED FILE, DO NOT EDIT') def write(key): py_file.write(('\n\n%s = {\n' % key)) for (k, v) in names[key].items(): py_file.write((u" '%s': u'%s',\n" % (k, v.replace(u"'", u"\\'")))) py_file.write('}') for name in ITEMS: write(name) py_file.close()
def mk_py(names): '\n \n ' log('> Generating __maps.py with default (english) values') py_file = codecs.open(PY_PATH, 'w', ' utf8') py_file.write('# -*- coding: utf-8 -*-\n\n# AUTOMATICALLY GENERATED FILE, DO NOT EDIT') def write(key): py_file.write(('\n\n%s = {\n' % key)) for (k, v) in names[key].items(): py_file.write((u" '%s': u'%s',\n" % (k, v.replace(u"'", u"\\'")))) py_file.write('}') for name in ITEMS: write(name) py_file.close()<|docstring|>Generate .py file with a dict of default (english) values<|endoftext|>
6bf3d4e98b0c0ad1811d52fc513f8090cc7b3688816d721f5e6485bd3bdad83d
def mk_po(loc, root_names, trans): '\n Generate a .po file for locale loc\n ' header = (u'# PYTZ TIMEZONE CITIES AND TERRITORIES TRANSLATION FILE\n\nmsgid ""\nmsgstr ""\n\n"Project-Id-Version: l18n\\n"\n"Report-Msgid-Bugs-To: \\n"\n"POT-Creation-Date: %(date)s\\n"\n"PO-Revision-Date: \\n"\n"Last-Translator: l18n maker\\n"\n"Language-Team: \\n"\n"MIME-Version: 1.0\\n"\n"Content-Type: text/plain; charset=UTF-8\\n"\n"Content-Transfer-Encoding: 8bit\\n"\n"Plural-Forms: nplurals=2; plural=(n > 1)\\n"\n"X-Poedit-SourceCharset: utf-8\\n"\n"Language: ' % {'date': datetime.now(pytz.utc).replace(microsecond=0)}) log(('> Generating .po file for locale ' + loc)) po_path = (PO_PATH % loc) try: os.makedirs(os.path.dirname(po_path)) except OSError: pass po_file = codecs.open(po_path, 'w', ' utf8') po_file.write(((header + loc) + u'\\n"\n\n')) def write(key): for (k, v) in trans[key].items(): try: root_name = root_names[key][k] except KeyError: if (key == 'tz_locations'): root_name = root_names['tz_cities'][k] else: raise po_file.write((u'msgid "%s"\nmsgstr "%s"\n\n' % (root_name, v))) for name in ITEMS: write(name) po_file.close() return po_path
Generate a .po file for locale loc
maker/l18n_maker/mktrans.py
mk_po
tkhyn/l18n
3
python
def mk_po(loc, root_names, trans): '\n    \n    ' header = (u'# PYTZ TIMEZONE CITIES AND TERRITORIES TRANSLATION FILE\n\nmsgid ""\nmsgstr ""\n\n"Project-Id-Version: l18n\\n"\n"Report-Msgid-Bugs-To: \\n"\n"POT-Creation-Date: %(date)s\\n"\n"PO-Revision-Date: \\n"\n"Last-Translator: l18n maker\\n"\n"Language-Team: \\n"\n"MIME-Version: 1.0\\n"\n"Content-Type: text/plain; charset=UTF-8\\n"\n"Content-Transfer-Encoding: 8bit\\n"\n"Plural-Forms: nplurals=2; plural=(n > 1)\\n"\n"X-Poedit-SourceCharset: utf-8\\n"\n"Language: ' % {'date': datetime.now(pytz.utc).replace(microsecond=0)}) log(('> Generating .po file for locale ' + loc)) po_path = (PO_PATH % loc) try: os.makedirs(os.path.dirname(po_path)) except OSError: pass po_file = codecs.open(po_path, 'w', ' utf8') po_file.write(((header + loc) + u'\\n"\n\n')) def write(key): for (k, v) in trans[key].items(): try: root_name = root_names[key][k] except KeyError: if (key == 'tz_locations'): root_name = root_names['tz_cities'][k] else: raise po_file.write((u'msgid "%s"\nmsgstr "%s"\n\n' % (root_name, v))) for name in ITEMS: write(name) po_file.close() return po_path
def mk_po(loc, root_names, trans): '\n    \n    ' header = (u'# PYTZ TIMEZONE CITIES AND TERRITORIES TRANSLATION FILE\n\nmsgid ""\nmsgstr ""\n\n"Project-Id-Version: l18n\\n"\n"Report-Msgid-Bugs-To: \\n"\n"POT-Creation-Date: %(date)s\\n"\n"PO-Revision-Date: \\n"\n"Last-Translator: l18n maker\\n"\n"Language-Team: \\n"\n"MIME-Version: 1.0\\n"\n"Content-Type: text/plain; charset=UTF-8\\n"\n"Content-Transfer-Encoding: 8bit\\n"\n"Plural-Forms: nplurals=2; plural=(n > 1)\\n"\n"X-Poedit-SourceCharset: utf-8\\n"\n"Language: ' % {'date': datetime.now(pytz.utc).replace(microsecond=0)}) log(('> Generating .po file for locale ' + loc)) po_path = (PO_PATH % loc) try: os.makedirs(os.path.dirname(po_path)) except OSError: pass po_file = codecs.open(po_path, 'w', ' utf8') po_file.write(((header + loc) + u'\\n"\n\n')) def write(key): for (k, v) in trans[key].items(): try: root_name = root_names[key][k] except KeyError: if (key == 'tz_locations'): root_name = root_names['tz_cities'][k] else: raise po_file.write((u'msgid "%s"\nmsgstr "%s"\n\n' % (root_name, v))) for name in ITEMS: write(name) po_file.close() return po_path<|docstring|>Generate a .po file for locale loc<|endoftext|>
bc83df16ffd32fe2561777d3de3b86e0fecbf680f7f5b52cd71cc3eb3a01f9e0
def forward(self, encodings: Dict[(str, Any)], sent_lengths: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]: 'Predict POS, heads and deprel scores.\n\n ## Outputs\n\n `tag_scores, arc_scores, lab_scores` with shapes\n\n - `tag_score`: $`batch_size×max_sent_length×num_pos_tags`$\n - `arc_scores`: $`batch_size×n_deps×n_possible_heads`$\n - `label_scores`: $`batch_size×n_deps×n_possible_heads×num_deprels`$\n ' embeddings = [self.lexers[lexer_name](encodings[lexer_name]) for lexer_name in self.lexers_order] inpt = torch.cat(embeddings, dim=(- 1)) packed_inpt = pack_padded_sequence(inpt, sent_lengths, batch_first=True, enforce_sorted=False) (packed_dep_embeddings, _) = self.dep_rnn(packed_inpt) (dep_embeddings, _) = pad_packed_sequence(packed_dep_embeddings, batch_first=True) tag_scores = self.pos_tagger(dep_embeddings) arc_h = self.arc_mlp_h(dep_embeddings) arc_d = self.arc_mlp_d(dep_embeddings) lab_h = self.lab_mlp_h(dep_embeddings) lab_d = self.lab_mlp_d(dep_embeddings) arc_scores = self.arc_biaffine(arc_d, arc_h).squeeze((- 1)) lab_scores = self.lab_biaffine(lab_d, lab_h) return (tag_scores, arc_scores, lab_scores)
Predict POS, heads and deprel scores. ## Outputs `tag_scores, arc_scores, lab_scores` with shapes - `tag_score`: $`batch_size×max_sent_length×num_pos_tags`$ - `arc_scores`: $`batch_size×n_deps×n_possible_heads`$ - `label_scores`: $`batch_size×n_deps×n_possible_heads×num_deprels`$
hopsparser/parser.py
forward
hopsparser/npdependency
0
python
def forward(self, encodings: Dict[(str, Any)], sent_lengths: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]: 'Predict POS, heads and deprel scores.\n\n ## Outputs\n\n `tag_scores, arc_scores, lab_scores` with shapes\n\n - `tag_score`: $`batch_size×max_sent_length×num_pos_tags`$\n - `arc_scores`: $`batch_size×n_deps×n_possible_heads`$\n - `label_scores`: $`batch_size×n_deps×n_possible_heads×num_deprels`$\n ' embeddings = [self.lexers[lexer_name](encodings[lexer_name]) for lexer_name in self.lexers_order] inpt = torch.cat(embeddings, dim=(- 1)) packed_inpt = pack_padded_sequence(inpt, sent_lengths, batch_first=True, enforce_sorted=False) (packed_dep_embeddings, _) = self.dep_rnn(packed_inpt) (dep_embeddings, _) = pad_packed_sequence(packed_dep_embeddings, batch_first=True) tag_scores = self.pos_tagger(dep_embeddings) arc_h = self.arc_mlp_h(dep_embeddings) arc_d = self.arc_mlp_d(dep_embeddings) lab_h = self.lab_mlp_h(dep_embeddings) lab_d = self.lab_mlp_d(dep_embeddings) arc_scores = self.arc_biaffine(arc_d, arc_h).squeeze((- 1)) lab_scores = self.lab_biaffine(lab_d, lab_h) return (tag_scores, arc_scores, lab_scores)
def forward(self, encodings: Dict[(str, Any)], sent_lengths: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]: 'Predict POS, heads and deprel scores.\n\n ## Outputs\n\n `tag_scores, arc_scores, lab_scores` with shapes\n\n - `tag_score`: $`batch_size×max_sent_length×num_pos_tags`$\n - `arc_scores`: $`batch_size×n_deps×n_possible_heads`$\n - `label_scores`: $`batch_size×n_deps×n_possible_heads×num_deprels`$\n ' embeddings = [self.lexers[lexer_name](encodings[lexer_name]) for lexer_name in self.lexers_order] inpt = torch.cat(embeddings, dim=(- 1)) packed_inpt = pack_padded_sequence(inpt, sent_lengths, batch_first=True, enforce_sorted=False) (packed_dep_embeddings, _) = self.dep_rnn(packed_inpt) (dep_embeddings, _) = pad_packed_sequence(packed_dep_embeddings, batch_first=True) tag_scores = self.pos_tagger(dep_embeddings) arc_h = self.arc_mlp_h(dep_embeddings) arc_d = self.arc_mlp_d(dep_embeddings) lab_h = self.lab_mlp_h(dep_embeddings) lab_d = self.lab_mlp_d(dep_embeddings) arc_scores = self.arc_biaffine(arc_d, arc_h).squeeze((- 1)) lab_scores = self.lab_biaffine(lab_d, lab_h) return (tag_scores, arc_scores, lab_scores)<|docstring|>Predict POS, heads and deprel scores. ## Outputs `tag_scores, arc_scores, lab_scores` with shapes - `tag_score`: $`batch_size×max_sent_length×num_pos_tags`$ - `arc_scores`: $`batch_size×n_deps×n_possible_heads`$ - `label_scores`: $`batch_size×n_deps×n_possible_heads×num_deprels`$<|endoftext|>
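A minimal, self-contained sketch of the pack/pad round trip used in forward(); the LSTM below stands in for self.dep_rnn, and its sizes and bidirectionality are illustrative, not taken from the parser config.

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

rnn = torch.nn.LSTM(input_size=8, hidden_size=4, batch_first=True,
                    bidirectional=True)
inpt = torch.randn(3, 5, 8)             # batch x max_sent_length x embedding dim
sent_lengths = torch.tensor([5, 3, 2])  # true lengths, kept on CPU

# Packing lets the RNN skip padded timesteps; padding back restores the
# rectangular batch expected by the downstream MLPs and biaffine layers.
packed = pack_padded_sequence(inpt, sent_lengths, batch_first=True,
                              enforce_sorted=False)
packed_out, _ = rnn(packed)
out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)
print(out.shape)    # torch.Size([3, 5, 8]), hidden 4 x 2 directions
print(out_lengths)  # tensor([5, 3, 2])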
bb5aef842639ce35d23d1d76422b9640dc74515c5b33dfcf49ce4ca4c80126d0
def get_version(filename): '\n Trivial parser to extract a __version__ variable from a source file.\n\n :param str filename: the file to extract __version__ from\n :returns str: the version string for the package\n ' version_re = re.compile('(\\d\\.\\d(\\.\\d+)?)') with io.open(filename, 'r') as source: for (line_num, line) in enumerate(source): if line.startswith('__version__'): match = version_re.search(line) if (not match): raise Exception(('Invalid __version__ string found on line %d of %s' % ((line_num + 1), filename))) return match.group(1) raise Exception(('No __version__ line found in %s' % filename))
Trivial parser to extract a __version__ variable from a source file. :param str filename: the file to extract __version__ from :returns str: the version string for the package
utils.py
get_version
waveform80/lars
14
python
def get_version(filename): '\n Trivial parser to extract a __version__ variable from a source file.\n\n :param str filename: the file to extract __version__ from\n :returns str: the version string for the package\n ' version_re = re.compile('(\\d\\.\\d(\\.\\d+)?)') with io.open(filename, 'r') as source: for (line_num, line) in enumerate(source): if line.startswith('__version__'): match = version_re.search(line) if (not match): raise Exception(('Invalid __version__ string found on line %d of %s' % ((line_num + 1), filename))) return match.group(1) raise Exception(('No __version__ line found in %s' % filename))
def get_version(filename): '\n Trivial parser to extract a __version__ variable from a source file.\n\n :param str filename: the file to extract __version__ from\n :returns str: the version string for the package\n ' version_re = re.compile('(\\d\\.\\d(\\.\\d+)?)') with io.open(filename, 'r') as source: for (line_num, line) in enumerate(source): if line.startswith('__version__'): match = version_re.search(line) if (not match): raise Exception(('Invalid __version__ string found on line %d of %s' % ((line_num + 1), filename))) return match.group(1) raise Exception(('No __version__ line found in %s' % filename))<|docstring|>Trivial parser to extract a __version__ variable from a source file. :param str filename: the file to extract __version__ from :returns str: the version string for the package<|endoftext|>
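A round trip for get_version, assuming the function above is in scope; note that the regex (\d\.\d(\.\d+)?) captures only one digit for the major and minor parts, so a version like '1.10.0' would be reported as '1.1'.

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as module:
    module.write("__version__ = '1.0.2'\n")
print(get_version(module.name))  # 1.0.2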
40bff2d6f96270fbfe2962f1e83e066aa2b15882607bc75e31c1b52e4a4e411d
def description(filename): '\n Returns the first non-heading paragraph from a ReStructuredText file.\n\n :param str filename: the file to extract the description from\n :returns str: the description of the package\n ' state = 'before_header' result = [] with io.open(filename, 'r') as rst_file: for line in rst_file: line = line.rstrip() if (line == ''): if (state == 'in_para'): state = 'after_para' elif (line == ('=' * len(line))): if (state == 'before_header'): state = 'in_header' elif (state == 'in_header'): state = 'before_para' elif (state == 'before_para'): state = 'in_para' if (state == 'in_para'): result.append(line) elif (state == 'after_para'): break return ' '.join((line.strip() for line in result))
Returns the first non-heading paragraph from a ReStructuredText file. :param str filename: the file to extract the description from :returns str: the description of the package
utils.py
description
waveform80/lars
14
python
def description(filename): '\n    Returns the first non-heading paragraph from a ReStructuredText file.\n\n    :param str filename: the file to extract the description from\n    :returns str: the description of the package\n    ' state = 'before_header' result = [] with io.open(filename, 'r') as rst_file: for line in rst_file: line = line.rstrip() if (line == ''): if (state == 'in_para'): state = 'after_para' elif (line == ('=' * len(line))): if (state == 'before_header'): state = 'in_header' elif (state == 'in_header'): state = 'before_para' elif (state == 'before_para'): state = 'in_para' if (state == 'in_para'): result.append(line) elif (state == 'after_para'): break return ' '.join((line.strip() for line in result))
def description(filename): '\n    Returns the first non-heading paragraph from a ReStructuredText file.\n\n    :param str filename: the file to extract the description from\n    :returns str: the description of the package\n    ' state = 'before_header' result = [] with io.open(filename, 'r') as rst_file: for line in rst_file: line = line.rstrip() if (line == ''): if (state == 'in_para'): state = 'after_para' elif (line == ('=' * len(line))): if (state == 'before_header'): state = 'in_header' elif (state == 'in_header'): state = 'before_para' elif (state == 'before_para'): state = 'in_para' if (state == 'in_para'): result.append(line) elif (state == 'after_para'): break return ' '.join((line.strip() for line in result))<|docstring|>Returns the first non-heading paragraph from a ReStructuredText file. :param str filename: the file to extract the description from :returns str: the description of the package<|endoftext|>
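A check of the description state machine, assuming the function above is in scope with its io import; the title must be both overlined and underlined with '=' for the machine to reach before_para, and only the first paragraph is joined.

import tempfile

rst = ('=======\n'
       'package\n'
       '=======\n'
       '\n'
       'A tiny library that\n'
       'does one thing well.\n'
       '\n'
       'Second paragraph, never reached.\n')
with tempfile.NamedTemporaryFile('w', suffix='.rst', delete=False) as doc:
    doc.write(rst)
print(description(doc.name))  # A tiny library that does one thing well.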
0fdbd41d54fe7cedae6636988d42f8776c97586de1305d1cac3a10582197315a
def configure_session(intra_threads=32, inter_threads=2, kmp_blocktime=None, kmp_affinity=None, omp_num_threads=None, gpu=None): 'Sets the thread knobs in the TF backend' if (kmp_blocktime is not None): os.environ['KMP_BLOCKTIME'] = str(kmp_blocktime) if (kmp_affinity is not None): os.environ['KMP_AFFINITY'] = kmp_affinity if (omp_num_threads is not None): os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) if (dist.rank() == 0): logging.info('KMP_BLOCKTIME %s', os.environ.get('KMP_BLOCKTIME', '')) logging.info('KMP_AFFINITY %s', os.environ.get('KMP_AFFINITY', '')) logging.info('OMP_NUM_THREADS %s', os.environ.get('OMP_NUM_THREADS', '')) logging.info('INTRA_THREADS %i', intra_threads) logging.info('INTER_THREADS %i', inter_threads) config = tf.ConfigProto(inter_op_parallelism_threads=inter_threads, intra_op_parallelism_threads=intra_threads) if (gpu is not None): config.gpu_options.visible_device_list = str(gpu) tf.keras.backend.set_session(tf.Session(config=config))
Sets the thread knobs in the TF backend
utils/device.py
configure_session
lukasgd/cosmoflow-benchmark
0
python
def configure_session(intra_threads=32, inter_threads=2, kmp_blocktime=None, kmp_affinity=None, omp_num_threads=None, gpu=None): if (kmp_blocktime is not None): os.environ['KMP_BLOCKTIME'] = str(kmp_blocktime) if (kmp_affinity is not None): os.environ['KMP_AFFINITY'] = kmp_affinity if (omp_num_threads is not None): os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) if (dist.rank() == 0): logging.info('KMP_BLOCKTIME %s', os.environ.get('KMP_BLOCKTIME', '')) logging.info('KMP_AFFINITY %s', os.environ.get('KMP_AFFINITY', '')) logging.info('OMP_NUM_THREADS %s', os.environ.get('OMP_NUM_THREADS', '')) logging.info('INTRA_THREADS %i', intra_threads) logging.info('INTER_THREADS %i', inter_threads) config = tf.ConfigProto(inter_op_parallelism_threads=inter_threads, intra_op_parallelism_threads=intra_threads) if (gpu is not None): config.gpu_options.visible_device_list = str(gpu) tf.keras.backend.set_session(tf.Session(config=config))
def configure_session(intra_threads=32, inter_threads=2, kmp_blocktime=None, kmp_affinity=None, omp_num_threads=None, gpu=None): if (kmp_blocktime is not None): os.environ['KMP_BLOCKTIME'] = str(kmp_blocktime) if (kmp_affinity is not None): os.environ['KMP_AFFINITY'] = kmp_affinity if (omp_num_threads is not None): os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) if (dist.rank() == 0): logging.info('KMP_BLOCKTIME %s', os.environ.get('KMP_BLOCKTIME', '')) logging.info('KMP_AFFINITY %s', os.environ.get('KMP_AFFINITY', '')) logging.info('OMP_NUM_THREADS %s', os.environ.get('OMP_NUM_THREADS', '')) logging.info('INTRA_THREADS %i', intra_threads) logging.info('INTER_THREADS %i', inter_threads) config = tf.ConfigProto(inter_op_parallelism_threads=inter_threads, intra_op_parallelism_threads=intra_threads) if (gpu is not None): config.gpu_options.visible_device_list = str(gpu) tf.keras.backend.set_session(tf.Session(config=config))<|docstring|>Sets the thread knobs in the TF backend<|endoftext|>
fa0ff45cedea8a0c17de6539a7f7fbaf3f7fedbde4edb56f9f982e488f0bde22
@click.group() def datastore(): u'Perform commands to set up the datastore.\n '
Perform commands to set up the datastore.
ckanext/datastore/cli.py
datastore
gvarela1981/ckan
58
python
@click.group() def datastore(): u'\n '
@click.group() def datastore(): u'\n '<|docstring|>Perform commands to set up the datastore.<|endoftext|>
ac43d477aee532855d1ad748d35dbb0a28da506b72a5597848997ebb9bdfdeb7
@datastore.command(u'set-permissions', short_help=u'Generate SQL for permission configuration.') def set_permissions(): u'Emit an SQL script that will set the permissions for the datastore\n users as configured in your configuration file.' write_url = _parse_db_config(u'ckan.datastore.write_url') read_url = _parse_db_config(u'ckan.datastore.read_url') db_url = _parse_db_config(u'sqlalchemy.url') if (write_url[u'db_name'] != read_url[u'db_name']): click.secho(u'The datastore write_url and read_url must refer to the same database!', fg=u'red', bold=True) raise click.Abort() sql = permissions_sql(maindb=db_url[u'db_name'], datastoredb=write_url[u'db_name'], mainuser=db_url[u'db_user'], writeuser=write_url[u'db_user'], readuser=read_url[u'db_user']) click.echo(sql)
Emit an SQL script that will set the permissions for the datastore users as configured in your configuration file.
ckanext/datastore/cli.py
set_permissions
gvarela1981/ckan
58
python
@datastore.command(u'set-permissions', short_help=u'Generate SQL for permission configuration.') def set_permissions(): u'Emit an SQL script that will set the permissions for the datastore\n users as configured in your configuration file.' write_url = _parse_db_config(u'ckan.datastore.write_url') read_url = _parse_db_config(u'ckan.datastore.read_url') db_url = _parse_db_config(u'sqlalchemy.url') if (write_url[u'db_name'] != read_url[u'db_name']): click.secho(u'The datastore write_url and read_url must refer to the same database!', fg=u'red', bold=True) raise click.Abort() sql = permissions_sql(maindb=db_url[u'db_name'], datastoredb=write_url[u'db_name'], mainuser=db_url[u'db_user'], writeuser=write_url[u'db_user'], readuser=read_url[u'db_user']) click.echo(sql)
@datastore.command(u'set-permissions', short_help=u'Generate SQL for permission configuration.') def set_permissions(): u'Emit an SQL script that will set the permissions for the datastore\n users as configured in your configuration file.' write_url = _parse_db_config(u'ckan.datastore.write_url') read_url = _parse_db_config(u'ckan.datastore.read_url') db_url = _parse_db_config(u'sqlalchemy.url') if (write_url[u'db_name'] != read_url[u'db_name']): click.secho(u'The datastore write_url and read_url must refer to the same database!', fg=u'red', bold=True) raise click.Abort() sql = permissions_sql(maindb=db_url[u'db_name'], datastoredb=write_url[u'db_name'], mainuser=db_url[u'db_user'], writeuser=write_url[u'db_user'], readuser=read_url[u'db_user']) click.echo(sql)<|docstring|>Emit an SQL script that will set the permissions for the datastore users as configured in your configuration file.<|endoftext|>
5f5fca688c9937bd3275bfcb61de16e8c54d0a4ced370d376f596313406bb2a7
@datastore.command() @click.argument(u'resource-id', nargs=1) @click.argument(u'output-file', type=click.File(u'wb'), default=click.get_binary_stream(u'stdout')) @click.option(u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS)) @click.option(u'--offset', type=click.IntRange(0, None), default=0) @click.option(u'--limit', type=click.IntRange(0)) @click.option(u'--bom', is_flag=True) @click.pass_context def dump(ctx, resource_id, output_file, format, offset, limit, bom): u'Dump a datastore resource.\n ' flask_app = ctx.meta['flask_app'] with flask_app.test_request_context(): dump_to(resource_id, output_file, fmt=format, offset=offset, limit=limit, options={u'bom': bom}, sort=u'_id', search_params={})
Dump a datastore resource.
ckanext/datastore/cli.py
dump
gvarela1981/ckan
58
python
@datastore.command() @click.argument(u'resource-id', nargs=1) @click.argument(u'output-file', type=click.File(u'wb'), default=click.get_binary_stream(u'stdout')) @click.option(u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS)) @click.option(u'--offset', type=click.IntRange(0, None), default=0) @click.option(u'--limit', type=click.IntRange(0)) @click.option(u'--bom', is_flag=True) @click.pass_context def dump(ctx, resource_id, output_file, format, offset, limit, bom): u'\n ' flask_app = ctx.meta['flask_app'] with flask_app.test_request_context(): dump_to(resource_id, output_file, fmt=format, offset=offset, limit=limit, options={u'bom': bom}, sort=u'_id', search_params={})
@datastore.command() @click.argument(u'resource-id', nargs=1) @click.argument(u'output-file', type=click.File(u'wb'), default=click.get_binary_stream(u'stdout')) @click.option(u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS)) @click.option(u'--offset', type=click.IntRange(0, None), default=0) @click.option(u'--limit', type=click.IntRange(0)) @click.option(u'--bom', is_flag=True) @click.pass_context def dump(ctx, resource_id, output_file, format, offset, limit, bom): u'\n ' flask_app = ctx.meta['flask_app'] with flask_app.test_request_context(): dump_to(resource_id, output_file, fmt=format, offset=offset, limit=limit, options={u'bom': bom}, sort=u'_id', search_params={})<|docstring|>Dump a datastore resource.<|endoftext|>
c5390e1e8494b001e286b7d6833542c30343f759e3915ac918ea1abd98df83d4
def __init__(self, collections=[], query_bands=[], bbox=(), start_date=None, end_date=None, limit=30): 'Build DataCube object with config parameters including access token, STAC url and earth observation service url.' if (len(config.ACCESS_TOKEN) == 0): config.ACCESS_TOKEN = input('Please insert a valid user token from BDC Auth: ') if (len(config.EOCUBE_URL) == 0): config.EOCUBE_URL = input('Please insert a valid url for EO Service: ') if (len(config.STAC_URL) == 0): config.STAC_URL = input('Please insert a valid url for STAC Service: ') self.utils = Utils() self.stac_client = stac.STAC(config.STAC_URL, access_token=config.ACCESS_TOKEN) if (not collections): raise AttributeError('Please insert a list of available collections!') else: self.collections = collections if (not query_bands): raise AttributeError('Please insert a list of available bands with query_bands!') else: self.query_bands = query_bands if (not bbox): raise AttributeError('Please insert a bounding box parameter!') else: valid = self.utils._validateBBOX(bbox) if valid: self.bbox = bbox try: _start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d') _end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') if (_end_date <= _start_date): raise ValueError('Start date is greater than end date!') else: self.start_date = start_date self.end_date = end_date except: raise AttributeError('Dates are not correctly formatted, should be %Y-%m-%d') self.timeline = [] self.data_images = {} self.data_array = None items = None try: items = self.stac_client.search({'collections': self.collections, 'bbox': self.bbox, 'datetime': f'{self.start_date}/{self.end_date}', 'limit': limit}) except: raise RuntimeError('Connection refused!') images = [] if items: for item in items.features: bands = {} available_bands = item.get('properties').get('eo:bands') for band in available_bands: band_common_name = str(band.get('common_name', '')) band_name = str(band.get('name')) if (band_common_name in self.query_bands): bands[band_common_name] = band.get('name') elif (band_name in self.query_bands): bands[band_name] = band_name images.append(Image(item=item, bands=bands, bbox=self.bbox)) if (len(images) == 0): raise ValueError('No data cube created!') x_data = {} for image in images: date = image.time self.data_images[date] = image x_data[date] = [] for band in self.query_bands: data = delayed(image.getBand)(band) x_data[date].append({str(band): data}) self.timeline = sorted(list(x_data.keys())) data_timeline = {} for i in range(len(list(self.query_bands))): data_timeline[self.query_bands[i]] = [] for time in self.timeline: data_timeline[self.query_bands[i]].append(x_data[time][i][self.query_bands[i]]) time_series = [] for band in self.query_bands: time_series.append(data_timeline[band]) self.description = {} for collection in self.collections: response = self.stac_client.collections[collection] self.description[str(response['id'])] = str(response['title']) self.data_array = xr.DataArray(np.array(time_series), coords=[self.query_bands, self.timeline], dims=['band', 'time'], name=['DataCube']) self.data_array.attrs = self.description
Build DataCube object with config parameters including access token, STAC url and earth observation service url.
eocube/eocube.py
__init__
AbnerErnaniADSFatec/eocubes-dev
0
python
def __init__(self, collections=[], query_bands=[], bbox=(), start_date=None, end_date=None, limit=30): if (len(config.ACCESS_TOKEN) == 0): config.ACCESS_TOKEN = input('Please insert a valid user token from BDC Auth: ') if (len(config.EOCUBE_URL) == 0): config.EOCUBE_URL = input('Please insert a valid url for EO Service: ') if (len(config.STAC_URL) == 0): config.STAC_URL = input('Please insert a valid url for STAC Service: ') self.utils = Utils() self.stac_client = stac.STAC(config.STAC_URL, access_token=config.ACCESS_TOKEN) if (not collections): raise AttributeError('Please insert a list of available collections!') else: self.collections = collections if (not query_bands): raise AttributeError('Please insert a list of available bands with query_bands!') else: self.query_bands = query_bands if (not bbox): raise AttributeError('Please insert a bounding box parameter!') else: valid = self.utils._validateBBOX(bbox) if valid: self.bbox = bbox try: _start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d') _end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') if (_end_date <= _start_date): raise ValueError('Start date is greater than end date!') else: self.start_date = start_date self.end_date = end_date except: raise AttributeError('Dates are not correctly formatted, should be %Y-%m-%d') self.timeline = [] self.data_images = {} self.data_array = None items = None try: items = self.stac_client.search({'collections': self.collections, 'bbox': self.bbox, 'datetime': f'{self.start_date}/{self.end_date}', 'limit': limit}) except: raise RuntimeError('Connection refused!') images = [] if items: for item in items.features: bands = {} available_bands = item.get('properties').get('eo:bands') for band in available_bands: band_common_name = str(band.get('common_name', '')) band_name = str(band.get('name')) if (band_common_name in self.query_bands): bands[band_common_name] = band.get('name') elif (band_name in self.query_bands): bands[band_name] = band_name images.append(Image(item=item, bands=bands, bbox=self.bbox)) if (len(images) == 0): raise ValueError('No data cube created!') x_data = {} for image in images: date = image.time self.data_images[date] = image x_data[date] = [] for band in self.query_bands: data = delayed(image.getBand)(band) x_data[date].append({str(band): data}) self.timeline = sorted(list(x_data.keys())) data_timeline = {} for i in range(len(list(self.query_bands))): data_timeline[self.query_bands[i]] = [] for time in self.timeline: data_timeline[self.query_bands[i]].append(x_data[time][i][self.query_bands[i]]) time_series = [] for band in self.query_bands: time_series.append(data_timeline[band]) self.description = {} for collection in self.collections: response = self.stac_client.collections[collection] self.description[str(response['id'])] = str(response['title']) self.data_array = xr.DataArray(np.array(time_series), coords=[self.query_bands, self.timeline], dims=['band', 'time'], name=['DataCube']) self.data_array.attrs = self.description
def __init__(self, collections=[], query_bands=[], bbox=(), start_date=None, end_date=None, limit=30): if (len(config.ACCESS_TOKEN) == 0): config.ACCESS_TOKEN = input('Please insert a valid user token from BDC Auth: ') if (len(config.EOCUBE_URL) == 0): config.EOCUBE_URL = input('Please insert a valid url for EO Service: ') if (len(config.STAC_URL) == 0): config.STAC_URL = input('Please insert a valid url for STAC Service: ') self.utils = Utils() self.stac_client = stac.STAC(config.STAC_URL, access_token=config.ACCESS_TOKEN) if (not collections): raise AttributeError('Please insert a list of available collections!') else: self.collections = collections if (not query_bands): raise AttributeError('Please insert a list of available bands with query_bands!') else: self.query_bands = query_bands if (not bbox): raise AttributeError('Please insert a bounding box parameter!') else: valid = self.utils._validateBBOX(bbox) if valid: self.bbox = bbox try: _start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d') _end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') if (_end_date <= _start_date): raise ValueError('Start date is greater than end date!') else: self.start_date = start_date self.end_date = end_date except: raise AttributeError('Dates are not correctly formatted, should be %Y-%m-%d') self.timeline = [] self.data_images = {} self.data_array = None items = None try: items = self.stac_client.search({'collections': self.collections, 'bbox': self.bbox, 'datetime': f'{self.start_date}/{self.end_date}', 'limit': limit}) except: raise RuntimeError('Connection refused!') images = [] if items: for item in items.features: bands = {} available_bands = item.get('properties').get('eo:bands') for band in available_bands: band_common_name = str(band.get('common_name', '')) band_name = str(band.get('name')) if (band_common_name in self.query_bands): bands[band_common_name] = band.get('name') elif (band_name in self.query_bands): bands[band_name] = band_name images.append(Image(item=item, bands=bands, bbox=self.bbox)) if (len(images) == 0): raise ValueError('No data cube created!') x_data = {} for image in images: date = image.time self.data_images[date] = image x_data[date] = [] for band in self.query_bands: data = delayed(image.getBand)(band) x_data[date].append({str(band): data}) self.timeline = sorted(list(x_data.keys())) data_timeline = {} for i in range(len(list(self.query_bands))): data_timeline[self.query_bands[i]] = [] for time in self.timeline: data_timeline[self.query_bands[i]].append(x_data[time][i][self.query_bands[i]]) time_series = [] for band in self.query_bands: time_series.append(data_timeline[band]) self.description = {} for collection in self.collections: response = self.stac_client.collections[collection] self.description[str(response['id'])] = str(response['title']) self.data_array = xr.DataArray(np.array(time_series), coords=[self.query_bands, self.timeline], dims=['band', 'time'], name=['DataCube']) self.data_array.attrs = self.description<|docstring|>Build DataCube object with config parameters including access token, STAC url and earth observation service url.<|endoftext|>
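The constructor's key trick is wrapping every image.getBand call in dask's delayed, so no raster is read until a later .compute(); a stand-in sketch of that pattern, with load_band replacing the Image/STAC machinery, which is not reproduced here.

import numpy as np
from dask import delayed

def load_band(date, band):
    print('reading', date, band)  # runs only on compute()
    return np.full((2, 2), hash((date, band)) % 100)

# Same shape as x_data above: per date, a list of one-key {band: delayed} dicts.
lazy = {date: [{band: delayed(load_band)(date, band)} for band in ('red', 'nir')]
        for date in ('2020-01-01', '2020-02-01')}
raster = lazy['2020-01-01'][1]['nir'].compute()  # first actual read happens here
print(raster.shape)  # (2, 2)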
c9c346176dd85163385cf4defbfd4ef3e3e5bd13811dcadcf5cf336db6909a50
def nearTime(self, time): 'Search the dataset for a date time near the given time.\n        Return a date time from the dataset timeline.\n\n        Parameters\n\n         - time <datetime, required>: the given date time to search, formatted "yyyy-mm-dd".\n\n        Raise\n\n         - ValueError: If time is not correctly formatted.\n        ' _date = self.data_array.sel(time=time, method='nearest').time.values _date = datetime.datetime.utcfromtimestamp((_date.tolist() / 1000000000.0)) return _date
Search the dataset for a date time near the given time. Return a date time from the dataset timeline. Parameters - time <datetime, required>: the given date time to search, formatted "yyyy-mm-dd". Raise - ValueError: If time is not correctly formatted.
eocube/eocube.py
nearTime
AbnerErnaniADSFatec/eocubes-dev
0
python
def nearTime(self, time): 'Search the dataset for a date time near the given time.\n        Return a date time from the dataset timeline.\n\n        Parameters\n\n         - time <datetime, required>: the given date time to search, formatted "yyyy-mm-dd".\n\n        Raise\n\n         - ValueError: If time is not correctly formatted.\n        ' _date = self.data_array.sel(time=time, method='nearest').time.values _date = datetime.datetime.utcfromtimestamp((_date.tolist() / 1000000000.0)) return _date
def nearTime(self, time): 'Search the dataset for a date time near the given time.\n        Return a date time from the dataset timeline.\n\n        Parameters\n\n         - time <datetime, required>: the given date time to search, formatted "yyyy-mm-dd".\n\n        Raise\n\n         - ValueError: If time is not correctly formatted.\n        ' _date = self.data_array.sel(time=time, method='nearest').time.values _date = datetime.datetime.utcfromtimestamp((_date.tolist() / 1000000000.0)) return _date<|docstring|>Search the dataset for a date time near the given time. Return a date time from the dataset timeline. Parameters - time <datetime, required>: the given date time to search, formatted "yyyy-mm-dd". Raise - ValueError: If time is not correctly formatted.<|endoftext|>
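The same sel(..., method='nearest') lookup nearTime relies on, reproduced on a toy timeline; the nanosecond-timestamp conversion mirrors the method above.

import datetime
import numpy as np
import pandas as pd
import xarray as xr

timeline = pd.to_datetime(['2020-01-01', '2020-01-17', '2020-02-02'])
cube = xr.DataArray(np.arange(3), coords=[timeline], dims=['time'])
nearest = cube.sel(time=pd.Timestamp('2020-01-20'), method='nearest').time.values
print(datetime.datetime.utcfromtimestamp(nearest.tolist() / 1e9))
# 2020-01-17 00:00:00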
94c60fca90f9ec961cefb2d84de22127beb07f5cd5198e9d19752a0007b25ff0
def select(self, band=None, time=None, start_date=None, end_date=None): 'Select method to retrieve data from the delayed dataset; returns the whole dataset for blank params but takes longer.\n\n        Parameters:\n\n         - band <string, optional>: The common name of band (nir, ndvi, red, ... see info.collections).\n\n         - time <string, optional>: The given time to retrieve a single image formatted "yyyy-mm-dd".\n\n         - start_date <string, optional>: The string start date formatted "yyyy-mm-dd" to complete the interval.\n\n         - end_date <string, optional>: The string end date formatted "yyyy-mm-dd" to complete the interval and retrieve a dataset.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' result = None if (start_date and end_date): _start_date = self.nearTime(start_date) _end_date = self.nearTime(end_date) else: _start_date = self.nearTime(self.start_date) _end_date = self.nearTime(self.end_date) if band: if time: _date = self.nearTime(time) _timeline = [_date] _data = self.data_array.loc[(band, _date)].values.reshape(1) else: _data = self.data_array.loc[band, _start_date:_end_date] _timeline = _data.time.values _data = _data.values _result = [] for raster in _data: value = raster.compute() _result.append(value) _x = list(range(0, value.shape[1])) _y = list(range(0, value.shape[0])) result = xr.DataArray(np.array(_result), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=[f'ResultSearch_{band}']) else: _bands = self.query_bands _timeline = self.timeline _data = [] for band in _bands: d = self.data_array.loc[band].values values = [] for i in range(len(d)): raster = d[i].compute() values.append(raster) _x = list(range(0, raster.shape[1])) _y = list(range(0, raster.shape[0])) _data.append(values) result = xr.DataArray(np.array(_data), coords=[_bands, _timeline, _y, _x], dims=['band', 'time', 'y', 'x'], name=['DataCube']) result.attrs = self.description return result
Select method to retrieve data from the delayed dataset; returns the whole dataset for blank params but takes longer. Parameters: - band <string, optional>: The common name of band (nir, ndvi, red, ... see info.collections). - time <string, optional>: The given time to retrieve a single image formatted "yyyy-mm-dd". - start_date <string, optional>: The string start date formatted "yyyy-mm-dd" to complete the interval. - end_date <string, optional>: The string end date formatted "yyyy-mm-dd" to complete the interval and retrieve a dataset. Raise: - KeyError: If the given parameter does not exist.
eocube/eocube.py
select
AbnerErnaniADSFatec/eocubes-dev
0
python
def select(self, band=None, time=None, start_date=None, end_date=None): 'Select method to retrieve data from the delayed dataset; returns the whole dataset for blank params but takes longer.\n\n        Parameters:\n\n         - band <string, optional>: The common name of band (nir, ndvi, red, ... see info.collections).\n\n         - time <string, optional>: The given time to retrieve a single image formatted "yyyy-mm-dd".\n\n         - start_date <string, optional>: The string start date formatted "yyyy-mm-dd" to complete the interval.\n\n         - end_date <string, optional>: The string end date formatted "yyyy-mm-dd" to complete the interval and retrieve a dataset.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' result = None if (start_date and end_date): _start_date = self.nearTime(start_date) _end_date = self.nearTime(end_date) else: _start_date = self.nearTime(self.start_date) _end_date = self.nearTime(self.end_date) if band: if time: _date = self.nearTime(time) _timeline = [_date] _data = self.data_array.loc[(band, _date)].values.reshape(1) else: _data = self.data_array.loc[band, _start_date:_end_date] _timeline = _data.time.values _data = _data.values _result = [] for raster in _data: value = raster.compute() _result.append(value) _x = list(range(0, value.shape[1])) _y = list(range(0, value.shape[0])) result = xr.DataArray(np.array(_result), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=[f'ResultSearch_{band}']) else: _bands = self.query_bands _timeline = self.timeline _data = [] for band in _bands: d = self.data_array.loc[band].values values = [] for i in range(len(d)): raster = d[i].compute() values.append(raster) _x = list(range(0, raster.shape[1])) _y = list(range(0, raster.shape[0])) _data.append(values) result = xr.DataArray(np.array(_data), coords=[_bands, _timeline, _y, _x], dims=['band', 'time', 'y', 'x'], name=['DataCube']) result.attrs = self.description return result
def select(self, band=None, time=None, start_date=None, end_date=None): 'Select method to retrieve data from the delayed dataset; returns the whole dataset for blank params but takes longer.\n\n        Parameters:\n\n         - band <string, optional>: The common name of band (nir, ndvi, red, ... see info.collections).\n\n         - time <string, optional>: The given time to retrieve a single image formatted "yyyy-mm-dd".\n\n         - start_date <string, optional>: The string start date formatted "yyyy-mm-dd" to complete the interval.\n\n         - end_date <string, optional>: The string end date formatted "yyyy-mm-dd" to complete the interval and retrieve a dataset.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' result = None if (start_date and end_date): _start_date = self.nearTime(start_date) _end_date = self.nearTime(end_date) else: _start_date = self.nearTime(self.start_date) _end_date = self.nearTime(self.end_date) if band: if time: _date = self.nearTime(time) _timeline = [_date] _data = self.data_array.loc[(band, _date)].values.reshape(1) else: _data = self.data_array.loc[band, _start_date:_end_date] _timeline = _data.time.values _data = _data.values _result = [] for raster in _data: value = raster.compute() _result.append(value) _x = list(range(0, value.shape[1])) _y = list(range(0, value.shape[0])) result = xr.DataArray(np.array(_result), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=[f'ResultSearch_{band}']) else: _bands = self.query_bands _timeline = self.timeline _data = [] for band in _bands: d = self.data_array.loc[band].values values = [] for i in range(len(d)): raster = d[i].compute() values.append(raster) _x = list(range(0, raster.shape[1])) _y = list(range(0, raster.shape[0])) _data.append(values) result = xr.DataArray(np.array(_data), coords=[_bands, _timeline, _y, _x], dims=['band', 'time', 'y', 'x'], name=['DataCube']) result.attrs = self.description return result<|docstring|>Select method to retrieve data from the delayed dataset; returns the whole dataset for blank params but takes longer. Parameters: - band <string, optional>: The common name of band (nir, ndvi, red, ... see info.collections). - time <string, optional>: The given time to retrieve a single image formatted "yyyy-mm-dd". - start_date <string, optional>: The string start date formatted "yyyy-mm-dd" to complete the interval. - end_date <string, optional>: The string end date formatted "yyyy-mm-dd" to complete the interval and retrieve a dataset. Raise: - KeyError: If the given parameter does not exist.<|endoftext|>
39ba0af42e93598442bf55c2226bc469b991977cac03f4ea4cd8fc3269611f83
def getTimeSeries(self, band=None, lon=None, lat=None, start_date=None, end_date=None): 'Get time series band values from a given point and timeline.\n\n        Parameters:\n\n         - band <string, optional>: The common name of the band (nir, ndvi, red, ... see info.collections).\n\n         - lon <float, optional>: The given longitude of the point (EPSG:4326).\n\n         - lat <float, optional>: The given latitude of the point (EPSG:4326).\n\n         - start_date <string, optional>: The start date string, formatted "yyyy-mm-dd", to complete the interval.\n\n         - end_date <string, optional>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' _image = None if (start_date and end_date): start = start_date end = end_date else: start = self.start_date end = self.end_date _start_date = datetime.datetime.strptime(start, '%Y-%m-%d') _end_date = datetime.datetime.strptime(end, '%Y-%m-%d') for time in self.timeline: if ((time.year == _start_date.year) and (time.month == _start_date.month)): _image = self.data_images[time] break if (not _image): _image = self.data_images[self.timeline[0]] point = _image._afimCoordsToPoint(lon, lat, band) _data = self.data_array.loc[band, _start_date:_end_date] sample = _data.values[0].compute() if ((point[0] >= sample.shape[0]) or (point[1] >= sample.shape[1])): raise ValueError(f'Given point is out of bounding box {self.bbox}') result = [] for raster in _data.values: result.append(raster.compute()[point[0]][point[1]]) _result = xr.DataArray(np.array(result), coords=[_data.time], dims=['time'], name=[f'TimeSeries_{band.upper()}']) _result.attrs = {'longitude': lon, 'latitude': lat} return _result
Get time series band values from a given point and timeline. Parameters: - band <string, optional>: The common name of the band (nir, ndvi, red, ... see info.collections). - lon <float, optional>: The given longitude of the point (EPSG:4326). - lat <float, optional>: The given latitude of the point (EPSG:4326). - start_date <string, optional>: The start date string, formatted "yyyy-mm-dd", to complete the interval. - end_date <string, optional>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset. Raise: - KeyError: If the given parameter does not exist.
eocube/eocube.py
getTimeSeries
AbnerErnaniADSFatec/eocubes-dev
0
python
def getTimeSeries(self, band=None, lon=None, lat=None, start_date=None, end_date=None): 'Get time series band values from a given point and timeline.\n\n        Parameters:\n\n         - band <string, optional>: The common name of the band (nir, ndvi, red, ... see info.collections).\n\n         - lon <float, optional>: The given longitude of the point (EPSG:4326).\n\n         - lat <float, optional>: The given latitude of the point (EPSG:4326).\n\n         - start_date <string, optional>: The start date string, formatted "yyyy-mm-dd", to complete the interval.\n\n         - end_date <string, optional>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' _image = None if (start_date and end_date): start = start_date end = end_date else: start = self.start_date end = self.end_date _start_date = datetime.datetime.strptime(start, '%Y-%m-%d') _end_date = datetime.datetime.strptime(end, '%Y-%m-%d') for time in self.timeline: if ((time.year == _start_date.year) and (time.month == _start_date.month)): _image = self.data_images[time] break if (not _image): _image = self.data_images[self.timeline[0]] point = _image._afimCoordsToPoint(lon, lat, band) _data = self.data_array.loc[band, _start_date:_end_date] sample = _data.values[0].compute() if ((point[0] >= sample.shape[0]) or (point[1] >= sample.shape[1])): raise ValueError(f'Given point is out of bounding box {self.bbox}') result = [] for raster in _data.values: result.append(raster.compute()[point[0]][point[1]]) _result = xr.DataArray(np.array(result), coords=[_data.time], dims=['time'], name=[f'TimeSeries_{band.upper()}']) _result.attrs = {'longitude': lon, 'latitude': lat} return _result
def getTimeSeries(self, band=None, lon=None, lat=None, start_date=None, end_date=None): 'Get time series band values from a given point and timeline.\n\n        Parameters:\n\n         - band <string, optional>: The common name of the band (nir, ndvi, red, ... see info.collections).\n\n         - lon <float, optional>: The given longitude of the point (EPSG:4326).\n\n         - lat <float, optional>: The given latitude of the point (EPSG:4326).\n\n         - start_date <string, optional>: The start date string, formatted "yyyy-mm-dd", to complete the interval.\n\n         - end_date <string, optional>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' _image = None if (start_date and end_date): start = start_date end = end_date else: start = self.start_date end = self.end_date _start_date = datetime.datetime.strptime(start, '%Y-%m-%d') _end_date = datetime.datetime.strptime(end, '%Y-%m-%d') for time in self.timeline: if ((time.year == _start_date.year) and (time.month == _start_date.month)): _image = self.data_images[time] break if (not _image): _image = self.data_images[self.timeline[0]] point = _image._afimCoordsToPoint(lon, lat, band) _data = self.data_array.loc[band, _start_date:_end_date] sample = _data.values[0].compute() if ((point[0] >= sample.shape[0]) or (point[1] >= sample.shape[1])): raise ValueError(f'Given point is out of bounding box {self.bbox}') result = [] for raster in _data.values: result.append(raster.compute()[point[0]][point[1]]) _result = xr.DataArray(np.array(result), coords=[_data.time], dims=['time'], name=[f'TimeSeries_{band.upper()}']) _result.attrs = {'longitude': lon, 'latitude': lat} return _result<|docstring|>Get time series band values from a given point and timeline. Parameters: - band <string, optional>: The common name of the band (nir, ndvi, red, ... see info.collections). - lon <float, optional>: The given longitude of the point (EPSG:4326). - lat <float, optional>: The given latitude of the point (EPSG:4326). - start_date <string, optional>: The start date string, formatted "yyyy-mm-dd", to complete the interval. - end_date <string, optional>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset. Raise: - KeyError: If the given parameter does not exist.<|endoftext|>
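A sketch of extracting a per-pixel series with getTimeSeries(); the coordinates below are placeholders and must fall inside the cube's bounding box:

    # Hypothetical usage; `cube` as above, lon/lat in EPSG:4326.
    ts = cube.getTimeSeries(band='ndvi', lon=-45.9, lat=-23.2,
                            start_date='2019-01-01', end_date='2019-12-31')
    ts.plot()         # xarray's built-in matplotlib line plot over the 'time' dimension
    print(ts.attrs)   # {'longitude': -45.9, 'latitude': -23.2}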
9648d5dd0889224297022d49eb7b5486cc6b720cfdb88f5ae1c98636f71e0b29
def calculateNDVI(self, time): 'Calculate the Normalized Difference Vegetation Index - NDVI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDVI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDVI']) result.attrs = self.description return result
Calculate the Normalized Difference Vegetation Index - NDVI of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.
eocube/eocube.py
calculateNDVI
AbnerErnaniADSFatec/eocubes-dev
0
python
def calculateNDVI(self, time): 'Calculate the Normalized Difference Vegetation Index - NDVI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDVI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDVI']) result.attrs = self.description return result
def calculateNDVI(self, time): 'Calculate the Normalized Difference Vegetation Index - NDVI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDVI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDVI']) result.attrs = self.description return result<|docstring|>Calculate the Normalized Difference Vegetation Index - NDVI of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.<|endoftext|>
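Usage sketch for calculateNDVI(); the two entries that follow (calculateNDWI and calculateNDBI) share the same signature and are used the same way. The date is illustrative and is snapped to the nearest timeline entry by nearTime():

    ndvi = cube.calculateNDVI('2019-06-15')   # dims ('time', 'y', 'x'), a single date
    ndvi.isel(time=0).plot(cmap='Greens')     # render the one scene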
a979b4189263243eb1f05b3cbfae5852c525bbaa275e6275eef8af28e93ef7f1
def calculateNDWI(self, time): 'Calculate the Normalized Difference Water Index - NDWI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDWI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDWI']) result.attrs = self.description return result
Calculate the Normalized Difference Water Index - NDWI of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.
eocube/eocube.py
calculateNDWI
AbnerErnaniADSFatec/eocubes-dev
0
python
def calculateNDWI(self, time): 'Calculate the Normalized Difference Water Index - NDWI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDWI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDWI']) result.attrs = self.description return result
def calculateNDWI(self, time): 'Calculate the Normalized Difference Water Index - NDWI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDWI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDWI']) result.attrs = self.description return result<|docstring|>Calculate the Normalized Difference Water Index - NDWI of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.<|endoftext|>
2a141ea202a760440d8d5d97f62f460b7686f306f3e6422ef11894546afcbd53
def calculateNDBI(self, time): 'Calculate the Normalized Difference Built-up Index - NDBI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDBI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDBI']) result.attrs = self.description return result
Calculate the Normalized Difference Built-up Index - NDBI of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.
eocube/eocube.py
calculateNDBI
AbnerErnaniADSFatec/eocubes-dev
0
python
def calculateNDBI(self, time): 'Calculate the Normalized Difference Built-up Index - NDBI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDBI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDBI']) result.attrs = self.description return result
def calculateNDBI(self, time): 'Calculate the Normalized Difference Built-up Index - NDBI of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getNDBI() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ImageNDBI']) result.attrs = self.description return result<|docstring|>Calculate the Normalized Difference Built-up Index - NDBI of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.<|endoftext|>
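Because the three index methods share one signature, they compose naturally; a sketch of a crude water/built-up mask (the thresholds are invented for illustration, not taken from the source):

    date = '2019-06-15'
    ndwi = cube.calculateNDWI(date).isel(time=0)
    ndbi = cube.calculateNDBI(date).isel(time=0)
    water_mask = ndwi > 0.3      # illustrative threshold only
    builtup_mask = ndbi > 0.1    # illustrative threshold only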
034208a4c3c56254802b7921ae63960e398fe65b4ec43ba9f98886b7e97aef66
def calculateColorComposition(self, time): 'Calculate the RGB color composition of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getRGB() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) _rgb = ['red', 'green', 'blue'] result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x, _rgb], dims=['time', 'y', 'x', 'rgb'], name=['ColorComposition']) result.attrs = self.description return result
Calculate the RGB color composition of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.
eocube/eocube.py
calculateColorComposition
AbnerErnaniADSFatec/eocubes-dev
0
python
def calculateColorComposition(self, time): 'Calculate the RGB color composition of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getRGB() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) _rgb = ['red', 'green', 'blue'] result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x, _rgb], dims=['time', 'y', 'x', 'rgb'], name=['ColorComposition']) result.attrs = self.description return result
def calculateColorComposition(self, time): 'Calculate the RGB color composition of a given period.\n\n        Parameters:\n\n         - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd".\n\n        Raise:\n\n         - KeyError: No data for the given date time selected.\n        ' _date = self.nearTime(time) _data = self.data_images[_date].getRGB() _timeline = [_date] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) _rgb = ['red', 'green', 'blue'] result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x, _rgb], dims=['time', 'y', 'x', 'rgb'], name=['ColorComposition']) result.attrs = self.description return result<|docstring|>Calculate the RGB color composition of a given period. Parameters: - time <string, required>: The given time to retrieve a single image, formatted "yyyy-mm-dd". Raise: - KeyError: No data for the given date time selected.<|endoftext|>
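The color composition returns a (time, y, x, rgb) array, so a single date can be rendered directly with matplotlib (a sketch; the values are assumed to be scaled for display by getRGB()):

    import matplotlib.pyplot as plt
    rgb = cube.calculateColorComposition('2019-06-15')
    plt.imshow(rgb.isel(time=0))   # (y, x, rgb) renders as a true-colour image
    plt.show()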
fb1753f5ec4917d128dba5cd59ee8639c8a056b08f6895166e2af4bf07739036
def classifyDifference(self, band, start_date, end_date, limiar_min=0, limiar_max=0): 'Classify the difference between two images, at the start and end dates, based on the min and max thresholds.\n\n        Parameters:\n\n         - band <string, required>: The common name of the band (nir, ndvi, red, ... see info.collections).\n\n         - start_date <string, required>: The start date string, formatted "yyyy-mm-dd", to complete the interval.\n\n         - end_date <string, required>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset.\n\n         - limiar_min <float, required>: The minimum threshold value for classifying the difference.\n\n         - limiar_max <float, required>: The maximum threshold value for classifying the difference.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' time_1 = self.nearTime(start_date) data_1 = self.data_images[time_1].getBand(band) time_2 = self.nearTime(end_date) data_2 = self.data_images[time_2].getBand(band) spectral = Spectral() data_1 = spectral._format(data_1) data_2 = spectral._format(data_2) _data = None if spectral._validate_shape(data_1, data_2): diff = spectral._matrix_diff(data_1, data_2) _data = spectral._classify_diff(diff, limiar_min=limiar_min, limiar_max=limiar_max) else: raise ValueError('Times 1 and 2 have different shapes!') _timeline = [f'{time_1} - {time_2}'] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) _result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ClassifyDifference']) return _result
Classify the difference between two images, at the start and end dates, based on the min and max thresholds. Parameters: - band <string, required>: The common name of the band (nir, ndvi, red, ... see info.collections). - start_date <string, required>: The start date string, formatted "yyyy-mm-dd", to complete the interval. - end_date <string, required>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset. - limiar_min <float, required>: The minimum threshold value for classifying the difference. - limiar_max <float, required>: The maximum threshold value for classifying the difference. Raise: - KeyError: If the given parameter does not exist.
eocube/eocube.py
classifyDifference
AbnerErnaniADSFatec/eocubes-dev
0
python
def classifyDifference(self, band, start_date, end_date, limiar_min=0, limiar_max=0): 'Classify the difference between two images, at the start and end dates, based on the min and max thresholds.\n\n        Parameters:\n\n         - band <string, required>: The common name of the band (nir, ndvi, red, ... see info.collections).\n\n         - start_date <string, required>: The start date string, formatted "yyyy-mm-dd", to complete the interval.\n\n         - end_date <string, required>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset.\n\n         - limiar_min <float, required>: The minimum threshold value for classifying the difference.\n\n         - limiar_max <float, required>: The maximum threshold value for classifying the difference.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' time_1 = self.nearTime(start_date) data_1 = self.data_images[time_1].getBand(band) time_2 = self.nearTime(end_date) data_2 = self.data_images[time_2].getBand(band) spectral = Spectral() data_1 = spectral._format(data_1) data_2 = spectral._format(data_2) _data = None if spectral._validate_shape(data_1, data_2): diff = spectral._matrix_diff(data_1, data_2) _data = spectral._classify_diff(diff, limiar_min=limiar_min, limiar_max=limiar_max) else: raise ValueError('Times 1 and 2 have different shapes!') _timeline = [f'{time_1} - {time_2}'] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) _result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ClassifyDifference']) return _result
def classifyDifference(self, band, start_date, end_date, limiar_min=0, limiar_max=0): 'Classify the difference between two images, at the start and end dates, based on the min and max thresholds.\n\n        Parameters:\n\n         - band <string, required>: The common name of the band (nir, ndvi, red, ... see info.collections).\n\n         - start_date <string, required>: The start date string, formatted "yyyy-mm-dd", to complete the interval.\n\n         - end_date <string, required>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset.\n\n         - limiar_min <float, required>: The minimum threshold value for classifying the difference.\n\n         - limiar_max <float, required>: The maximum threshold value for classifying the difference.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' time_1 = self.nearTime(start_date) data_1 = self.data_images[time_1].getBand(band) time_2 = self.nearTime(end_date) data_2 = self.data_images[time_2].getBand(band) spectral = Spectral() data_1 = spectral._format(data_1) data_2 = spectral._format(data_2) _data = None if spectral._validate_shape(data_1, data_2): diff = spectral._matrix_diff(data_1, data_2) _data = spectral._classify_diff(diff, limiar_min=limiar_min, limiar_max=limiar_max) else: raise ValueError('Times 1 and 2 have different shapes!') _timeline = [f'{time_1} - {time_2}'] _x = list(range(0, _data.shape[1])) _y = list(range(0, _data.shape[0])) _result = xr.DataArray(np.array([_data]), coords=[_timeline, _y, _x], dims=['time', 'y', 'x'], name=['ClassifyDifference']) return _result<|docstring|>Classify the difference between two images, at the start and end dates, based on the min and max thresholds. Parameters: - band <string, required>: The common name of the band (nir, ndvi, red, ... see info.collections). - start_date <string, required>: The start date string, formatted "yyyy-mm-dd", to complete the interval. - end_date <string, required>: The end date string, formatted "yyyy-mm-dd", to complete the interval and retrieve a dataset. - limiar_min <float, required>: The minimum threshold value for classifying the difference. - limiar_max <float, required>: The maximum threshold value for classifying the difference. Raise: - KeyError: If the given parameter does not exist.<|endoftext|>
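Usage sketch for classifyDifference(); pixels whose band difference falls outside [limiar_min, limiar_max] are flagged by the internal _classify_diff helper. The band, dates, and thresholds below are placeholders:

    change = cube.classifyDifference('ndvi', '2018-06-15', '2019-06-15',
                                     limiar_min=-0.2, limiar_max=0.2)
    change.isel(time=0).plot()   # one labelled image for the date pair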
45de1528c29f59147b3d7d8227178e1f72a56bd8f3e41fb9a0b6360a2d8c179a
def interactPlot(self, method): 'Plot the whole dataset with an interactive date-time slider.\n\n        Parameters:\n\n         - method <string, required>: The method, like rgb, ndvi, ndwi, ndbi, ... or any of the selected bands.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' @interact(date=self.timeline) def sliderplot(date): plt.clf() plt.figure(figsize=(25, 8)) if (method == 'rgb'): plt.imshow(self.data_images[date].getRGB()) plt.title(f'''
 Composição Colorida Verdadeira
 {date}
 ''') elif ((method == 'ndvi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Greens', 1000) plt.imshow(self.data_images[date].getNDVI(), cmap=colormap) plt.title(f'''
 NDVI - Normalized Difference Vegetation Index
 {date}
 ''') plt.colorbar() elif ((method == 'ndwi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Blues', 1000) plt.imshow(self.data_images[date].getNDWI(), cmap=colormap) plt.title(f'''
 NDWI - Normalized Difference Water Index
 {date}
 ''') plt.colorbar() elif ((method == 'ndbi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Greys', 1000) plt.imshow(self.data_images[date].getNDBI(), cmap=colormap) plt.title(f'''
 NDBI - Normalized Difference Built-up Index
 {date}
 ''') plt.colorbar() elif (method in self.query_bands): colormap = plt.get_cmap('Greys', 1000) if (method in ['red', 'green', 'blue']): colormap = plt.get_cmap('Greys', 255).reversed() plt.imshow(self.data_images[date].getBand(method), cmap=colormap) plt.title(f'''
 Composição da Banda {method.upper()}
 {date}
 ''') plt.colorbar() else: raise ValueError('Please insert a valid method rgb, ndvi, ndwi, ndbi, ... or any of the selected bands!') plt.tight_layout() plt.show()
Plot the whole dataset with an interactive date-time slider. Parameters: - method <string, required>: The method, like rgb, ndvi, ndwi, ndbi, ... or any of the selected bands. Raise: - KeyError: If the given parameter does not exist.
eocube/eocube.py
interactPlot
AbnerErnaniADSFatec/eocubes-dev
0
python
def interactPlot(self, method): 'Plot the whole dataset with an interactive date-time slider.\n\n        Parameters:\n\n         - method <string, required>: The method, like rgb, ndvi, ndwi, ndbi, ... or any of the selected bands.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' @interact(date=self.timeline) def sliderplot(date): plt.clf() plt.figure(figsize=(25, 8)) if (method == 'rgb'): plt.imshow(self.data_images[date].getRGB()) plt.title(f' Composição Colorida Verdadeira {date} ') elif ((method == 'ndvi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Greens', 1000) plt.imshow(self.data_images[date].getNDVI(), cmap=colormap) plt.title(f' NDVI - Normalized Difference Vegetation Index {date} ') plt.colorbar() elif ((method == 'ndwi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Blues', 1000) plt.imshow(self.data_images[date].getNDWI(), cmap=colormap) plt.title(f' NDWI - Normalized Difference Water Index {date} ') plt.colorbar() elif ((method == 'ndbi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Greys', 1000) plt.imshow(self.data_images[date].getNDBI(), cmap=colormap) plt.title(f' NDBI - Normalized Difference Built-up Index {date} ') plt.colorbar() elif (method in self.query_bands): colormap = plt.get_cmap('Greys', 1000) if (method in ['red', 'green', 'blue']): colormap = plt.get_cmap('Greys', 255).reversed() plt.imshow(self.data_images[date].getBand(method), cmap=colormap) plt.title(f' Composição da Banda {method.upper()} {date} ') plt.colorbar() else: raise ValueError('Please insert a valid method rgb, ndvi, ndwi, ndbi, ... or any of the selected bands!') plt.tight_layout() plt.show()
def interactPlot(self, method): 'Plot the whole dataset with an interactive date-time slider.\n\n        Parameters:\n\n         - method <string, required>: The method, like rgb, ndvi, ndwi, ndbi, ... or any of the selected bands.\n\n        Raise:\n\n         - KeyError: If the given parameter does not exist.\n        ' @interact(date=self.timeline) def sliderplot(date): plt.clf() plt.figure(figsize=(25, 8)) if (method == 'rgb'): plt.imshow(self.data_images[date].getRGB()) plt.title(f' Composição Colorida Verdadeira {date} ') elif ((method == 'ndvi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Greens', 1000) plt.imshow(self.data_images[date].getNDVI(), cmap=colormap) plt.title(f' NDVI - Normalized Difference Vegetation Index {date} ') plt.colorbar() elif ((method == 'ndwi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Blues', 1000) plt.imshow(self.data_images[date].getNDWI(), cmap=colormap) plt.title(f' NDWI - Normalized Difference Water Index {date} ') plt.colorbar() elif ((method == 'ndbi') and (not (method in self.query_bands))): colormap = plt.get_cmap('Greys', 1000) plt.imshow(self.data_images[date].getNDBI(), cmap=colormap) plt.title(f' NDBI - Normalized Difference Built-up Index {date} ') plt.colorbar() elif (method in self.query_bands): colormap = plt.get_cmap('Greys', 1000) if (method in ['red', 'green', 'blue']): colormap = plt.get_cmap('Greys', 255).reversed() plt.imshow(self.data_images[date].getBand(method), cmap=colormap) plt.title(f' Composição da Banda {method.upper()} {date} ') plt.colorbar() else: raise ValueError('Please insert a valid method rgb, ndvi, ndwi, ndbi, ... or any of the selected bands!') plt.tight_layout() plt.show()<|docstring|>Plot the whole dataset with an interactive date-time slider. Parameters: - method <string, required>: The method, like rgb, ndvi, ndwi, ndbi, ... or any of the selected bands. Raise: - KeyError: If the given parameter does not exist.<|endoftext|>
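interactPlot() builds an ipywidgets slider, so it needs a live Jupyter kernel; a minimal sketch:

    # Run inside a notebook cell; each slider position redraws the chosen layer.
    cube.interactPlot('rgb')    # true-colour composition with a date slider
    cube.interactPlot('ndvi')   # or 'ndwi', 'ndbi', or any selected band name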
e5a876b0dbead667c3d77ef6fb0ec9e55c1bf8866ff8b78215fbda9d434b35c0
@commands.command(name='help') async def help(self, context): '\n List all commands from every Cog the bot has loaded.\n ' prefix = config['bot_prefix'] if (not isinstance(prefix, str)): prefix = prefix[0] embed = discord.Embed(title='Help', description='List of available commands:', color=config['success']) for i in self.bot.cogs: cog = self.bot.get_cog(i.lower()) print(i) if (i not in ['owner', 'template', 'moderation']): commands = cog.get_commands() command_list = [command.name for command in commands] command_description = [command.help for command in commands] help_text = '\n'.join((f'{prefix}{n} - {h}' for (n, h) in zip(command_list, command_description))) embed.add_field(name=i.capitalize(), value=f'```{help_text}```', inline=False) (await context.send(embed=embed))
List all commands from every Cog the bot has loaded.
cogs/help.py
help
Nanosplitter/DadBotV2.0
2
python
@commands.command(name='help') async def help(self, context): '\n \n ' prefix = config['bot_prefix'] if (not isinstance(prefix, str)): prefix = prefix[0] embed = discord.Embed(title='Help', description='List of available commands:', color=config['success']) for i in self.bot.cogs: cog = self.bot.get_cog(i.lower()) print(i) if (i not in ['owner', 'template', 'moderation']): commands = cog.get_commands() command_list = [command.name for command in commands] command_description = [command.help for command in commands] help_text = '\n'.join((f'{prefix}{n} - {h}' for (n, h) in zip(command_list, command_description))) embed.add_field(name=i.capitalize(), value=f'```{help_text}```', inline=False) (await context.send(embed=embed))
@commands.command(name='help') async def help(self, context): '\n \n ' prefix = config['bot_prefix'] if (not isinstance(prefix, str)): prefix = prefix[0] embed = discord.Embed(title='Help', description='List of available commands:', color=config['success']) for i in self.bot.cogs: cog = self.bot.get_cog(i.lower()) print(i) if (i not in ['owner', 'template', 'moderation']): commands = cog.get_commands() command_list = [command.name for command in commands] command_description = [command.help for command in commands] help_text = '\n'.join((f'{prefix}{n} - {h}' for (n, h) in zip(command_list, command_description))) embed.add_field(name=i.capitalize(), value=f'```{help_text}```', inline=False) (await context.send(embed=embed))<|docstring|>List all commands from every Cog the bot has loaded.<|endoftext|>
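The help command above lives in a discord.py Cog; the registration boilerplate is not part of this entry, so the sketch below is only the conventional (assumed) wiring, and on discord.py 2.x the setup hook must be async:

    from discord.ext import commands

    class Help(commands.Cog, name="help"):   # assumed class/cog name, not shown in the entry
        def __init__(self, bot):
            self.bot = bot
        # ... the help command shown above is defined here ...

    def setup(bot):                          # called by bot.load_extension("cogs.help")
        bot.add_cog(Help(bot))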
4ce8cc981c8ac6ff9c13af7410d26694a0de292b2bc53c8969f0c9ebdcc76c9e
def test_logging(): 'Test the logging function' differ.utils.setup_logging('INFO') differ.utils.setup_logging('DEBUG') differ.utils.setup_logging('ERROR')
Test the logging function
tests/test_utils.py
test_logging
MadRussian/differ
1
python
def test_logging(): differ.utils.setup_logging('INFO') differ.utils.setup_logging('DEBUG') differ.utils.setup_logging('ERROR')
def test_logging(): differ.utils.setup_logging('INFO') differ.utils.setup_logging('DEBUG') differ.utils.setup_logging('ERROR')<|docstring|>Test the logging function<|endoftext|>
70af37fcea824d1a602212426b3adc996483b62ded964230f46f6e28bb923d0c
def test_get_file_type(): 'Test get_file_type' assert (differ.utils.get_file_type(None) == None) assert (differ.utils.get_file_type("Doesn't exist") == None)
Test get_file_type
tests/test_utils.py
test_get_file_type
MadRussian/differ
1
python
def test_get_file_type(): assert (differ.utils.get_file_type(None) == None) assert (differ.utils.get_file_type("Doesn't exist") == None)
def test_get_file_type(): assert (differ.utils.get_file_type(None) == None) assert (differ.utils.get_file_type("Doesn't exist") == None)<|docstring|>Test get_file_type<|endoftext|>
79e534f8ad3a19dad760e0244cf1106ab36d5f658c407980baa5193e9c18eec7
def update(self, item: T, new_item: T) -> None: 'Updates the given item in the heap, if present.' if (item not in self.elem_to_index): raise KeyError('Item not found') index = self.elem_to_index[item] del self.elem_to_index[item] self._heap[index] = new_item self.elem_to_index[new_item] = index self._heapify_up(index) self._heapify_down(index)
Updates the given item in the heap, if present.
cs/structures/heap/binary_heap.py
update
TylerYep/workshop
1
python
def update(self, item: T, new_item: T) -> None: if (item not in self.elem_to_index): raise KeyError('Item not found') index = self.elem_to_index[item] del self.elem_to_index[item] self._heap[index] = new_item self.elem_to_index[new_item] = index self._heapify_up(index) self._heapify_down(index)
def update(self, item: T, new_item: T) -> None: if (item not in self.elem_to_index): raise KeyError('Item not found') index = self.elem_to_index[item] del self.elem_to_index[item] self._heap[index] = new_item self.elem_to_index[new_item] = index self._heapify_up(index) self._heapify_down(index)<|docstring|>Updates the given item in the heap, if present.<|endoftext|>
7fba11e89cebc02de4105f2b77e2ad593f18fb61ca409ca158044d29d4adcb37
def dequeue(self, item: T) -> None: 'Deletes the given item from the heap, if present.' if (item not in self.elem_to_index): raise KeyError('Item not found') index = self.elem_to_index[item] del self.elem_to_index[item] self.size -= 1 if (index != self.size): self._heap[index] = self._heap[self.size] self.elem_to_index[self._heap[index]] = index self._heapify_up(index) self._heapify_down(index)
Deletes the given item from the heap, if present.
cs/structures/heap/binary_heap.py
dequeue
TylerYep/workshop
1
python
def dequeue(self, item: T) -> None: if (item not in self.elem_to_index): raise KeyError('Item not found') index = self.elem_to_index[item] del self.elem_to_index[item] self.size -= 1 if (index != self.size): self._heap[index] = self._heap[self.size] self.elem_to_index[self._heap[index]] = index self._heapify_up(index) self._heapify_down(index)
def dequeue(self, item: T) -> None: if (item not in self.elem_to_index): raise KeyError('Item not found') index = self.elem_to_index[item] del self.elem_to_index[item] self.size -= 1 if (index != self.size): self._heap[index] = self._heap[self.size] self.elem_to_index[self._heap[index]] = index self._heapify_up(index) self._heapify_down(index)<|docstring|>Deletes the given item from the heap, if present.<|endoftext|>
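Why dequeue() sifts both up and down after moving the last element into the hole: the relocated element came from the bottom of the heap, so at its new position it can violate the heap property in either direction. A small min-heap example:

    #         1
    #       /   \
    #      5     2
    #     / \   /
    #    9   6 3          array form: [1, 5, 2, 9, 6, 3]
    # Deleting 9 moves 3 into its slot; 3 < its parent 5, so 3 must sift UP.
    # In a deeper tree, the element moved into the hole could just as easily
    # be larger than a child there, in which case it must sift DOWN instead.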
ad81412c35c7f807661c46caf0d20fe64b9aeedb2c0034a9906e8d0ba706f6de
def enqueue(self, item: T) -> None: 'Inserts the given item into the heap.' new_node = item if (len(self._heap) == self.size): self._heap.append(new_node) else: self._heap[self.size] = new_node self.elem_to_index[item] = self.size self.size += 1 self._heapify_up((self.size - 1))
Inserts the given item into the heap.
cs/structures/heap/binary_heap.py
enqueue
TylerYep/workshop
1
python
def enqueue(self, item: T) -> None: new_node = item if (len(self._heap) == self.size): self._heap.append(new_node) else: self._heap[self.size] = new_node self.elem_to_index[item] = self.size self.size += 1 self._heapify_up((self.size - 1))
def enqueue(self, item: T) -> None: new_node = item if (len(self._heap) == self.size): self._heap.append(new_node) else: self._heap[self.size] = new_node self.elem_to_index[item] = self.size self.size += 1 self._heapify_up((self.size - 1))<|docstring|>Inserts the given item into the heap.<|endoftext|>
a75074403d3524d66e61f7470e791754dabccfe2f78b94da8a771c2491bd81e1
def peek(self) -> T: 'Returns the top item from the heap, if present.' if (self.size == 0): raise ValueError('Heap is empty.') return self._heap[0]
Returns the top item from the heap, if present.
cs/structures/heap/binary_heap.py
peek
TylerYep/workshop
1
python
def peek(self) -> T: if (self.size == 0): raise ValueError('Heap is empty.') return self._heap[0]
def peek(self) -> T: if (self.size == 0): raise ValueError('Heap is empty.') return self._heap[0]<|docstring|>Returns the top item from the heap, if present.<|endoftext|>
d8ac5852d3627c4638f2416bfe0ccb477855c9f50395fca9fab5b0c23b4f1e8c
def pop(self) -> T: '\n        Returns the top item from the heap and removes it, if present.\n        ' top_item = self.peek() self.dequeue(top_item) return top_item
Returns the top item from the heap and removes it, if present.
cs/structures/heap/binary_heap.py
pop
TylerYep/workshop
1
python
def pop(self) -> T: '\n        Returns the top item from the heap and removes it, if present.\n        ' top_item = self.peek() self.dequeue(top_item) return top_item
def pop(self) -> T: '\n        Returns the top item from the heap and removes it, if present.\n        ' top_item = self.peek() self.dequeue(top_item) return top_item<|docstring|>Returns the top item from the heap and removes it, if present.<|endoftext|>
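A consolidated usage sketch for the heap methods above; the class name and the min-heap ordering are assumptions, since the constructor and comparison logic are not shown in these entries:

    heap = BinaryHeap()        # assumed class name (see the entry path binary_heap.py)
    for x in (7, 3, 9, 1):
        heap.enqueue(x)
    assert heap.peek() == 1    # smallest item on top under the assumed min-heap order
    heap.update(9, 0)          # 0 sifts up to the root
    assert heap.pop() == 0
    heap.dequeue(7)            # remove an arbitrary item by value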