Dataset schema (columns for each record below):
- repo: string, lengths 7 to 55
- path: string, lengths 4 to 223
- url: string, lengths 87 to 315
- code: string, lengths 75 to 104k
- code_tokens: list
- docstring: string, lengths 1 to 46.9k
- docstring_tokens: list
- language: string, 1 distinct value
- partition: string, 3 distinct values
- avg_line_len: float64, 7.91 to 980
jrderuiter/pybiomart
src/pybiomart/dataset.py
https://github.com/jrderuiter/pybiomart/blob/7802d45fe88549ab0512d6f37f815fc43b172b39/src/pybiomart/dataset.py#L294-L314
def _add_filter_node(root, filter_, value):
    """Adds filter xml node to root."""
    filter_el = ElementTree.SubElement(root, 'Filter')
    filter_el.set('name', filter_.name)

    # Set filter value depending on type.
    if filter_.type == 'boolean':
        # Boolean case.
        if value is True or value.lower() in {'included', 'only'}:
            filter_el.set('excluded', '0')
        elif value is False or value.lower() == 'excluded':
            filter_el.set('excluded', '1')
        else:
            raise ValueError('Invalid value for boolean filter ({})'
                             .format(value))
    elif isinstance(value, list) or isinstance(value, tuple):
        # List case.
        filter_el.set('value', ','.join(map(str, value)))
    else:
        # Default case.
        filter_el.set('value', str(value))
[ "def", "_add_filter_node", "(", "root", ",", "filter_", ",", "value", ")", ":", "filter_el", "=", "ElementTree", ".", "SubElement", "(", "root", ",", "'Filter'", ")", "filter_el", ".", "set", "(", "'name'", ",", "filter_", ".", "name", ")", "# Set filter value depending on type.", "if", "filter_", ".", "type", "==", "'boolean'", ":", "# Boolean case.", "if", "value", "is", "True", "or", "value", ".", "lower", "(", ")", "in", "{", "'included'", ",", "'only'", "}", ":", "filter_el", ".", "set", "(", "'excluded'", ",", "'0'", ")", "elif", "value", "is", "False", "or", "value", ".", "lower", "(", ")", "==", "'excluded'", ":", "filter_el", ".", "set", "(", "'excluded'", ",", "'1'", ")", "else", ":", "raise", "ValueError", "(", "'Invalid value for boolean filter ({})'", ".", "format", "(", "value", ")", ")", "elif", "isinstance", "(", "value", ",", "list", ")", "or", "isinstance", "(", "value", ",", "tuple", ")", ":", "# List case.", "filter_el", ".", "set", "(", "'value'", ",", "','", ".", "join", "(", "map", "(", "str", ",", "value", ")", ")", ")", "else", ":", "# Default case.", "filter_el", ".", "set", "(", "'value'", ",", "str", "(", "value", ")", ")" ]
Adds filter xml node to root.
[ "Adds", "filter", "xml", "node", "to", "root", "." ]
python
train
42.571429
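A minimal usage sketch for the _add_filter_node record above. The FakeFilter namedtuple is a hypothetical stand-in for pybiomart's real Filter class; anything exposing .name and .type attributes works:

    from collections import namedtuple
    from xml.etree import ElementTree

    FakeFilter = namedtuple('FakeFilter', ['name', 'type'])  # hypothetical stand-in

    root = ElementTree.Element('Query')
    # A non-boolean filter with a list value takes the ','.join branch.
    _add_filter_node(root, FakeFilter('chromosome_name', 'text'), ['1', '2', 'X'])
    print(ElementTree.tostring(root))
    # b'<Query><Filter name="chromosome_name" value="1,2,X" /></Query>'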
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L907-L919
def fill_fw_dict_from_db(self, fw_data):
    """
    This routine is called to create a local fw_dict with data from DB.
    """
    rule_dict = fw_data.get('rules').get('rules')
    fw_dict = {'fw_id': fw_data.get('fw_id'),
               'fw_name': fw_data.get('name'),
               'firewall_policy_id': fw_data.get('firewall_policy_id'),
               'fw_type': fw_data.get('fw_type'),
               'router_id': fw_data.get('router_id'),
               'rules': {}}
    for rule in rule_dict:
        fw_dict['rules'][rule] = rule_dict.get(rule)
    return fw_dict
[ "def", "fill_fw_dict_from_db", "(", "self", ",", "fw_data", ")", ":", "rule_dict", "=", "fw_data", ".", "get", "(", "'rules'", ")", ".", "get", "(", "'rules'", ")", "fw_dict", "=", "{", "'fw_id'", ":", "fw_data", ".", "get", "(", "'fw_id'", ")", ",", "'fw_name'", ":", "fw_data", ".", "get", "(", "'name'", ")", ",", "'firewall_policy_id'", ":", "fw_data", ".", "get", "(", "'firewall_policy_id'", ")", ",", "'fw_type'", ":", "fw_data", ".", "get", "(", "'fw_type'", ")", ",", "'router_id'", ":", "fw_data", ".", "get", "(", "'router_id'", ")", ",", "'rules'", ":", "{", "}", "}", "for", "rule", "in", "rule_dict", ":", "fw_dict", "[", "'rules'", "]", "[", "rule", "]", "=", "rule_dict", ".", "get", "(", "rule", ")", "return", "fw_dict" ]
This routine is called to create a local fw_dict with data from DB.
[ "This", "routine", "is", "called", "to", "create", "a", "local", "fw_dict", "with", "data", "from", "DB", "." ]
python
train
45.769231
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L2273-L2279
def SetConsoleTitle(text: str) -> bool:
    """
    SetConsoleTitle from Win32.
    text: str.
    Return bool, True if it succeeds, otherwise False.
    """
    return bool(ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(text)))
[ "def", "SetConsoleTitle", "(", "text", ":", "str", ")", "->", "bool", ":", "return", "bool", "(", "ctypes", ".", "windll", ".", "kernel32", ".", "SetConsoleTitleW", "(", "ctypes", ".", "c_wchar_p", "(", "text", ")", ")", ")" ]
SetConsoleTitle from Win32. text: str. Return bool, True if it succeeds, otherwise False.
[ "SetConsoleTitle", "from", "Win32", ".", "text", ":", "str", ".", "Return", "bool", "True", "if", "succeed", "otherwise", "False", "." ]
python
valid
32.428571
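A quick sketch of calling the SetConsoleTitle wrapper above; it is Windows-only and reports failure via the return value rather than raising:

    if not SetConsoleTitle('uiautomation demo'):
        print('failed to set console title')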
HazyResearch/metal
metal/multitask/mt_classifier.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/multitask/mt_classifier.py#L52-L77
def predict(self, X, break_ties="random", return_probs=False, **kwargs):
    """Predicts int labels for an input X on all tasks

    Args:
        X: The input for the predict_proba method
        break_ties: A tie-breaking policy
        return_probs: Return the predicted probabilities as well

    Returns:
        Y_p: A t-length list of n-dim np.ndarrays of predictions in [1, K_t]
        [Optionally: Y_s: A t-length list of [n, K_t] np.ndarrays of
            predicted probabilities]
    """
    Y_s = self.predict_proba(X, **kwargs)
    self._check(Y_s, typ=list)
    self._check(Y_s[0], typ=np.ndarray)

    Y_p = []
    for Y_ts in Y_s:
        Y_tp = self._break_ties(Y_ts, break_ties)
        Y_p.append(Y_tp.astype(np.int))

    if return_probs:
        return Y_p, Y_s
    else:
        return Y_p
[ "def", "predict", "(", "self", ",", "X", ",", "break_ties", "=", "\"random\"", ",", "return_probs", "=", "False", ",", "*", "*", "kwargs", ")", ":", "Y_s", "=", "self", ".", "predict_proba", "(", "X", ",", "*", "*", "kwargs", ")", "self", ".", "_check", "(", "Y_s", ",", "typ", "=", "list", ")", "self", ".", "_check", "(", "Y_s", "[", "0", "]", ",", "typ", "=", "np", ".", "ndarray", ")", "Y_p", "=", "[", "]", "for", "Y_ts", "in", "Y_s", ":", "Y_tp", "=", "self", ".", "_break_ties", "(", "Y_ts", ",", "break_ties", ")", "Y_p", ".", "append", "(", "Y_tp", ".", "astype", "(", "np", ".", "int", ")", ")", "if", "return_probs", ":", "return", "Y_p", ",", "Y_s", "else", ":", "return", "Y_p" ]
Predicts int labels for an input X on all tasks

Args:
    X: The input for the predict_proba method
    break_ties: A tie-breaking policy
    return_probs: Return the predicted probabilities as well

Returns:
    Y_p: A t-length list of n-dim np.ndarrays of predictions in [1, K_t]
    [Optionally: Y_s: A t-length list of [n, K_t] np.ndarrays of predicted probabilities]
[ "Predicts", "int", "labels", "for", "an", "input", "X", "on", "all", "tasks" ]
python
train
33.538462
qiniu/python-sdk
qiniu/services/compute/qcos_api.py
https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/services/compute/qcos_api.py#L261-L279
def update_service(self, stack, service, args):
    """Update a service.

    Updates the configuration of the service with the given name (container
    image and other parameters); the change takes effect after the containers
    are redeployed.

    If the manualUpdate parameter is specified, an additional call to the
    "deploy service" API with the appropriate parameters is required to
    perform the deployment; a service in manual-upgrade mode is not allowed
    to perform other modification operations.

    If the manualUpdate parameter is not specified, the platform completes
    the deployment automatically.

    Args:
        - stack: name of the stack the service belongs to
        - service: service name
        - args: request parameters describing the service (json), see
          http://kirk-docs.qiniu.com/apidocs/

    Returns:
        A tuple of the form (<result>, <ResponseInfo>)
        - result: an empty dict {} on success, {"error": "<errMsg string>"} on failure
        - ResponseInfo: the Response info of the request
    """
    url = '{0}/v3/stacks/{1}/services/{2}'.format(self.host, stack, service)
    return self.__post(url, args)
[ "def", "update_service", "(", "self", ",", "stack", ",", "service", ",", "args", ")", ":", "url", "=", "'{0}/v3/stacks/{1}/services/{2}'", ".", "format", "(", "self", ".", "host", ",", "stack", ",", "service", ")", "return", "self", ".", "__post", "(", "url", ",", "args", ")" ]
Update a service.

Updates the configuration of the service with the given name (container image and other parameters); the change takes effect after the containers are redeployed.

If the manualUpdate parameter is specified, an additional call to the "deploy service" API with the appropriate parameters is required to perform the deployment; a service in manual-upgrade mode is not allowed to perform other modification operations.

If the manualUpdate parameter is not specified, the platform completes the deployment automatically.

Args:
    - stack: name of the stack the service belongs to
    - service: service name
    - args: request parameters describing the service (json), see http://kirk-docs.qiniu.com/apidocs/

Returns:
    A tuple of the form (<result>, <ResponseInfo>)
    - result: an empty dict {} on success, {"error": "<errMsg string>"} on failure
    - ResponseInfo: the Response info of the request
[ "Update", "a", "service" ]
python
train
35.842105
veltzer/pytconf
pytconf/config.py
https://github.com/veltzer/pytconf/blob/8dee43ace35d0dd2ab1105fb94057f650393360f/pytconf/config.py#L711-L726
def create_bool(help_string=NO_HELP, default=NO_DEFAULT):
    # type: (str, Union[bool, NO_DEFAULT_TYPE]) -> bool
    """
    Create a bool parameter
    :param help_string:
    :param default:
    :return:
    """
    # noinspection PyTypeChecker
    return ParamFunctions(
        help_string=help_string,
        default=default,
        type_name="bool",
        function_s2t=convert_string_to_bool,
        function_t2s=convert_bool_to_string,
    )
[ "def", "create_bool", "(", "help_string", "=", "NO_HELP", ",", "default", "=", "NO_DEFAULT", ")", ":", "# type: (str, Union[bool, NO_DEFAULT_TYPE]) -> bool", "# noinspection PyTypeChecker", "return", "ParamFunctions", "(", "help_string", "=", "help_string", ",", "default", "=", "default", ",", "type_name", "=", "\"bool\"", ",", "function_s2t", "=", "convert_string_to_bool", ",", "function_t2s", "=", "convert_bool_to_string", ",", ")" ]
Create a bool parameter
:param help_string:
:param default:
:return:
[ "Create", "a", "bool", "parameter", ":", "param", "help_string", ":", ":", "param", "default", ":", ":", "return", ":" ]
python
train
31.1875
dlecocq/nsq-py
nsq/http/__init__.py
https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/http/__init__.py#L67-L77
def get(self, path, *args, **kwargs):
    '''GET the provided endpoint'''
    target = self._host.relative(path).utf8
    if not isinstance(target, basestring):
        # on older versions of the `url` library, .utf8 is a method, not a property
        target = target()
    params = kwargs.get('params', {})
    params.update(self._params)
    kwargs['params'] = params
    logger.debug('GET %s with %s, %s', target, args, kwargs)
    return requests.get(target, *args, **kwargs)
[ "def", "get", "(", "self", ",", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "target", "=", "self", ".", "_host", ".", "relative", "(", "path", ")", ".", "utf8", "if", "not", "isinstance", "(", "target", ",", "basestring", ")", ":", "# on older versions of the `url` library, .utf8 is a method, not a property", "target", "=", "target", "(", ")", "params", "=", "kwargs", ".", "get", "(", "'params'", ",", "{", "}", ")", "params", ".", "update", "(", "self", ".", "_params", ")", "kwargs", "[", "'params'", "]", "=", "params", "logger", ".", "debug", "(", "'GET %s with %s, %s'", ",", "target", ",", "args", ",", "kwargs", ")", "return", "requests", ".", "get", "(", "target", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
GET the provided endpoint
[ "GET", "the", "provided", "endpoint" ]
python
train
46.363636
ungarj/mapchete
mapchete/tile.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/tile.py#L42-L60
def tile(self, zoom, row, col):
    """
    Return ``BufferedTile`` object of this ``BufferedTilePyramid``.

    Parameters
    ----------
    zoom : integer
        zoom level
    row : integer
        tile matrix row
    col : integer
        tile matrix column

    Returns
    -------
    buffered tile : ``BufferedTile``
    """
    tile = self.tile_pyramid.tile(zoom, row, col)
    return BufferedTile(tile, pixelbuffer=self.pixelbuffer)
[ "def", "tile", "(", "self", ",", "zoom", ",", "row", ",", "col", ")", ":", "tile", "=", "self", ".", "tile_pyramid", ".", "tile", "(", "zoom", ",", "row", ",", "col", ")", "return", "BufferedTile", "(", "tile", ",", "pixelbuffer", "=", "self", ".", "pixelbuffer", ")" ]
Return ``BufferedTile`` object of this ``BufferedTilePyramid``.

Parameters
----------
zoom : integer
    zoom level
row : integer
    tile matrix row
col : integer
    tile matrix column

Returns
-------
buffered tile : ``BufferedTile``
[ "Return", "BufferedTile", "object", "of", "this", "BufferedTilePyramid", "." ]
python
valid
25.736842
horazont/aioxmpp
aioxmpp/stream.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stream.py#L1680-L1736
def register_presence_callback(self, type_, from_, cb):
    """
    Register a callback to be called when a presence stanza is received.

    :param type_: Presence type to listen for.
    :type type_: :class:`~.PresenceType`
    :param from_: Sender JID to listen for, or :data:`None` for a wildcard
        match.
    :type from_: :class:`~aioxmpp.JID` or :data:`None`.
    :param cb: Callback function
    :raises ValueError: if another listener with the same ``(type_, from_)``
        pair is already registered
    :raises ValueError: if `type_` is not a valid :class:`~.PresenceType`
        (and cannot be cast to a :class:`~.PresenceType`)

    `cb` will be called whenever a presence stanza matching the `type_` is
    received from the specified sender. `from_` may be :data:`None` to
    indicate a wildcard.

    Like with :meth:`register_message_callback`, more specific callbacks win
    over less specific callbacks. The fallback order is identical, except
    that the ``type_=None`` entries described there do not apply for presence
    stanzas and are thus omitted.

    See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
    wildcarding rules.

    .. versionchanged:: 0.7

       The `type_` argument is now supposed to be a :class:`~.PresenceType`
       member.

    .. deprecated:: 0.7

       Passing a :class:`str` as `type_` argument is deprecated and will
       raise a :class:`TypeError` as of the 1.0 release. See the Changelog
       for :ref:`api-changelog-0.7` for further details on how to upgrade
       your code efficiently.

    .. deprecated:: 0.9

       This method has been deprecated. It is recommended to use
       :class:`aioxmpp.PresenceClient` instead.
    """
    type_ = self._coerce_enum(type_, structs.PresenceType)
    warnings.warn(
        "register_presence_callback is deprecated; use "
        "aioxmpp.dispatcher.SimplePresenceDispatcher or "
        "aioxmpp.PresenceClient instead",
        DeprecationWarning,
        stacklevel=2
    )
    self._xxx_presence_dispatcher.register_callback(
        type_, from_, cb,
    )
[ "def", "register_presence_callback", "(", "self", ",", "type_", ",", "from_", ",", "cb", ")", ":", "type_", "=", "self", ".", "_coerce_enum", "(", "type_", ",", "structs", ".", "PresenceType", ")", "warnings", ".", "warn", "(", "\"register_presence_callback is deprecated; use \"", "\"aioxmpp.dispatcher.SimplePresenceDispatcher or \"", "\"aioxmpp.PresenceClient instead\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "self", ".", "_xxx_presence_dispatcher", ".", "register_callback", "(", "type_", ",", "from_", ",", "cb", ",", ")" ]
Register a callback to be called when a presence stanza is received.

:param type_: Presence type to listen for.
:type type_: :class:`~.PresenceType`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`.
:param cb: Callback function
:raises ValueError: if another listener with the same ``(type_, from_)`` pair is already registered
:raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`)

`cb` will be called whenever a presence stanza matching the `type_` is received from the specified sender. `from_` may be :data:`None` to indicate a wildcard.

Like with :meth:`register_message_callback`, more specific callbacks win over less specific callbacks. The fallback order is identical, except that the ``type_=None`` entries described there do not apply for presence stanzas and are thus omitted.

See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact wildcarding rules.

.. versionchanged:: 0.7

   The `type_` argument is now supposed to be a :class:`~.PresenceType` member.

.. deprecated:: 0.7

   Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently.

.. deprecated:: 0.9

   This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead.
[ "Register", "a", "callback", "to", "be", "called", "when", "a", "presence", "stanza", "is", "received", "." ]
python
train
40.473684
pecan/pecan
pecan/util.py
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/util.py#L12-L54
def getargspec(method):
    """
    Drill through layers of decorators attempting to locate the actual argspec
    for a method.
    """

    argspec = _getargspec(method)
    args = argspec[0]
    if args and args[0] == 'self':
        return argspec
    if hasattr(method, '__func__'):
        method = method.__func__

    func_closure = six.get_function_closure(method)

    # NOTE(sileht): if the closure is None we cannot look deeper,
    # so return actual argspec, this occurs when the method
    # is static for example.
    if not func_closure:
        return argspec

    closure = None
    # In the case of deeply nested decorators (with arguments), it's possible
    # that there are several callables in scope; Take a best guess and go
    # with the one that looks most like a pecan controller function
    # (has a __code__ object, and 'self' is the first argument)
    func_closure = filter(
        lambda c: (
            six.callable(c.cell_contents) and
            hasattr(c.cell_contents, '__code__')
        ),
        func_closure
    )
    func_closure = sorted(
        func_closure,
        key=lambda c: 'self' in c.cell_contents.__code__.co_varnames,
        reverse=True
    )
    closure = func_closure[0]

    method = closure.cell_contents
    return getargspec(method)
[ "def", "getargspec", "(", "method", ")", ":", "argspec", "=", "_getargspec", "(", "method", ")", "args", "=", "argspec", "[", "0", "]", "if", "args", "and", "args", "[", "0", "]", "==", "'self'", ":", "return", "argspec", "if", "hasattr", "(", "method", ",", "'__func__'", ")", ":", "method", "=", "method", ".", "__func__", "func_closure", "=", "six", ".", "get_function_closure", "(", "method", ")", "# NOTE(sileht): if the closure is None we cannot look deeper,", "# so return actual argspec, this occurs when the method", "# is static for example.", "if", "not", "func_closure", ":", "return", "argspec", "closure", "=", "None", "# In the case of deeply nested decorators (with arguments), it's possible", "# that there are several callables in scope; Take a best guess and go", "# with the one that looks most like a pecan controller function", "# (has a __code__ object, and 'self' is the first argument)", "func_closure", "=", "filter", "(", "lambda", "c", ":", "(", "six", ".", "callable", "(", "c", ".", "cell_contents", ")", "and", "hasattr", "(", "c", ".", "cell_contents", ",", "'__code__'", ")", ")", ",", "func_closure", ")", "func_closure", "=", "sorted", "(", "func_closure", ",", "key", "=", "lambda", "c", ":", "'self'", "in", "c", ".", "cell_contents", ".", "__code__", ".", "co_varnames", ",", "reverse", "=", "True", ")", "closure", "=", "func_closure", "[", "0", "]", "method", "=", "closure", ".", "cell_contents", "return", "getargspec", "(", "method", ")" ]
Drill through layers of decorators attempting to locate the actual argspec for a method.
[ "Drill", "through", "layers", "of", "decorators", "attempting", "to", "locate", "the", "actual", "argspec", "for", "a", "method", "." ]
python
train
29.372093
Gandi/gandi.cli
gandi/cli/modules/docker.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/docker.py#L29-L48
def handle(cls, vm, args):
    """ Setup forwarding connection to given VM and pipe docker cmds over SSH.
    """
    docker = Iaas.info(vm)
    if not docker:
        raise Exception('docker vm %s not found' % vm)

    if docker['state'] != 'running':
        Iaas.start(vm)  # XXX

    remote_addr = docker['ifaces'][0]['ips'][0]['ip']
    port = unixpipe.setup(remote_addr, 'root', '/var/run/docker.sock')

    os.environ['DOCKER_HOST'] = 'tcp://localhost:%d' % port

    cls.echo('using DOCKER_HOST=%s' % os.environ['DOCKER_HOST'])
    subprocess.call(['docker'] + list(args))
[ "def", "handle", "(", "cls", ",", "vm", ",", "args", ")", ":", "docker", "=", "Iaas", ".", "info", "(", "vm", ")", "if", "not", "docker", ":", "raise", "Exception", "(", "'docker vm %s not found'", "%", "vm", ")", "if", "docker", "[", "'state'", "]", "!=", "'running'", ":", "Iaas", ".", "start", "(", "vm", ")", "# XXX", "remote_addr", "=", "docker", "[", "'ifaces'", "]", "[", "0", "]", "[", "'ips'", "]", "[", "0", "]", "[", "'ip'", "]", "port", "=", "unixpipe", ".", "setup", "(", "remote_addr", ",", "'root'", ",", "'/var/run/docker.sock'", ")", "os", ".", "environ", "[", "'DOCKER_HOST'", "]", "=", "'tcp://localhost:%d'", "%", "port", "cls", ".", "echo", "(", "'using DOCKER_HOST=%s'", "%", "os", ".", "environ", "[", "'DOCKER_HOST'", "]", ")", "subprocess", ".", "call", "(", "[", "'docker'", "]", "+", "list", "(", "args", ")", ")" ]
Setup forwarding connection to given VM and pipe docker cmds over SSH.
[ "Setup", "forwarding", "connection", "to", "given", "VM", "and", "pipe", "docker", "cmds", "over", "SSH", "." ]
python
train
31.25
PmagPy/PmagPy
pmagpy/validate_upload3.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload3.py#L191-L225
def cv(row, col_name, arg, current_data_model, df, con):
    """
    row[col_name] must contain only values from the appropriate controlled vocabulary
    """
    vocabulary = con.vocab.vocabularies
    cell_value = str(row[col_name])
    if not cell_value:
        return None
    elif cell_value == "None":
        return None
    cell_values = cell_value.split(":")
    cell_values = [c.strip() for c in cell_values]
    # get possible values for controlled vocabulary
    # exclude weird unicode
    possible_values = []
    for val in vocabulary[col_name]:
        try:
            possible_values.append(str(val).lower())
        except UnicodeEncodeError as ex:
            print(val, ex)
    for value in cell_values:
        if str(value).lower() == "nan":
            continue
        elif str(value).lower() in possible_values:
            continue
        elif value.lower() == "none":
            continue
        else:
            try:
                if str(float(value)) in possible_values:
                    continue
            except:
                pass
            return '"{}" is not in controlled vocabulary for {}'.format(value, arg)
    return None
[ "def", "cv", "(", "row", ",", "col_name", ",", "arg", ",", "current_data_model", ",", "df", ",", "con", ")", ":", "vocabulary", "=", "con", ".", "vocab", ".", "vocabularies", "cell_value", "=", "str", "(", "row", "[", "col_name", "]", ")", "if", "not", "cell_value", ":", "return", "None", "elif", "cell_value", "==", "\"None\"", ":", "return", "None", "cell_values", "=", "cell_value", ".", "split", "(", "\":\"", ")", "cell_values", "=", "[", "c", ".", "strip", "(", ")", "for", "c", "in", "cell_values", "]", "# get possible values for controlled vocabulary", "# exclude weird unicode", "possible_values", "=", "[", "]", "for", "val", "in", "vocabulary", "[", "col_name", "]", ":", "try", ":", "possible_values", ".", "append", "(", "str", "(", "val", ")", ".", "lower", "(", ")", ")", "except", "UnicodeEncodeError", "as", "ex", ":", "print", "(", "val", ",", "ex", ")", "for", "value", "in", "cell_values", ":", "if", "str", "(", "value", ")", ".", "lower", "(", ")", "==", "\"nan\"", ":", "continue", "elif", "str", "(", "value", ")", ".", "lower", "(", ")", "in", "possible_values", ":", "continue", "elif", "value", ".", "lower", "(", ")", "==", "\"none\"", ":", "continue", "else", ":", "try", ":", "if", "str", "(", "float", "(", "value", ")", ")", "in", "possible_values", ":", "continue", "except", ":", "pass", "return", "'\"{}\" is not in controlled vocabulary for {}'", ".", "format", "(", "value", ",", "arg", ")", "return", "None" ]
row[col_name] must contain only values from the appropriate controlled vocabulary
[ "row", "[", "col_name", "]", "must", "contain", "only", "values", "from", "the", "appropriate", "controlled", "vocabulary" ]
python
train
32.628571
rhayes777/PyAutoFit
autofit/conf.py
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/conf.py#L175-L192
def has(self, module_name, class_name, attribute_name):
    """
    Parameters
    ----------
    module_name: String
        The analysis_path of the module
    class_name: String
        The analysis_path of the class
    attribute_name: String
        The analysis_path of the attribute

    Returns
    -------
    has_prior: bool
        True iff a prior exists for the module, class and attribute
    """
    self.read(module_name)
    return self.parser.has_option(class_name, attribute_name)
[ "def", "has", "(", "self", ",", "module_name", ",", "class_name", ",", "attribute_name", ")", ":", "self", ".", "read", "(", "module_name", ")", "return", "self", ".", "parser", ".", "has_option", "(", "class_name", ",", "attribute_name", ")" ]
Parameters
----------
module_name: String
    The analysis_path of the module
class_name: String
    The analysis_path of the class
attribute_name: String
    The analysis_path of the attribute

Returns
-------
has_prior: bool
    True iff a prior exists for the module, class and attribute
[ "Parameters", "----------", "module_name", ":", "String", "The", "analysis_path", "of", "the", "module", "class_name", ":", "String", "The", "analysis_path", "of", "the", "class", "attribute_name", ":", "String", "The", "analysis_path", "of", "the", "attribute" ]
python
train
30.333333
adobe-apiplatform/umapi-client.py
umapi_client/functional.py
https://github.com/adobe-apiplatform/umapi-client.py/blob/1c446d79643cc8615adaa23e12dce3ac5782cf76/umapi_client/functional.py#L263-L271
def remove_from_organization(self, delete_account=False):
    """
    Remove a user from the organization's list of visible users. Optionally also delete the account.
    Deleting the account can only be done if the organization owns the account's domain.
    :param delete_account: Whether to delete the account after removing from the organization (default false)
    :return: None, because you cannot follow this command with another.
    """
    self.append(removeFromOrg={"deleteAccount": True if delete_account else False})
    return None
[ "def", "remove_from_organization", "(", "self", ",", "delete_account", "=", "False", ")", ":", "self", ".", "append", "(", "removeFromOrg", "=", "{", "\"deleteAccount\"", ":", "True", "if", "delete_account", "else", "False", "}", ")", "return", "None" ]
Remove a user from the organization's list of visible users. Optionally also delete the account.
Deleting the account can only be done if the organization owns the account's domain.
:param delete_account: Whether to delete the account after removing from the organization (default false)
:return: None, because you cannot follow this command with another.
[ "Remove", "a", "user", "from", "the", "organization", "s", "list", "of", "visible", "users", ".", "Optionally", "also", "delete", "the", "account", ".", "Deleting", "the", "account", "can", "only", "be", "done", "if", "the", "organization", "owns", "the", "account", "s", "domain", ".", ":", "param", "delete_account", ":", "Whether", "to", "delete", "the", "account", "after", "removing", "from", "the", "organization", "(", "default", "false", ")", ":", "return", ":", "None", "because", "you", "cannot", "follow", "this", "command", "with", "another", "." ]
python
train
63.333333
aio-libs/aioredis
aioredis/commands/string.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/string.py#L74-L81
def decrby(self, key, decrement):
    """Decrement the integer value of a key by the given number.

    :raises TypeError: if decrement is not int
    """
    if not isinstance(decrement, int):
        raise TypeError("decrement must be of type int")
    return self.execute(b'DECRBY', key, decrement)
[ "def", "decrby", "(", "self", ",", "key", ",", "decrement", ")", ":", "if", "not", "isinstance", "(", "decrement", ",", "int", ")", ":", "raise", "TypeError", "(", "\"decrement must be of type int\"", ")", "return", "self", ".", "execute", "(", "b'DECRBY'", ",", "key", ",", "decrement", ")" ]
Decrement the integer value of a key by the given number.

:raises TypeError: if decrement is not int
[ "Decrement", "the", "integer", "value", "of", "a", "key", "by", "the", "given", "number", "." ]
python
train
39.75
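A hedged usage sketch for the decrby command above, assuming the aioredis 1.x API this record comes from and a Redis server on localhost:

    import asyncio
    import aioredis  # aioredis 1.x

    async def main():
        redis = await aioredis.create_redis_pool('redis://localhost')
        await redis.set('counter', 10)
        # DECRBY returns the value after decrementing.
        assert await redis.decrby('counter', 3) == 7
        redis.close()
        await redis.wait_closed()

    asyncio.run(main())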
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L189-L218
def _estimate_runner_memory(json_file):
    """Estimate Java memory requirements based on number of samples.

    A rough approach to selecting correct allocated memory for Cromwell.
    """
    with open(json_file) as in_handle:
        sinfo = json.load(in_handle)
    num_parallel = 1
    for key in ["config__algorithm__variantcaller", "description"]:
        item_counts = []
        n = 0
        for val in (sinfo.get(key) or []):
            n += 1
            if val:
                if isinstance(val, (list, tuple)):
                    item_counts.append(len(val))
                else:
                    item_counts.append(1)
        print(key, n, item_counts)
        if n and item_counts:
            num_parallel = n * max(item_counts)
            break
    if num_parallel < 25:
        return "3g"
    if num_parallel < 150:
        return "6g"
    elif num_parallel < 500:
        return "12g"
    else:
        return "24g"
[ "def", "_estimate_runner_memory", "(", "json_file", ")", ":", "with", "open", "(", "json_file", ")", "as", "in_handle", ":", "sinfo", "=", "json", ".", "load", "(", "in_handle", ")", "num_parallel", "=", "1", "for", "key", "in", "[", "\"config__algorithm__variantcaller\"", ",", "\"description\"", "]", ":", "item_counts", "=", "[", "]", "n", "=", "0", "for", "val", "in", "(", "sinfo", ".", "get", "(", "key", ")", "or", "[", "]", ")", ":", "n", "+=", "1", "if", "val", ":", "if", "isinstance", "(", "val", ",", "(", "list", ",", "tuple", ")", ")", ":", "item_counts", ".", "append", "(", "len", "(", "val", ")", ")", "else", ":", "item_counts", ".", "append", "(", "1", ")", "print", "(", "key", ",", "n", ",", "item_counts", ")", "if", "n", "and", "item_counts", ":", "num_parallel", "=", "n", "*", "max", "(", "item_counts", ")", "break", "if", "num_parallel", "<", "25", ":", "return", "\"3g\"", "if", "num_parallel", "<", "150", ":", "return", "\"6g\"", "elif", "num_parallel", "<", "500", ":", "return", "\"12g\"", "else", ":", "return", "\"24g\"" ]
Estimate Java memory requirements based on number of samples. A rough approach to selecting correct allocated memory for Cromwell.
[ "Estimate", "Java", "memory", "requirements", "based", "on", "number", "of", "samples", "." ]
python
train
30.533333
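To see the thresholds in _estimate_runner_memory in action, here is a small sketch with a made-up input file (real bcbio sample JSON has many more keys): two samples with one variantcaller each give num_parallel = 2, which lands in the smallest bucket.

    import json
    import tempfile

    # Hypothetical minimal input for illustration only.
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fh:
        json.dump({'config__algorithm__variantcaller': [['gatk'], ['gatk']]}, fh)

    print(_estimate_runner_memory(fh.name))  # -> 3g (after the function's debug print)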
apache/incubator-mxnet
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L282-L289
def _elu(attrs, inputs, proto_obj):
    """Elu function"""
    if 'alpha' in attrs:
        new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha': 'slope'})
    else:
        new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'})
    return 'LeakyReLU', new_attrs, inputs
[ "def", "_elu", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "if", "'alpha'", "in", "attrs", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'alpha'", ":", "'slope'", "}", ")", "else", ":", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'slope'", ":", "1.0", "}", ")", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "new_attrs", ",", "{", "'act_type'", ":", "'elu'", "}", ")", "return", "'LeakyReLU'", ",", "new_attrs", ",", "inputs" ]
Elu function
[ "Elu", "function" ]
python
train
48.25
astrocatalogs/astrocats
astrocats/catalog/photometry.py
https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/photometry.py#L421-L459
def set_pd_mag_from_counts(photodict, c='', ec='', lec='', uec='',
                           zp=DEFAULT_ZP, sig=DEFAULT_UL_SIGMA):
    """Set photometry dictionary from a counts measurement."""
    with localcontext() as ctx:
        if lec == '' or uec == '':
            lec = ec
            uec = ec
        prec = max(
            get_sig_digits(str(c), strip_zeroes=False),
            get_sig_digits(str(lec), strip_zeroes=False),
            get_sig_digits(str(uec), strip_zeroes=False)) + 1
        ctx.prec = prec
        dlec = Decimal(str(lec))
        duec = Decimal(str(uec))
        if c != '':
            dc = Decimal(str(c))
        dzp = Decimal(str(zp))
        dsig = Decimal(str(sig))
        photodict[PHOTOMETRY.ZERO_POINT] = str(zp)
        if c == '' or float(c) < float(sig) * float(uec):
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
            photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(sig)
            photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - (D25 * (
                dsig * duec).log10()))
            dnec = Decimal('10.0') ** (
                (dzp - Decimal(photodict[PHOTOMETRY.MAGNITUDE])) / D25)
            photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
                (dnec + duec).log10() - dnec.log10()))
        else:
            photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - D25 * dc.log10())
            photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
                (dc + duec).log10() - dc.log10()))
            photodict[PHOTOMETRY.E_LOWER_MAGNITUDE] = str(D25 * (
                dc.log10() - (dc - dlec).log10()))
[ "def", "set_pd_mag_from_counts", "(", "photodict", ",", "c", "=", "''", ",", "ec", "=", "''", ",", "lec", "=", "''", ",", "uec", "=", "''", ",", "zp", "=", "DEFAULT_ZP", ",", "sig", "=", "DEFAULT_UL_SIGMA", ")", ":", "with", "localcontext", "(", ")", "as", "ctx", ":", "if", "lec", "==", "''", "or", "uec", "==", "''", ":", "lec", "=", "ec", "uec", "=", "ec", "prec", "=", "max", "(", "get_sig_digits", "(", "str", "(", "c", ")", ",", "strip_zeroes", "=", "False", ")", ",", "get_sig_digits", "(", "str", "(", "lec", ")", ",", "strip_zeroes", "=", "False", ")", ",", "get_sig_digits", "(", "str", "(", "uec", ")", ",", "strip_zeroes", "=", "False", ")", ")", "+", "1", "ctx", ".", "prec", "=", "prec", "dlec", "=", "Decimal", "(", "str", "(", "lec", ")", ")", "duec", "=", "Decimal", "(", "str", "(", "uec", ")", ")", "if", "c", "!=", "''", ":", "dc", "=", "Decimal", "(", "str", "(", "c", ")", ")", "dzp", "=", "Decimal", "(", "str", "(", "zp", ")", ")", "dsig", "=", "Decimal", "(", "str", "(", "sig", ")", ")", "photodict", "[", "PHOTOMETRY", ".", "ZERO_POINT", "]", "=", "str", "(", "zp", ")", "if", "c", "==", "''", "or", "float", "(", "c", ")", "<", "float", "(", "sig", ")", "*", "float", "(", "uec", ")", ":", "photodict", "[", "PHOTOMETRY", ".", "UPPER_LIMIT", "]", "=", "True", "photodict", "[", "PHOTOMETRY", ".", "UPPER_LIMIT_SIGMA", "]", "=", "str", "(", "sig", ")", "photodict", "[", "PHOTOMETRY", ".", "MAGNITUDE", "]", "=", "str", "(", "dzp", "-", "(", "D25", "*", "(", "dsig", "*", "duec", ")", ".", "log10", "(", ")", ")", ")", "dnec", "=", "Decimal", "(", "'10.0'", ")", "**", "(", "(", "dzp", "-", "Decimal", "(", "photodict", "[", "PHOTOMETRY", ".", "MAGNITUDE", "]", ")", ")", "/", "D25", ")", "photodict", "[", "PHOTOMETRY", ".", "E_UPPER_MAGNITUDE", "]", "=", "str", "(", "D25", "*", "(", "(", "dnec", "+", "duec", ")", ".", "log10", "(", ")", "-", "dnec", ".", "log10", "(", ")", ")", ")", "else", ":", "photodict", "[", "PHOTOMETRY", ".", "MAGNITUDE", "]", "=", "str", "(", "dzp", "-", "D25", "*", "dc", ".", "log10", "(", ")", ")", "photodict", "[", "PHOTOMETRY", ".", "E_UPPER_MAGNITUDE", "]", "=", "str", "(", "D25", "*", "(", "(", "dc", "+", "duec", ")", ".", "log10", "(", ")", "-", "dc", ".", "log10", "(", ")", ")", ")", "photodict", "[", "PHOTOMETRY", ".", "E_LOWER_MAGNITUDE", "]", "=", "str", "(", "D25", "*", "(", "dc", ".", "log10", "(", ")", "-", "(", "dc", "-", "dlec", ")", ".", "log10", "(", ")", ")", ")" ]
Set photometry dictionary from a counts measurement.
[ "Set", "photometry", "dictionary", "from", "a", "counts", "measurement", "." ]
python
train
44.487179
radjkarl/fancyTools
fancytools/math/scale.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/scale.py#L2-L15
def scale(arr, mn=0, mx=1):
    """
    Apply min-max scaling (normalize) then scale to (mn,mx)
    """
    amn = arr.min()
    amx = arr.max()
    # normalize:
    arr = (arr - amn) / (amx - amn)
    # scale:
    if amn != mn or amx != mx:
        arr *= mx - mn
        arr += mn
    return arr
[ "def", "scale", "(", "arr", ",", "mn", "=", "0", ",", "mx", "=", "1", ")", ":", "amn", "=", "arr", ".", "min", "(", ")", "amx", "=", "arr", ".", "max", "(", ")", "# normalize:", "arr", "=", "(", "arr", "-", "amn", ")", "/", "(", "amx", "-", "amn", ")", "# scale:", "if", "amn", "!=", "mn", "or", "amx", "!=", "mx", ":", "arr", "*=", "mx", "-", "mn", "arr", "+=", "mn", "return", "arr" ]
Apply min-max scaling (normalize) then scale to (mn,mx)
[ "Apply", "min", "-", "max", "scaling", "(", "normalize", ")", "then", "scale", "to", "(", "mn", "mx", ")" ]
python
train
20.5
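A quick check of the scale function above with a small NumPy array; min-max normalization maps the data onto [0, 1] before stretching it onto (mn, mx):

    import numpy as np

    arr = np.array([2.0, 4.0, 6.0])
    print(scale(arr, mn=-1, mx=1))  # [-1.  0.  1.]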
quantopian/zipline
zipline/pipeline/factors/factor.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1393-L1404
def _compute(self, arrays, dates, assets, mask):
    """
    For each row in the input, compute a like-shaped array of per-row ranks.
    """
    return masked_rankdata_2d(
        arrays[0],
        mask,
        self.inputs[0].missing_value,
        self._method,
        self._ascending,
    )
[ "def", "_compute", "(", "self", ",", "arrays", ",", "dates", ",", "assets", ",", "mask", ")", ":", "return", "masked_rankdata_2d", "(", "arrays", "[", "0", "]", ",", "mask", ",", "self", ".", "inputs", "[", "0", "]", ".", "missing_value", ",", "self", ".", "_method", ",", "self", ".", "_ascending", ",", ")" ]
For each row in the input, compute a like-shaped array of per-row ranks.
[ "For", "each", "row", "in", "the", "input", "compute", "a", "like", "-", "shaped", "array", "of", "per", "-", "row", "ranks", "." ]
python
train
27.75
ASMfreaK/habitipy
habitipy/util.py
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/util.py#L166-L169
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
    """finds and installs translation functions for package"""
    translation = get_translation_for(package_name)
    return [getattr(translation, x) for x in names]
[ "def", "get_translation_functions", "(", "package_name", ":", "str", ",", "names", ":", "Tuple", "[", "str", ",", "...", "]", "=", "(", "'gettext'", ",", ")", ")", ":", "translation", "=", "get_translation_for", "(", "package_name", ")", "return", "[", "getattr", "(", "translation", ",", "x", ")", "for", "x", "in", "names", "]" ]
finds and installs translation functions for package
[ "finds", "and", "installs", "translation", "functions", "for", "package" ]
python
train
63
waqasbhatti/astrobase
astrobase/varclass/varfeatures.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/varfeatures.py#L544-L662
def gilliland_cdpp(times, mags, errs,
                   windowlength=97,
                   polyorder=2,
                   binsize=23400,  # in seconds: 6.5 hours for classic CDPP
                   sigclip=5.0,
                   magsarefluxes=False,
                   **kwargs):
    '''This calculates the CDPP of a timeseries using the method in the paper:

    Gilliland, R. L., Chaplin, W. J., Dunham, E. W., et al. 2011, ApJS, 197, 6
    (http://adsabs.harvard.edu/abs/2011ApJS..197....6G)

    The steps are:

    - pass the time-series through a Savitsky-Golay filter.

      - we use `scipy.signal.savgol_filter`, `**kwargs` are passed to this.

      - also see: http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay.

      - the `windowlength` is the number of LC points to use (Kepler uses 2
        days = (1440 minutes/day / 30 minutes/LC point) x 2 days = 96 -> 97 LC
        points).

      - the `polyorder` is a quadratic by default.

    - subtract the smoothed time-series from the actual light curve.

    - sigma clip the remaining LC.

    - get the binned mag series by averaging over 6.5 hour bins, only
      retaining bins with at least 7 points.

    - the standard deviation of the binned averages is the CDPP.

    - multiply this by 1.168 to correct for over-subtraction of white-noise.

    Parameters
    ----------

    times,mags,errs : np.array
        The input mag/flux time-series to calculate CDPP for.

    windowlength : int
        The smoothing window size to use.

    polyorder : int
        The polynomial order to use in the Savitsky-Golay smoothing.

    binsize : int
        The bin size to use for binning the light curve.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed
        using the number provided as the sigma-multiplier to cut out from the
        input time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    magsarefluxes : bool
        If True, indicates the input time-series is fluxes and not mags.

    kwargs : additional kwargs
        These are passed directly to `scipy.signal.savgol_filter`.

    Returns
    -------

    float
        The calculated CDPP value.

    '''

    # if no errs are given, assume 0.1% errors
    if errs is None:
        errs = 0.001*mags

    # get rid of nans first
    find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
    ftimes = times[find]
    fmags = mags[find]
    ferrs = errs[find]

    if ftimes.size < (3*windowlength):
        LOGERROR('not enough LC points to calculate CDPP')
        return npnan

    # now get the smoothed mag series using the filter
    # kwargs are provided to the savgol_filter function
    smoothed = savgol_filter(fmags, windowlength, polyorder, **kwargs)
    subtracted = fmags - smoothed

    # sigclip the subtracted light curve
    stimes, smags, serrs = sigclip_magseries(ftimes, subtracted, ferrs,
                                             magsarefluxes=magsarefluxes)

    # bin over 6.5 hour bins and throw away all bins with less than 7 elements
    binned = time_bin_magseries_with_errs(stimes, smags, serrs,
                                          binsize=binsize,
                                          minbinelems=7)
    bmags = binned['binnedmags']

    # stdev of bin mags x 1.168 -> CDPP
    cdpp = npstd(bmags) * 1.168

    return cdpp
[ "def", "gilliland_cdpp", "(", "times", ",", "mags", ",", "errs", ",", "windowlength", "=", "97", ",", "polyorder", "=", "2", ",", "binsize", "=", "23400", ",", "# in seconds: 6.5 hours for classic CDPP", "sigclip", "=", "5.0", ",", "magsarefluxes", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# if no errs are given, assume 0.1% errors", "if", "errs", "is", "None", ":", "errs", "=", "0.001", "*", "mags", "# get rid of nans first", "find", "=", "npisfinite", "(", "times", ")", "&", "npisfinite", "(", "mags", ")", "&", "npisfinite", "(", "errs", ")", "ftimes", "=", "times", "[", "find", "]", "fmags", "=", "mags", "[", "find", "]", "ferrs", "=", "errs", "[", "find", "]", "if", "ftimes", ".", "size", "<", "(", "3", "*", "windowlength", ")", ":", "LOGERROR", "(", "'not enough LC points to calculate CDPP'", ")", "return", "npnan", "# now get the smoothed mag series using the filter", "# kwargs are provided to the savgol_filter function", "smoothed", "=", "savgol_filter", "(", "fmags", ",", "windowlength", ",", "polyorder", ",", "*", "*", "kwargs", ")", "subtracted", "=", "fmags", "-", "smoothed", "# sigclip the subtracted light curve", "stimes", ",", "smags", ",", "serrs", "=", "sigclip_magseries", "(", "ftimes", ",", "subtracted", ",", "ferrs", ",", "magsarefluxes", "=", "magsarefluxes", ")", "# bin over 6.5 hour bins and throw away all bins with less than 7 elements", "binned", "=", "time_bin_magseries_with_errs", "(", "stimes", ",", "smags", ",", "serrs", ",", "binsize", "=", "binsize", ",", "minbinelems", "=", "7", ")", "bmags", "=", "binned", "[", "'binnedmags'", "]", "# stdev of bin mags x 1.168 -> CDPP", "cdpp", "=", "npstd", "(", "bmags", ")", "*", "1.168", "return", "cdpp" ]
This calculates the CDPP of a timeseries using the method in the paper:

Gilliland, R. L., Chaplin, W. J., Dunham, E. W., et al. 2011, ApJS, 197, 6
(http://adsabs.harvard.edu/abs/2011ApJS..197....6G)

The steps are:

- pass the time-series through a Savitsky-Golay filter.

  - we use `scipy.signal.savgol_filter`, `**kwargs` are passed to this.

  - also see: http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay.

  - the `windowlength` is the number of LC points to use (Kepler uses 2 days =
    (1440 minutes/day / 30 minutes/LC point) x 2 days = 96 -> 97 LC points).

  - the `polyorder` is a quadratic by default.

- subtract the smoothed time-series from the actual light curve.

- sigma clip the remaining LC.

- get the binned mag series by averaging over 6.5 hour bins, only retaining
  bins with at least 7 points.

- the standard deviation of the binned averages is the CDPP.

- multiply this by 1.168 to correct for over-subtraction of white-noise.

Parameters
----------

times,mags,errs : np.array
    The input mag/flux time-series to calculate CDPP for.

windowlength : int
    The smoothing window size to use.

polyorder : int
    The polynomial order to use in the Savitsky-Golay smoothing.

binsize : int
    The bin size to use for binning the light curve.

sigclip : float or int or sequence of two floats/ints or None
    If a single float or int, a symmetric sigma-clip will be performed using
    the number provided as the sigma-multiplier to cut out from the input
    time-series.

    If a list of two ints/floats is provided, the function will perform an
    'asymmetric' sigma-clip. The first element in this list is the sigma value
    to use for fainter flux/mag values; the second element in this list is the
    sigma value to use for brighter flux/mag values. For example,
    `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and
    greater than 3-sigma brightenings. Here the meaning of "dimming" and
    "brightening" is set by *physics* (not the magnitude system), which is why
    the `magsarefluxes` kwarg must be correctly set.

    If `sigclip` is None, no sigma-clipping will be performed, and the
    time-series (with non-finite elems removed) will be passed through to the
    output.

magsarefluxes : bool
    If True, indicates the input time-series is fluxes and not mags.

kwargs : additional kwargs
    These are passed directly to `scipy.signal.savgol_filter`.

Returns
-------

float
    The calculated CDPP value.
[ "This", "calculates", "the", "CDPP", "of", "a", "timeseries", "using", "the", "method", "in", "the", "paper", ":" ]
python
valid
34.05042
Duke-GCB/DukeDSClient
ddsc/core/download.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L366-L379
def download_file_part_run(download_context):
    """
    Function run by CreateProjectCommand to create the project.
    Runs in a background process.
    :param download_context: UploadContext: contains data service setup and project name to create.
    """
    destination_dir, file_url_data_dict, seek_amt, bytes_to_read = download_context.params
    project_file = ProjectFile(file_url_data_dict)
    local_path = project_file.get_local_path(destination_dir)
    retry_chunk_downloader = RetryChunkDownloader(project_file, local_path,
                                                  seek_amt, bytes_to_read,
                                                  download_context)
    retry_chunk_downloader.run()
    return 'ok'
[ "def", "download_file_part_run", "(", "download_context", ")", ":", "destination_dir", ",", "file_url_data_dict", ",", "seek_amt", ",", "bytes_to_read", "=", "download_context", ".", "params", "project_file", "=", "ProjectFile", "(", "file_url_data_dict", ")", "local_path", "=", "project_file", ".", "get_local_path", "(", "destination_dir", ")", "retry_chunk_downloader", "=", "RetryChunkDownloader", "(", "project_file", ",", "local_path", ",", "seek_amt", ",", "bytes_to_read", ",", "download_context", ")", "retry_chunk_downloader", ".", "run", "(", ")", "return", "'ok'" ]
Function run by CreateProjectCommand to create the project.
Runs in a background process.
:param download_context: UploadContext: contains data service setup and project name to create.
[ "Function", "run", "by", "CreateProjectCommand", "to", "create", "the", "project", ".", "Runs", "in", "a", "background", "process", ".", ":", "param", "download_context", ":", "UploadContext", ":", "contains", "data", "service", "setup", "and", "project", "name", "to", "create", "." ]
python
train
51.285714
datastax/python-driver
cassandra/encoder.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/encoder.py#L227-L235
def cql_encode_all_types(self, val, as_text_type=False):
    """
    Converts any type into a CQL string, defaulting to ``cql_encode_object``
    if :attr:`~Encoder.mapping` does not contain an entry for the type.
    """
    encoded = self.mapping.get(type(val), self.cql_encode_object)(val)
    if as_text_type and not isinstance(encoded, six.text_type):
        return encoded.decode('utf-8')
    return encoded
[ "def", "cql_encode_all_types", "(", "self", ",", "val", ",", "as_text_type", "=", "False", ")", ":", "encoded", "=", "self", ".", "mapping", ".", "get", "(", "type", "(", "val", ")", ",", "self", ".", "cql_encode_object", ")", "(", "val", ")", "if", "as_text_type", "and", "not", "isinstance", "(", "encoded", ",", "six", ".", "text_type", ")", ":", "return", "encoded", ".", "decode", "(", "'utf-8'", ")", "return", "encoded" ]
Converts any type into a CQL string, defaulting to ``cql_encode_object`` if :attr:`~Encoder.mapping` does not contain an entry for the type.
[ "Converts", "any", "type", "into", "a", "CQL", "string", "defaulting", "to", "cql_encode_object", "if", ":", "attr", ":", "~Encoder", ".", "mapping", "does", "not", "contain", "an", "entry", "for", "the", "type", "." ]
python
train
48.666667
GoogleCloudPlatform/google-cloud-datastore
python/googledatastore/helper.py
https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L357-L378
def set_property_filter(filter_proto, name, op, value):
    """Set property filter constraint in the given datastore.Filter proto message.

    Args:
      filter_proto: datastore.Filter proto message
      name: property name
      op: datastore.PropertyFilter.Operation
      value: property value

    Returns:
      the same datastore.Filter.

    Usage:
      >>> set_property_filter(filter_proto, 'foo',
      ...     datastore.PropertyFilter.EQUAL, 'a')  # WHERE 'foo' = 'a'
    """
    filter_proto.Clear()
    pf = filter_proto.property_filter
    pf.property.name = name
    pf.op = op
    set_value(pf.value, value)
    return filter_proto
[ "def", "set_property_filter", "(", "filter_proto", ",", "name", ",", "op", ",", "value", ")", ":", "filter_proto", ".", "Clear", "(", ")", "pf", "=", "filter_proto", ".", "property_filter", "pf", ".", "property", ".", "name", "=", "name", "pf", ".", "op", "=", "op", "set_value", "(", "pf", ".", "value", ",", "value", ")", "return", "filter_proto" ]
Set property filter constraint in the given datastore.Filter proto message.

Args:
  filter_proto: datastore.Filter proto message
  name: property name
  op: datastore.PropertyFilter.Operation
  value: property value

Returns:
  the same datastore.Filter.

Usage:
  >>> set_property_filter(filter_proto, 'foo',
  ...     datastore.PropertyFilter.EQUAL, 'a')  # WHERE 'foo' = 'a'
[ "Set", "property", "filter", "contraint", "in", "the", "given", "datastore", ".", "Filter", "proto", "message", "." ]
python
train
26.818182
budacom/trading-bots
trading_bots/contrib/converters/base.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/converters/base.py#L83-L86
def convert_money(self, money: Money, to: str, reverse: bool = False) -> Money:
    """Convert money to another currency"""
    converted = self.convert(money.amount, money.currency, to, reverse)
    return Money(converted, to)
[ "def", "convert_money", "(", "self", ",", "money", ":", "Money", ",", "to", ":", "str", ",", "reverse", ":", "bool", "=", "False", ")", "->", "Money", ":", "converted", "=", "self", ".", "convert", "(", "money", ".", "amount", ",", "money", ".", "currency", ",", "to", ",", "reverse", ")", "return", "Money", "(", "converted", ",", "to", ")" ]
Convert money to another currency
[ "Convert", "money", "to", "another", "currency" ]
python
train
58.5
Scoppio/RagnarokEngine3
Tutorials/Platforming Block - PyGame Release/Game/Code/Ragnarok.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/Tutorials/Platforming Block - PyGame Release/Game/Code/Ragnarok.py#L860-L871
def is_identity(self):
    """Check to see if this matrix is an identity matrix."""
    for index, row in enumerate(self.dta):
        if row[index] == 1:
            for num, element in enumerate(row):
                if num != index:
                    if element != 0:
                        return False
        else:
            return False
    return True
[ "def", "is_identity", "(", ")", ":", "for", "index", ",", "row", "in", "enumerate", "(", "self", ".", "dta", ")", ":", "if", "row", "[", "index", "]", "==", "1", ":", "for", "num", ",", "element", "in", "enumerate", "(", "row", ")", ":", "if", "num", "!=", "index", ":", "if", "element", "!=", "0", ":", "return", "False", "else", ":", "return", "False", "return", "True" ]
Check to see if this matrix is an identity matrix.
[ "Check", "to", "see", "if", "this", "matrix", "is", "an", "identity", "matrix", "." ]
python
train
32.5
quantopian/zipline
zipline/utils/functional.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/functional.py#L145-L187
def _gen_unzip(it, elem_len):
    """Helper for unzip which checks the lengths of each element in it.

    Parameters
    ----------
    it : iterable[tuple]
        An iterable of tuples. ``unzip`` should ensure that these are
        already tuples.
    elem_len : int or None
        The expected element length. If this is None it is inferred from
        the length of the first element.

    Yields
    ------
    elem : tuple
        Each element of ``it``.

    Raises
    ------
    ValueError
        Raised when the lengths do not match the ``elem_len``.
    """
    elem = next(it)
    first_elem_len = len(elem)

    if elem_len is not None and elem_len != first_elem_len:
        raise ValueError(
            'element at index 0 was length %d, expected %d' % (
                first_elem_len,
                elem_len,
            )
        )
    else:
        elem_len = first_elem_len

    yield elem
    for n, elem in enumerate(it, 1):
        if len(elem) != elem_len:
            raise ValueError(
                'element at index %d was length %d, expected %d' % (
                    n,
                    len(elem),
                    elem_len,
                ),
            )
        yield elem
[ "def", "_gen_unzip", "(", "it", ",", "elem_len", ")", ":", "elem", "=", "next", "(", "it", ")", "first_elem_len", "=", "len", "(", "elem", ")", "if", "elem_len", "is", "not", "None", "and", "elem_len", "!=", "first_elem_len", ":", "raise", "ValueError", "(", "'element at index 0 was length %d, expected %d'", "%", "(", "first_elem_len", ",", "elem_len", ",", ")", ")", "else", ":", "elem_len", "=", "first_elem_len", "yield", "elem", "for", "n", ",", "elem", "in", "enumerate", "(", "it", ",", "1", ")", ":", "if", "len", "(", "elem", ")", "!=", "elem_len", ":", "raise", "ValueError", "(", "'element at index %d was length %d, expected %d'", "%", "(", "n", ",", "len", "(", "elem", ")", ",", "elem_len", ",", ")", ",", ")", "yield", "elem" ]
Helper for unzip which checks the lengths of each element in it.

Parameters
----------
it : iterable[tuple]
    An iterable of tuples. ``unzip`` should ensure that these are already tuples.
elem_len : int or None
    The expected element length. If this is None it is inferred from the length of the first element.

Yields
------
elem : tuple
    Each element of ``it``.

Raises
------
ValueError
    Raised when the lengths do not match the ``elem_len``.
[ "Helper", "for", "unzip", "which", "checks", "the", "lengths", "of", "each", "element", "in", "it", ".", "Parameters", "----------", "it", ":", "iterable", "[", "tuple", "]", "An", "iterable", "of", "tuples", ".", "unzip", "should", "map", "ensure", "that", "these", "are", "already", "tuples", ".", "elem_len", ":", "int", "or", "None", "The", "expected", "element", "length", ".", "If", "this", "is", "None", "it", "is", "infered", "from", "the", "length", "of", "the", "first", "element", ".", "Yields", "------", "elem", ":", "tuple", "Each", "element", "of", "it", ".", "Raises", "------", "ValueError", "Raised", "when", "the", "lengths", "do", "not", "match", "the", "elem_len", "." ]
python
train
27.465116
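A small sketch of consuming _gen_unzip directly; in zipline it backs an `unzip` helper, but on its own it simply validates that every tuple has the same length:

    pairs = iter([(1, 'a'), (2, 'b')])
    print(list(_gen_unzip(pairs, None)))  # [(1, 'a'), (2, 'b')]

    ragged = iter([(1, 'a'), (2, 'b', 'c')])
    list(_gen_unzip(ragged, None))
    # ValueError: element at index 1 was length 3, expected 2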
Kane610/axis
axis/streammanager.py
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/streammanager.py#L52-L67
def session_callback(self, signal):
    """Signalling from stream session.

    Data - new data available for processing.
    Playing - Connection is healthy.
    Retry - if there is no connection to device.
    """
    if signal == SIGNAL_DATA:
        self.event.new_event(self.data)

    elif signal == SIGNAL_FAILED:
        self.retry()

    if signal in [SIGNAL_PLAYING, SIGNAL_FAILED] and \
            self.connection_status_callback:
        self.connection_status_callback(signal)
[ "def", "session_callback", "(", "self", ",", "signal", ")", ":", "if", "signal", "==", "SIGNAL_DATA", ":", "self", ".", "event", ".", "new_event", "(", "self", ".", "data", ")", "elif", "signal", "==", "SIGNAL_FAILED", ":", "self", ".", "retry", "(", ")", "if", "signal", "in", "[", "SIGNAL_PLAYING", ",", "SIGNAL_FAILED", "]", "and", "self", ".", "connection_status_callback", ":", "self", ".", "connection_status_callback", "(", "signal", ")" ]
Signalling from stream session.

Data - new data available for processing.
Playing - Connection is healthy.
Retry - if there is no connection to device.
[ "Signalling", "from", "stream", "session", "." ]
python
train
32.6875
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/control_client/servicebusservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/control_client/servicebusservice.py#L625-L642
def list_subscriptions(self, topic_name): ''' Retrieves the subscriptions in the specified topic. topic_name: Name of the topic. ''' _validate_not_none('topic_name', topic_name) request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = '/' + _str(topic_name) + '/subscriptions/' request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access request.headers = self._update_service_bus_header(request) response = self._perform_request(request) return _ETreeXmlToObject.convert_response_to_feeds( response, _convert_etree_element_to_subscription)
[ "def", "list_subscriptions", "(", "self", ",", "topic_name", ")", ":", "_validate_not_none", "(", "'topic_name'", ",", "topic_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "'/'", "+", "_str", "(", "topic_name", ")", "+", "'/subscriptions/'", "request", ".", "path", ",", "request", ".", "query", "=", "self", ".", "_httpclient", ".", "_update_request_uri_query", "(", "request", ")", "# pylint: disable=protected-access", "request", ".", "headers", "=", "self", ".", "_update_service_bus_header", "(", "request", ")", "response", "=", "self", ".", "_perform_request", "(", "request", ")", "return", "_ETreeXmlToObject", ".", "convert_response_to_feeds", "(", "response", ",", "_convert_etree_element_to_subscription", ")" ]
Retrieves the subscriptions in the specified topic. topic_name: Name of the topic.
[ "Retrieves", "the", "subscriptions", "in", "the", "specified", "topic", "." ]
python
test
41.555556
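A hedged usage sketch for list_subscriptions above; the namespace, key, and topic are placeholders, and the import path assumes the legacy azure-servicebus control-client packaging.

from azure.servicebus.control_client import ServiceBusService   # assumed import path

sbs = ServiceBusService(
    service_namespace='mynamespace',                      # placeholder
    shared_access_key_name='RootManageSharedAccessKey',   # placeholder
    shared_access_key_value='<key-value>')                # placeholder
for subscription in sbs.list_subscriptions('mytopic'):    # placeholder topic
    print(subscription.name)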
pydsigner/pygu
pygu/common.py
https://github.com/pydsigner/pygu/blob/09fe71534900933908ab83db12f5659b7827e31c/pygu/common.py#L17-L23
def center_blit(target, source, dest = (0, 0), area=None, special_flags=0): ''' Blits surface @source to the center of surface @target. Takes the normal Surface.blit() flags; however, @dest is used as an offset. ''' loc = lambda d, s: _vec(d.get_size()) / 2 - _vec(s.get_size()) / 2 _blitter(loc, target, source, dest, area, special_flags)
[ "def", "center_blit", "(", "target", ",", "source", ",", "dest", "=", "(", "0", ",", "0", ")", ",", "area", "=", "None", ",", "special_flags", "=", "0", ")", ":", "loc", "=", "lambda", "d", ",", "s", ":", "_vec", "(", "d", ".", "get_size", "(", ")", ")", "/", "2", "-", "_vec", "(", "s", ".", "get_size", "(", ")", ")", "/", "2", "_blitter", "(", "loc", ",", "target", ",", "source", ",", "dest", ",", "area", ",", "special_flags", ")" ]
Blits surface @source to the center of surface @target. Takes the normal Surface.blit() flags; however, @dest is used as an offset.
[ "Blits", "surface" ]
python
train
51.142857
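A minimal pygame sketch for center_blit above; the surface sizes and the pygu.common import path are assumptions.

import pygame
from pygu.common import center_blit   # assumed import path

pygame.init()
screen = pygame.display.set_mode((200, 200))
badge = pygame.Surface((50, 50))
badge.fill((200, 0, 0))
center_blit(screen, badge)      # badge is centered; a non-zero dest would offset it
pygame.display.flip()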
angr/angr
angr/concretization_strategies/__init__.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/concretization_strategies/__init__.py#L45-L49
def _range(self, memory, addr, **kwargs): """ Gets the (min, max) range of solutions for an address. """ return (self._min(memory, addr, **kwargs), self._max(memory, addr, **kwargs))
[ "def", "_range", "(", "self", ",", "memory", ",", "addr", ",", "*", "*", "kwargs", ")", ":", "return", "(", "self", ".", "_min", "(", "memory", ",", "addr", ",", "*", "*", "kwargs", ")", ",", "self", ".", "_max", "(", "memory", ",", "addr", ",", "*", "*", "kwargs", ")", ")" ]
Gets the (min, max) range of solutions for an address.
[ "Gets", "the", "(", "min", "max", ")", "range", "of", "solutions", "for", "an", "address", "." ]
python
train
42
RedHatInsights/insights-core
insights/core/dr.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L237-L245
def get_name(component): """ Attempt to get the string name of component, including module and class if applicable. """ if six.callable(component): name = getattr(component, "__qualname__", component.__name__) return '.'.join([component.__module__, name]) return str(component)
[ "def", "get_name", "(", "component", ")", ":", "if", "six", ".", "callable", "(", "component", ")", ":", "name", "=", "getattr", "(", "component", ",", "\"__qualname__\"", ",", "component", ".", "__name__", ")", "return", "'.'", ".", "join", "(", "[", "component", ".", "__module__", ",", "name", "]", ")", "return", "str", "(", "component", ")" ]
Attempt to get the string name of component, including module and class if applicable.
[ "Attempt", "to", "get", "the", "string", "name", "of", "component", "including", "module", "and", "class", "if", "applicable", "." ]
python
train
34.333333
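A small sketch of get_name's two branches; run as a script, so the module prefix is __main__.

from insights.core.dr import get_name   # assumes insights-core is installed

def my_component():
    pass

print(get_name(my_component))    # '__main__.my_component'
print(get_name('not callable'))  # falls back to str(): 'not callable'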
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L5498-L5680
def read(self, vals):
        """Read values.

        Args:
            vals (list): list of strings representing values
        """
        i = 0
        if len(vals[i]) == 0:
            self.year = None
        else:
            self.year = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.month = None
        else:
            self.month = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.day = None
        else:
            self.day = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.hour = None
        else:
            self.hour = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.minute = None
        else:
            self.minute = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.data_source_and_uncertainty_flags = None
        else:
            self.data_source_and_uncertainty_flags = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.dry_bulb_temperature = None
        else:
            self.dry_bulb_temperature = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.dew_point_temperature = None
        else:
            self.dew_point_temperature = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.relative_humidity = None
        else:
            self.relative_humidity = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.atmospheric_station_pressure = None
        else:
            self.atmospheric_station_pressure = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.extraterrestrial_horizontal_radiation = None
        else:
            self.extraterrestrial_horizontal_radiation = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.extraterrestrial_direct_normal_radiation = None
        else:
            self.extraterrestrial_direct_normal_radiation = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.horizontal_infrared_radiation_intensity = None
        else:
            self.horizontal_infrared_radiation_intensity = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.global_horizontal_radiation = None
        else:
            self.global_horizontal_radiation = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.direct_normal_radiation = None
        else:
            self.direct_normal_radiation = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.diffuse_horizontal_radiation = None
        else:
            self.diffuse_horizontal_radiation = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.global_horizontal_illuminance = None
        else:
            self.global_horizontal_illuminance = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.direct_normal_illuminance = None
        else:
            self.direct_normal_illuminance = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.diffuse_horizontal_illuminance = None
        else:
            self.diffuse_horizontal_illuminance = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.zenith_luminance = None
        else:
            self.zenith_luminance = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.wind_direction = None
        else:
            self.wind_direction = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.wind_speed = None
        else:
            self.wind_speed = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.total_sky_cover = None
        else:
            self.total_sky_cover = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.opaque_sky_cover = None
        else:
            self.opaque_sky_cover = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.visibility = None
        else:
            self.visibility = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.ceiling_height = None
        else:
            self.ceiling_height = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.present_weather_observation = None
        else:
            self.present_weather_observation = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.present_weather_codes = None
        else:
            self.present_weather_codes = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.precipitable_water = None
        else:
            self.precipitable_water = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.aerosol_optical_depth = None
        else:
            self.aerosol_optical_depth = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.snow_depth = None
        else:
            self.snow_depth = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.days_since_last_snowfall = None
        else:
            self.days_since_last_snowfall = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.albedo = None
        else:
            self.albedo = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.liquid_precipitation_depth = None
        else:
            self.liquid_precipitation_depth = vals[i]
        i += 1
        if len(vals[i]) == 0:
            self.liquid_precipitation_quantity = None
        else:
            self.liquid_precipitation_quantity = vals[i]
        i += 1
[ "def", "read", "(", "self", ",", "vals", ")", ":", "i", "=", "0", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "year", "=", "None", "else", ":", "self", ".", "year", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "month", "=", "None", "else", ":", "self", ".", "month", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "day", "=", "None", "else", ":", "self", ".", "day", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hour", "=", "None", "else", ":", "self", ".", "hour", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "minute", "=", "None", "else", ":", "self", ".", "minute", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "data_source_and_uncertainty_flags", "=", "None", "else", ":", "self", ".", "data_source_and_uncertainty_flags", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dry_bulb_temperature", "=", "None", "else", ":", "self", ".", "dry_bulb_temperature", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dew_point_temperature", "=", "None", "else", ":", "self", ".", "dew_point_temperature", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "relative_humidity", "=", "None", "else", ":", "self", ".", "relative_humidity", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "atmospheric_station_pressure", "=", "None", "else", ":", "self", ".", "atmospheric_station_pressure", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "extraterrestrial_horizontal_radiation", "=", "None", "else", ":", "self", ".", "extraterrestrial_horizontal_radiation", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "extraterrestrial_direct_normal_radiation", "=", "None", "else", ":", "self", ".", "extraterrestrial_direct_normal_radiation", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "horizontal_infrared_radiation_intensity", "=", "None", "else", ":", "self", ".", "horizontal_infrared_radiation_intensity", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "global_horizontal_radiation", "=", "None", "else", ":", "self", ".", "global_horizontal_radiation", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "direct_normal_radiation", "=", "None", "else", ":", "self", ".", "direct_normal_radiation", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "diffuse_horizontal_radiation", "=", "None", "else", ":", "self", ".", "diffuse_horizontal_radiation", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "global_horizontal_illuminance", "=", "None", "else", ":", "self", ".", 
"global_horizontal_illuminance", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "direct_normal_illuminance", "=", "None", "else", ":", "self", ".", "direct_normal_illuminance", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "diffuse_horizontal_illuminance", "=", "None", "else", ":", "self", ".", "diffuse_horizontal_illuminance", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "zenith_luminance", "=", "None", "else", ":", "self", ".", "zenith_luminance", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wind_direction", "=", "None", "else", ":", "self", ".", "wind_direction", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wind_speed", "=", "None", "else", ":", "self", ".", "wind_speed", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "total_sky_cover", "=", "None", "else", ":", "self", ".", "total_sky_cover", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "opaque_sky_cover", "=", "None", "else", ":", "self", ".", "opaque_sky_cover", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "visibility", "=", "None", "else", ":", "self", ".", "visibility", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ceiling_height", "=", "None", "else", ":", "self", ".", "ceiling_height", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "present_weather_observation", "=", "None", "else", ":", "self", ".", "present_weather_observation", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "present_weather_codes", "=", "None", "else", ":", "self", ".", "present_weather_codes", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "precipitable_water", "=", "None", "else", ":", "self", ".", "precipitable_water", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "aerosol_optical_depth", "=", "None", "else", ":", "self", ".", "aerosol_optical_depth", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "snow_depth", "=", "None", "else", ":", "self", ".", "snow_depth", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "days_since_last_snowfall", "=", "None", "else", ":", "self", ".", "days_since_last_snowfall", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "albedo", "=", "None", "else", ":", "self", ".", "albedo", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "liquid_precipitation_depth", "=", "None", "else", ":", "self", ".", "liquid_precipitation_depth", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", 
"==", "0", ":", "self", ".", "liquid_precipitation_quantity", "=", "None", "else", ":", "self", ".", "liquid_precipitation_quantity", "=", "vals", "[", "i", "]", "i", "+=", "1" ]
Read values. Args: vals (list): list of strings representing values
[ "Read", "values", "." ]
python
train
28.84153
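A sketch of feeding one comma-separated EPW data row to read(); WeatherData is a hypothetical name for the owning class, and the row values are invented.

row = '1999,1,1,1,60,A7A7A7A7,7.2,5.6,89'.split(',')
row += [''] * (35 - len(row))        # read() walks 35 fields; pad the rest
record = WeatherData()               # hypothetical: the class defining read()
record.read(row)
print(record.dry_bulb_temperature)   # '7.2' -- values are kept as strings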
twilio/twilio-python
twilio/rest/verify/v2/service/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/verify/v2/service/__init__.py#L517-L543
def update(self, friendly_name=values.unset, code_length=values.unset, lookup_enabled=values.unset, skip_sms_to_landlines=values.unset, dtmf_input_required=values.unset, tts_name=values.unset, psd2_enabled=values.unset): """ Update the ServiceInstance :param unicode friendly_name: A string to describe the verification service :param unicode code_length: The length of the verification code to generate :param bool lookup_enabled: Whether to perform a lookup with each verification :param bool skip_sms_to_landlines: Whether to skip sending SMS verifications to landlines :param bool dtmf_input_required: Whether to ask the user to press a number before delivering the verify code in a phone call :param unicode tts_name: The name of an alternative text-to-speech service to use in phone calls :param bool psd2_enabled: Whether to pass PSD2 transaction parameters when starting a verification :returns: Updated ServiceInstance :rtype: twilio.rest.verify.v2.service.ServiceInstance """ return self._proxy.update( friendly_name=friendly_name, code_length=code_length, lookup_enabled=lookup_enabled, skip_sms_to_landlines=skip_sms_to_landlines, dtmf_input_required=dtmf_input_required, tts_name=tts_name, psd2_enabled=psd2_enabled, )
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "code_length", "=", "values", ".", "unset", ",", "lookup_enabled", "=", "values", ".", "unset", ",", "skip_sms_to_landlines", "=", "values", ".", "unset", ",", "dtmf_input_required", "=", "values", ".", "unset", ",", "tts_name", "=", "values", ".", "unset", ",", "psd2_enabled", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "update", "(", "friendly_name", "=", "friendly_name", ",", "code_length", "=", "code_length", ",", "lookup_enabled", "=", "lookup_enabled", ",", "skip_sms_to_landlines", "=", "skip_sms_to_landlines", ",", "dtmf_input_required", "=", "dtmf_input_required", ",", "tts_name", "=", "tts_name", ",", "psd2_enabled", "=", "psd2_enabled", ",", ")" ]
Update the ServiceInstance :param unicode friendly_name: A string to describe the verification service :param unicode code_length: The length of the verification code to generate :param bool lookup_enabled: Whether to perform a lookup with each verification :param bool skip_sms_to_landlines: Whether to skip sending SMS verifications to landlines :param bool dtmf_input_required: Whether to ask the user to press a number before delivering the verify code in a phone call :param unicode tts_name: The name of an alternative text-to-speech service to use in phone calls :param bool psd2_enabled: Whether to pass PSD2 transaction parameters when starting a verification :returns: Updated ServiceInstance :rtype: twilio.rest.verify.v2.service.ServiceInstance
[ "Update", "the", "ServiceInstance" ]
python
train
53.62963
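A hedged call through the public twilio-python client, which proxies to the update() shown above; the account and service SIDs are placeholders.

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'auth_token')   # placeholders
service = client.verify.services('VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').update(
    friendly_name='My Verify Service',
    code_length=6)
print(service.friendly_name)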
davidcarboni/Flask-Sleuth
sleuth/__init__.py
https://github.com/davidcarboni/Flask-Sleuth/blob/2191aa2a929ec43c0176ec51c7abef924b12d015/sleuth/__init__.py#L71-L88
def _tracing_information(): """Gets B3 distributed tracing information, if available. This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format. """ # We'll collate trace information if the B3 headers have been collected: values = b3.values() if values[b3.b3_trace_id]: # Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1 # However we're not currently using Zipkin, so it's always false # exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false" return [ current_app.name if current_app.name else " - ", values[b3.b3_trace_id], values[b3.b3_span_id], "false", ]
[ "def", "_tracing_information", "(", ")", ":", "# We'll collate trace information if the B3 headers have been collected:", "values", "=", "b3", ".", "values", "(", ")", "if", "values", "[", "b3", ".", "b3_trace_id", "]", ":", "# Trace information would normally be sent to Zipkin if either of sampled or debug (\"flags\") is set to 1", "# However we're not currently using Zipkin, so it's always false", "# exported = \"true\" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else \"false\"", "return", "[", "current_app", ".", "name", "if", "current_app", ".", "name", "else", "\" - \"", ",", "values", "[", "b3", ".", "b3_trace_id", "]", ",", "values", "[", "b3", ".", "b3_span_id", "]", ",", "\"false\"", ",", "]" ]
Gets B3 distributed tracing information, if available. This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.
[ "Gets", "B3", "distributed", "tracing", "information", "if", "available", ".", "This", "is", "returned", "as", "a", "list", "ready", "to", "be", "formatted", "into", "Spring", "Cloud", "Sleuth", "compatible", "format", "." ]
python
train
43.555556
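An illustration of how the returned list lines up with the Spring Cloud Sleuth log segment [app,trace,span,exported]; the IDs below are invented.

tracing = ['myapp', '624ec2c82e09a5b4', 'aa1bc2e1c7d4f0a2', 'false']
print('[%s,%s,%s,%s]' % tuple(tracing))
# -> [myapp,624ec2c82e09a5b4,aa1bc2e1c7d4f0a2,false]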
Opentrons/opentrons
api/src/opentrons/protocol_api/contexts.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L1623-L1632
def load_labware(self, labware: Labware) -> Labware:
        """ Load labware onto a Magnetic Module, checking if it is compatible
        """
        if labware.magdeck_engage_height is None:
            MODULE_LOG.warning(
                "This labware ({}) is not explicitly compatible with the"
                " Magnetic Module. You will have to specify a height when"
                " calling engage().".format(labware))
        return super().load_labware(labware)
[ "def", "load_labware", "(", "self", ",", "labware", ":", "Labware", ")", "->", "Labware", ":", "if", "labware", ".", "magdeck_engage_height", "is", "None", ":", "MODULE_LOG", ".", "warning", "(", "\"This labware ({}) is not explicitly compatible with the\"", "\" Magnetic Module. You will have to specify a height when\"", "\" calling engage().\"", ")", "return", "super", "(", ")", ".", "load_labware", "(", "labware", ")" ]
Load labware onto a Magnetic Module, checking if it is compatible
[ "Load", "labware", "onto", "a", "Magnetic", "Module", "checking", "if", "it", "is", "compatible" ]
python
train
45.5
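A sketch of the public protocol API that sits on top of this context; the load names and engage height are assumptions tied to the installed labware definitions.

from opentrons import protocol_api

def run(protocol: protocol_api.ProtocolContext):
    magdeck = protocol.load_module('magnetic module', 1)
    plate = magdeck.load_labware('biorad_96_wellplate_200ul_pcr')  # assumed name
    magdeck.engage(height=12)   # explicit height, per the warning above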
DLR-RM/RAFCON
source/rafcon/gui/controllers/utils/tree_view_controller.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L460-L463
def register_view(self, view): """Register callbacks for button press events and selection changed""" super(ListViewController, self).register_view(view) self.tree_view.connect('button_press_event', self.mouse_click)
[ "def", "register_view", "(", "self", ",", "view", ")", ":", "super", "(", "ListViewController", ",", "self", ")", ".", "register_view", "(", "view", ")", "self", ".", "tree_view", ".", "connect", "(", "'button_press_event'", ",", "self", ".", "mouse_click", ")" ]
Register callbacks for button press events and selection changed
[ "Register", "callbacks", "for", "button", "press", "events", "and", "selection", "changed" ]
python
train
59.25
Contraz/demosys-py
demosys/context/glfw/window.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/context/glfw/window.py#L107-L114
def resize(self, width, height): """ Sets the new size and buffer size internally """ self.width = width self.height = height self.buffer_width, self.buffer_height = glfw.get_framebuffer_size(self.window) self.set_default_viewport()
[ "def", "resize", "(", "self", ",", "width", ",", "height", ")", ":", "self", ".", "width", "=", "width", "self", ".", "height", "=", "height", "self", ".", "buffer_width", ",", "self", ".", "buffer_height", "=", "glfw", ".", "get_framebuffer_size", "(", "self", ".", "window", ")", "self", ".", "set_default_viewport", "(", ")" ]
Sets the new size and buffer size internally
[ "Sets", "the", "new", "size", "and", "buffer", "size", "internally" ]
python
valid
35.125
josegomezr/pqb
pqb/queries.py
https://github.com/josegomezr/pqb/blob/a600cc6e4e9acdaaf2cff171d13eb85c9ed1757c/pqb/queries.py#L287-L294
def to(self, to):
        """
        [Edge-only] specifies the destination of the edge
        """
        if self._type.lower() != 'edge':
            raise ValueError('Cannot set From/To to non-edge objects')
        self._to = to
        return self
[ "def", "to", "(", "self", ",", "to", ")", ":", "if", "self", ".", "_type", ".", "lower", "(", ")", "!=", "'edge'", ":", "raise", "ValueError", "(", "'Cannot set From/To to non-edge objects'", ")", "self", ".", "_to", "=", "to", "return", "self" ]
[Edge-only] specifies the destination of the edge
[ "[", "Edge", "-", "only", "]", "especifica", "el", "destino", "del", "lado" ]
python
train
29.875
fishtown-analytics/dbt
core/dbt/config/profile.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/config/profile.py#L300-L335
def from_raw_profiles(cls, raw_profiles, profile_name, cli_vars, target_override=None, threads_override=None): """ :param raw_profiles dict: The profile data, from disk as yaml. :param profile_name str: The profile name to use. :param cli_vars dict: The command-line variables passed as arguments, as a dict. :param target_override Optional[str]: The target to use, if provided on the command line. :param threads_override Optional[str]: The thread count to use, if provided on the command line. :raises DbtProjectError: If there is no profile name specified in the project or the command line arguments :raises DbtProfileError: If the profile is invalid or missing, or the target could not be found :returns Profile: The new Profile object. """ if profile_name not in raw_profiles: raise DbtProjectError( "Could not find profile named '{}'".format(profile_name) ) # First, we've already got our final decision on profile name, and we # don't render keys, so we can pluck that out raw_profile = raw_profiles[profile_name] user_cfg = raw_profiles.get('config') return cls.from_raw_profile_info( raw_profile=raw_profile, profile_name=profile_name, cli_vars=cli_vars, user_cfg=user_cfg, target_override=target_override, threads_override=threads_override, )
[ "def", "from_raw_profiles", "(", "cls", ",", "raw_profiles", ",", "profile_name", ",", "cli_vars", ",", "target_override", "=", "None", ",", "threads_override", "=", "None", ")", ":", "if", "profile_name", "not", "in", "raw_profiles", ":", "raise", "DbtProjectError", "(", "\"Could not find profile named '{}'\"", ".", "format", "(", "profile_name", ")", ")", "# First, we've already got our final decision on profile name, and we", "# don't render keys, so we can pluck that out", "raw_profile", "=", "raw_profiles", "[", "profile_name", "]", "user_cfg", "=", "raw_profiles", ".", "get", "(", "'config'", ")", "return", "cls", ".", "from_raw_profile_info", "(", "raw_profile", "=", "raw_profile", ",", "profile_name", "=", "profile_name", ",", "cli_vars", "=", "cli_vars", ",", "user_cfg", "=", "user_cfg", ",", "target_override", "=", "target_override", ",", "threads_override", "=", "threads_override", ",", ")" ]
:param raw_profiles dict: The profile data, from disk as yaml. :param profile_name str: The profile name to use. :param cli_vars dict: The command-line variables passed as arguments, as a dict. :param target_override Optional[str]: The target to use, if provided on the command line. :param threads_override Optional[str]: The thread count to use, if provided on the command line. :raises DbtProjectError: If there is no profile name specified in the project or the command line arguments :raises DbtProfileError: If the profile is invalid or missing, or the target could not be found :returns Profile: The new Profile object.
[ ":", "param", "raw_profiles", "dict", ":", "The", "profile", "data", "from", "disk", "as", "yaml", ".", ":", "param", "profile_name", "str", ":", "The", "profile", "name", "to", "use", ".", ":", "param", "cli_vars", "dict", ":", "The", "command", "-", "line", "variables", "passed", "as", "arguments", "as", "a", "dict", ".", ":", "param", "target_override", "Optional", "[", "str", "]", ":", "The", "target", "to", "use", "if", "provided", "on", "the", "command", "line", ".", ":", "param", "threads_override", "Optional", "[", "str", "]", ":", "The", "thread", "count", "to", "use", "if", "provided", "on", "the", "command", "line", ".", ":", "raises", "DbtProjectError", ":", "If", "there", "is", "no", "profile", "name", "specified", "in", "the", "project", "or", "the", "command", "line", "arguments", ":", "raises", "DbtProfileError", ":", "If", "the", "profile", "is", "invalid", "or", "missing", "or", "the", "target", "could", "not", "be", "found", ":", "returns", "Profile", ":", "The", "new", "Profile", "object", "." ]
python
train
43.194444
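A sketch of the dict shape this classmethod consumes; the profile name and output values are illustrative, and a real profile needs full adapter credentials.

raw_profiles = {
    'config': {'send_anonymous_usage_stats': False},
    'jaffle_shop': {
        'target': 'dev',
        'outputs': {
            'dev': {'type': 'postgres', 'threads': 4},   # credentials omitted
        },
    },
}
profile = Profile.from_raw_profiles(   # Profile as defined above
    raw_profiles, 'jaffle_shop', cli_vars={})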
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L442-L449
def cfg_from_file(self, yaml_filename, config_dict): """Load a config file and merge it into the default options.""" import yaml from easydict import EasyDict as edict with open(yaml_filename, 'r') as f: yaml_cfg = edict(yaml.load(f)) return self._merge_a_into_b(yaml_cfg, config_dict)
[ "def", "cfg_from_file", "(", "self", ",", "yaml_filename", ",", "config_dict", ")", ":", "import", "yaml", "from", "easydict", "import", "EasyDict", "as", "edict", "with", "open", "(", "yaml_filename", ",", "'r'", ")", "as", "f", ":", "yaml_cfg", "=", "edict", "(", "yaml", ".", "load", "(", "f", ")", ")", "return", "self", ".", "_merge_a_into_b", "(", "yaml_cfg", ",", "config_dict", ")" ]
Load a config file and merge it into the default options.
[ "Load", "a", "config", "file", "and", "merge", "it", "into", "the", "default", "options", "." ]
python
train
41.375
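A sketch of merging a YAML override into a default dict via cfg_from_file; cfg_obj stands in for an instance of the surrounding class, and whether the merge also mutates the defaults in place is an assumption.

defaults = {'TRAIN': {'LR': 0.001, 'BATCH': 32}}
with open('override.yml', 'w') as f:
    f.write('TRAIN:\n  LR: 0.01\n')
merged = cfg_obj.cfg_from_file('override.yml', defaults)   # cfg_obj: hypothetical instance
print(merged['TRAIN'])   # expect LR overridden to 0.01, BATCH kept at 32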
Cito/DBUtils
DBUtils/SteadyDB.py
https://github.com/Cito/DBUtils/blob/90e8825e038f08c82044b8e50831480175fa026a/DBUtils/SteadyDB.py#L308-L313
def _store(self, con): """Store a database connection for subsequent use.""" self._con = con self._transaction = False self._closed = False self._usage = 0
[ "def", "_store", "(", "self", ",", "con", ")", ":", "self", ".", "_con", "=", "con", "self", ".", "_transaction", "=", "False", "self", ".", "_closed", "=", "False", "self", ".", "_usage", "=", "0" ]
Store a database connection for subsequent use.
[ "Store", "a", "database", "connection", "for", "subsequent", "use", "." ]
python
train
31.666667
fhs/pyhdf
pyhdf/SD.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2206-L2246
def getcal(self): """Retrieve the SDS calibration coefficients. Args:: no argument Returns:: 5-element tuple holding: - cal: calibration factor (attribute 'scale_factor') - cal_error : calibration factor error (attribute 'scale_factor_err') - offset: calibration offset (attribute 'add_offset') - offset_err : offset error (attribute 'add_offset_err') - data_type : type of the data resulting from applying the calibration formula to the dataset values (attribute 'calibrated_nt') An exception is raised if no calibration data are defined. Original dataset values 'orival' are converted to calibrated values 'calval' through the formula:: calval = cal * (orival - offset) The calibration coefficients are part of the so-called "standard" SDS attributes. The values inside the tuple returned by 'getcal' are those of the following attributes, in order:: scale_factor, scale_factor_err, add_offset, add_offset_err, calibrated_nt C library equivalent: SDgetcal() """ status, cal, cal_error, offset, offset_err, data_type = \ _C.SDgetcal(self._id) _checkErr('getcal', status, 'no calibration record') return cal, cal_error, offset, offset_err, data_type
[ "def", "getcal", "(", "self", ")", ":", "status", ",", "cal", ",", "cal_error", ",", "offset", ",", "offset_err", ",", "data_type", "=", "_C", ".", "SDgetcal", "(", "self", ".", "_id", ")", "_checkErr", "(", "'getcal'", ",", "status", ",", "'no calibration record'", ")", "return", "cal", ",", "cal_error", ",", "offset", ",", "offset_err", ",", "data_type" ]
Retrieve the SDS calibration coefficients. Args:: no argument Returns:: 5-element tuple holding: - cal: calibration factor (attribute 'scale_factor') - cal_error : calibration factor error (attribute 'scale_factor_err') - offset: calibration offset (attribute 'add_offset') - offset_err : offset error (attribute 'add_offset_err') - data_type : type of the data resulting from applying the calibration formula to the dataset values (attribute 'calibrated_nt') An exception is raised if no calibration data are defined. Original dataset values 'orival' are converted to calibrated values 'calval' through the formula:: calval = cal * (orival - offset) The calibration coefficients are part of the so-called "standard" SDS attributes. The values inside the tuple returned by 'getcal' are those of the following attributes, in order:: scale_factor, scale_factor_err, add_offset, add_offset_err, calibrated_nt C library equivalent: SDgetcal()
[ "Retrieve", "the", "SDS", "calibration", "coefficients", "." ]
python
train
35.853659
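A sketch applying getcal's calibration formula; the file and dataset names are placeholders.

import numpy as np
from pyhdf.SD import SD

sd = SD('granule.hdf')             # placeholder filename
sds = sd.select('Temperature')     # placeholder dataset name
cal, cal_error, offset, offset_err, data_type = sds.getcal()
raw = np.asarray(sds[:])           # original stored values
calibrated = cal * (raw - offset)  # calval = cal * (orival - offset)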
quantumlib/Cirq
cirq/circuits/circuit.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L1068-L1100
def insert_at_frontier(self,
                           operations: ops.OP_TREE,
                           start: int,
                           frontier: Dict[ops.Qid, int] = None
                           ) -> Dict[ops.Qid, int]:
        """Inserts operations inline at frontier.

        Args:
            operations: the operations to insert
            start: the moment at which to start inserting the operations
            frontier: frontier[q] is the earliest moment in which an operation
                acting on qubit q can be placed.
        """
        if frontier is None:
            frontier = defaultdict(lambda: 0)
        operations = tuple(ops.flatten_op_tree(operations))
        if not operations:
            return frontier
        qubits = set(q for op in operations for q in op.qubits)
        if any(frontier[q] > start for q in qubits):
            raise ValueError('The frontier for qubits on which the operations '
                             'to insert act cannot be after start.')

        next_moments = self.next_moments_operating_on(qubits, start)

        insertion_indices, _ = self._pick_inserted_ops_moment_indices(
            operations, start, frontier)

        self._push_frontier(frontier, next_moments)

        self._insert_operations(operations, insertion_indices)

        return frontier
[ "def", "insert_at_frontier", "(", "self", ",", "operations", ":", "ops", ".", "OP_TREE", ",", "start", ":", "int", ",", "frontier", ":", "Dict", "[", "ops", ".", "Qid", ",", "int", "]", "=", "None", ")", "->", "Dict", "[", "ops", ".", "Qid", ",", "int", "]", ":", "if", "frontier", "is", "None", ":", "frontier", "=", "defaultdict", "(", "lambda", ":", "0", ")", "operations", "=", "tuple", "(", "ops", ".", "flatten_op_tree", "(", "operations", ")", ")", "if", "not", "operations", ":", "return", "frontier", "qubits", "=", "set", "(", "q", "for", "op", "in", "operations", "for", "q", "in", "op", ".", "qubits", ")", "if", "any", "(", "frontier", "[", "q", "]", ">", "start", "for", "q", "in", "qubits", ")", ":", "raise", "ValueError", "(", "'The frontier for qubits on which the operations'", "'to insert act cannot be after start.'", ")", "next_moments", "=", "self", ".", "next_moments_operating_on", "(", "qubits", ",", "start", ")", "insertion_indices", ",", "_", "=", "self", ".", "_pick_inserted_ops_moment_indices", "(", "operations", ",", "start", ",", "frontier", ")", "self", ".", "_push_frontier", "(", "frontier", ",", "next_moments", ")", "self", ".", "_insert_operations", "(", "operations", ",", "insertion_indices", ")", "return", "frontier" ]
Inserts operations inline at frontier. Args: operations: the operations to insert start: the moment at which to start inserting the operations frontier: frontier[q] is the earliest moment in which an operation acting on qubit q can be placed.
[ "Inserts", "operations", "inline", "at", "frontier", "." ]
python
train
39.666667
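A small sketch against recent cirq versions; Circuit construction details have shifted across releases, so treat the builder call as an assumption.

import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CZ(a, b))
frontier = circuit.insert_at_frontier([cirq.H(a), cirq.X(b)], start=1)
print(frontier[a], frontier[b])   # earliest free moments per qubit after insert
print(circuit)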
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/query.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/query.py#L585-L617
def _normalize_orders(self): """Helper: adjust orders based on cursors, where clauses.""" orders = list(self._orders) _has_snapshot_cursor = False if self._start_at: if isinstance(self._start_at[0], document.DocumentSnapshot): _has_snapshot_cursor = True if self._end_at: if isinstance(self._end_at[0], document.DocumentSnapshot): _has_snapshot_cursor = True if _has_snapshot_cursor: should_order = [ _enum_from_op_string(key) for key in _COMPARISON_OPERATORS if key not in (_EQ_OP, "array_contains") ] order_keys = [order.field.field_path for order in orders] for filter_ in self._field_filters: field = filter_.field.field_path if filter_.op in should_order and field not in order_keys: orders.append(self._make_order(field, "ASCENDING")) if not orders: orders.append(self._make_order("__name__", "ASCENDING")) else: order_keys = [order.field.field_path for order in orders] if "__name__" not in order_keys: direction = orders[-1].direction # enum? orders.append(self._make_order("__name__", direction)) return orders
[ "def", "_normalize_orders", "(", "self", ")", ":", "orders", "=", "list", "(", "self", ".", "_orders", ")", "_has_snapshot_cursor", "=", "False", "if", "self", ".", "_start_at", ":", "if", "isinstance", "(", "self", ".", "_start_at", "[", "0", "]", ",", "document", ".", "DocumentSnapshot", ")", ":", "_has_snapshot_cursor", "=", "True", "if", "self", ".", "_end_at", ":", "if", "isinstance", "(", "self", ".", "_end_at", "[", "0", "]", ",", "document", ".", "DocumentSnapshot", ")", ":", "_has_snapshot_cursor", "=", "True", "if", "_has_snapshot_cursor", ":", "should_order", "=", "[", "_enum_from_op_string", "(", "key", ")", "for", "key", "in", "_COMPARISON_OPERATORS", "if", "key", "not", "in", "(", "_EQ_OP", ",", "\"array_contains\"", ")", "]", "order_keys", "=", "[", "order", ".", "field", ".", "field_path", "for", "order", "in", "orders", "]", "for", "filter_", "in", "self", ".", "_field_filters", ":", "field", "=", "filter_", ".", "field", ".", "field_path", "if", "filter_", ".", "op", "in", "should_order", "and", "field", "not", "in", "order_keys", ":", "orders", ".", "append", "(", "self", ".", "_make_order", "(", "field", ",", "\"ASCENDING\"", ")", ")", "if", "not", "orders", ":", "orders", ".", "append", "(", "self", ".", "_make_order", "(", "\"__name__\"", ",", "\"ASCENDING\"", ")", ")", "else", ":", "order_keys", "=", "[", "order", ".", "field", ".", "field_path", "for", "order", "in", "orders", "]", "if", "\"__name__\"", "not", "in", "order_keys", ":", "direction", "=", "orders", "[", "-", "1", "]", ".", "direction", "# enum?", "orders", ".", "append", "(", "self", ".", "_make_order", "(", "\"__name__\"", ",", "direction", ")", ")", "return", "orders" ]
Helper: adjust orders based on cursors, where clauses.
[ "Helper", ":", "adjust", "orders", "based", "on", "cursors", "where", "clauses", "." ]
python
train
41.363636
sivy/pystatsd
pystatsd/statsd.py
https://github.com/sivy/pystatsd/blob/69e362654c37df28582b12b964901334326620a7/pystatsd/statsd.py#L78-L87
def update_stats(self, stats, delta, sample_rate=1): """ Updates one or more stats counters by arbitrary amounts >>> statsd_client.update_stats('some.int',10) """ if not isinstance(stats, list): stats = [stats] data = dict((stat, "%s|c" % delta) for stat in stats) self.send(data, sample_rate)
[ "def", "update_stats", "(", "self", ",", "stats", ",", "delta", ",", "sample_rate", "=", "1", ")", ":", "if", "not", "isinstance", "(", "stats", ",", "list", ")", ":", "stats", "=", "[", "stats", "]", "data", "=", "dict", "(", "(", "stat", ",", "\"%s|c\"", "%", "delta", ")", "for", "stat", "in", "stats", ")", "self", ".", "send", "(", "data", ",", "sample_rate", ")" ]
Updates one or more stats counters by arbitrary amounts >>> statsd_client.update_stats('some.int',10)
[ "Updates", "one", "or", "more", "stats", "counters", "by", "arbitrary", "amounts", ">>>", "statsd_client", ".", "update_stats", "(", "some", ".", "int", "10", ")" ]
python
train
35.3
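A hedged usage sketch; the Client import path and constructor arguments are assumptions about this library's packaging.

from pystatsd import Client   # assumed import path

statsd_client = Client('localhost', 8125)
statsd_client.update_stats('some.int', 10)                          # one counter
statsd_client.update_stats(['hits', 'bytes'], 5, sample_rate=0.5)   # several, sampled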
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L693-L722
def display_eventtype(self): """Read the list of event types in the annotations and update widgets. """ if self.annot is not None: event_types = sorted(self.annot.event_types, key=str.lower) else: event_types = [] self.idx_eventtype.clear() evttype_group = QGroupBox('Event Types') layout = QVBoxLayout() evttype_group.setLayout(layout) self.check_all_eventtype = check_all = QCheckBox('All event types') check_all.setCheckState(Qt.Checked) check_all.clicked.connect(self.toggle_eventtype) layout.addWidget(check_all) self.idx_eventtype_list = [] for one_eventtype in event_types: self.idx_eventtype.addItem(one_eventtype) item = QCheckBox(one_eventtype) layout.addWidget(item) item.setCheckState(Qt.Checked) item.stateChanged.connect(self.update_annotations) item.stateChanged.connect(self.toggle_check_all_eventtype) self.idx_eventtype_list.append(item) self.idx_eventtype_scroll.setWidget(evttype_group)
[ "def", "display_eventtype", "(", "self", ")", ":", "if", "self", ".", "annot", "is", "not", "None", ":", "event_types", "=", "sorted", "(", "self", ".", "annot", ".", "event_types", ",", "key", "=", "str", ".", "lower", ")", "else", ":", "event_types", "=", "[", "]", "self", ".", "idx_eventtype", ".", "clear", "(", ")", "evttype_group", "=", "QGroupBox", "(", "'Event Types'", ")", "layout", "=", "QVBoxLayout", "(", ")", "evttype_group", ".", "setLayout", "(", "layout", ")", "self", ".", "check_all_eventtype", "=", "check_all", "=", "QCheckBox", "(", "'All event types'", ")", "check_all", ".", "setCheckState", "(", "Qt", ".", "Checked", ")", "check_all", ".", "clicked", ".", "connect", "(", "self", ".", "toggle_eventtype", ")", "layout", ".", "addWidget", "(", "check_all", ")", "self", ".", "idx_eventtype_list", "=", "[", "]", "for", "one_eventtype", "in", "event_types", ":", "self", ".", "idx_eventtype", ".", "addItem", "(", "one_eventtype", ")", "item", "=", "QCheckBox", "(", "one_eventtype", ")", "layout", ".", "addWidget", "(", "item", ")", "item", ".", "setCheckState", "(", "Qt", ".", "Checked", ")", "item", ".", "stateChanged", ".", "connect", "(", "self", ".", "update_annotations", ")", "item", ".", "stateChanged", ".", "connect", "(", "self", ".", "toggle_check_all_eventtype", ")", "self", ".", "idx_eventtype_list", ".", "append", "(", "item", ")", "self", ".", "idx_eventtype_scroll", ".", "setWidget", "(", "evttype_group", ")" ]
Read the list of event types in the annotations and update widgets.
[ "Read", "the", "list", "of", "event", "types", "in", "the", "annotations", "and", "update", "widgets", "." ]
python
train
37.266667
SiLab-Bonn/pyBAR
pybar/analysis/plotting/plotting.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/plotting/plotting.py#L52-L121
def plot_linear_relation(x, y, x_err=None, y_err=None, title=None, point_label=None, legend=None, plot_range=None, plot_range_y=None, x_label=None, y_label=None, y_2_label=None, log_x=False, log_y=False, size=None, filename=None): ''' Takes point data (x,y) with errors(x,y) and fits a straight line. The deviation to this line is also plotted, showing the offset. Parameters ---------- x, y, x_err, y_err: iterable filename: string, PdfPages object or None PdfPages file object: plot is appended to the pdf string: new plot file with the given filename is created None: the plot is printed to screen ''' fig = Figure() FigureCanvas(fig) ax = fig.add_subplot(111) if x_err is not None: x_err = [x_err, x_err] if y_err is not None: y_err = [y_err, y_err] ax.set_title(title) if y_label is not None: ax.set_ylabel(y_label) if log_x: ax.set_xscale('log') if log_y: ax.set_yscale('log') if plot_range: ax.set_xlim((min(plot_range), max(plot_range))) if plot_range_y: ax.set_ylim((min(plot_range_y), max(plot_range_y))) if legend: fig.legend(legend, 0) ax.grid(True) ax.errorbar(x, y, xerr=x_err, yerr=y_err, fmt='o', color='black') # plot points # label points if needed if point_label is not None: for X, Y, Z in zip(x, y, point_label): ax.annotate('{}'.format(Z), xy=(X, Y), xytext=(-5, 5), ha='right', textcoords='offset points') line_fit, _ = np.polyfit(x, y, 1, full=False, cov=True) fit_fn = np.poly1d(line_fit) ax.plot(x, fit_fn(x), '-', lw=2, color='gray') setp(ax.get_xticklabels(), visible=False) # remove ticks at common border of both plots divider = make_axes_locatable(ax) ax_bottom_plot = divider.append_axes("bottom", 2.0, pad=0.0, sharex=ax) ax_bottom_plot.bar(x, y - fit_fn(x), align='center', width=np.amin(np.diff(x)) / 2, color='gray') # plot(x, y - fit_fn(x)) ax_bottom_plot.grid(True) if x_label is not None: ax.set_xlabel(x_label) if y_2_label is not None: ax.set_ylabel(y_2_label) ax.set_ylim((-np.amax(np.abs(y - fit_fn(x)))), (np.amax(np.abs(y - fit_fn(x))))) ax.plot(ax.set_xlim(), [0, 0], '-', color='black') setp(ax_bottom_plot.get_yticklabels()[-2:-1], visible=False) if size is not None: fig.set_size_inches(size) if not filename: fig.show() elif isinstance(filename, PdfPages): filename.savefig(fig) elif filename: fig.savefig(filename, bbox_inches='tight') return fig
[ "def", "plot_linear_relation", "(", "x", ",", "y", ",", "x_err", "=", "None", ",", "y_err", "=", "None", ",", "title", "=", "None", ",", "point_label", "=", "None", ",", "legend", "=", "None", ",", "plot_range", "=", "None", ",", "plot_range_y", "=", "None", ",", "x_label", "=", "None", ",", "y_label", "=", "None", ",", "y_2_label", "=", "None", ",", "log_x", "=", "False", ",", "log_y", "=", "False", ",", "size", "=", "None", ",", "filename", "=", "None", ")", ":", "fig", "=", "Figure", "(", ")", "FigureCanvas", "(", "fig", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "if", "x_err", "is", "not", "None", ":", "x_err", "=", "[", "x_err", ",", "x_err", "]", "if", "y_err", "is", "not", "None", ":", "y_err", "=", "[", "y_err", ",", "y_err", "]", "ax", ".", "set_title", "(", "title", ")", "if", "y_label", "is", "not", "None", ":", "ax", ".", "set_ylabel", "(", "y_label", ")", "if", "log_x", ":", "ax", ".", "set_xscale", "(", "'log'", ")", "if", "log_y", ":", "ax", ".", "set_yscale", "(", "'log'", ")", "if", "plot_range", ":", "ax", ".", "set_xlim", "(", "(", "min", "(", "plot_range", ")", ",", "max", "(", "plot_range", ")", ")", ")", "if", "plot_range_y", ":", "ax", ".", "set_ylim", "(", "(", "min", "(", "plot_range_y", ")", ",", "max", "(", "plot_range_y", ")", ")", ")", "if", "legend", ":", "fig", ".", "legend", "(", "legend", ",", "0", ")", "ax", ".", "grid", "(", "True", ")", "ax", ".", "errorbar", "(", "x", ",", "y", ",", "xerr", "=", "x_err", ",", "yerr", "=", "y_err", ",", "fmt", "=", "'o'", ",", "color", "=", "'black'", ")", "# plot points\r", "# label points if needed\r", "if", "point_label", "is", "not", "None", ":", "for", "X", ",", "Y", ",", "Z", "in", "zip", "(", "x", ",", "y", ",", "point_label", ")", ":", "ax", ".", "annotate", "(", "'{}'", ".", "format", "(", "Z", ")", ",", "xy", "=", "(", "X", ",", "Y", ")", ",", "xytext", "=", "(", "-", "5", ",", "5", ")", ",", "ha", "=", "'right'", ",", "textcoords", "=", "'offset points'", ")", "line_fit", ",", "_", "=", "np", ".", "polyfit", "(", "x", ",", "y", ",", "1", ",", "full", "=", "False", ",", "cov", "=", "True", ")", "fit_fn", "=", "np", ".", "poly1d", "(", "line_fit", ")", "ax", ".", "plot", "(", "x", ",", "fit_fn", "(", "x", ")", ",", "'-'", ",", "lw", "=", "2", ",", "color", "=", "'gray'", ")", "setp", "(", "ax", ".", "get_xticklabels", "(", ")", ",", "visible", "=", "False", ")", "# remove ticks at common border of both plots\r", "divider", "=", "make_axes_locatable", "(", "ax", ")", "ax_bottom_plot", "=", "divider", ".", "append_axes", "(", "\"bottom\"", ",", "2.0", ",", "pad", "=", "0.0", ",", "sharex", "=", "ax", ")", "ax_bottom_plot", ".", "bar", "(", "x", ",", "y", "-", "fit_fn", "(", "x", ")", ",", "align", "=", "'center'", ",", "width", "=", "np", ".", "amin", "(", "np", ".", "diff", "(", "x", ")", ")", "/", "2", ",", "color", "=", "'gray'", ")", "# plot(x, y - fit_fn(x))\r", "ax_bottom_plot", ".", "grid", "(", "True", ")", "if", "x_label", "is", "not", "None", ":", "ax", ".", "set_xlabel", "(", "x_label", ")", "if", "y_2_label", "is", "not", "None", ":", "ax", ".", "set_ylabel", "(", "y_2_label", ")", "ax", ".", "set_ylim", "(", "(", "-", "np", ".", "amax", "(", "np", ".", "abs", "(", "y", "-", "fit_fn", "(", "x", ")", ")", ")", ")", ",", "(", "np", ".", "amax", "(", "np", ".", "abs", "(", "y", "-", "fit_fn", "(", "x", ")", ")", ")", ")", ")", "ax", ".", "plot", "(", "ax", ".", "set_xlim", "(", ")", ",", "[", "0", ",", "0", "]", ",", "'-'", ",", "color", "=", "'black'", ")", "setp", "(", 
"ax_bottom_plot", ".", "get_yticklabels", "(", ")", "[", "-", "2", ":", "-", "1", "]", ",", "visible", "=", "False", ")", "if", "size", "is", "not", "None", ":", "fig", ".", "set_size_inches", "(", "size", ")", "if", "not", "filename", ":", "fig", ".", "show", "(", ")", "elif", "isinstance", "(", "filename", ",", "PdfPages", ")", ":", "filename", ".", "savefig", "(", "fig", ")", "elif", "filename", ":", "fig", ".", "savefig", "(", "filename", ",", "bbox_inches", "=", "'tight'", ")", "return", "fig" ]
Takes point data (x,y) with errors(x,y) and fits a straight line. The deviation to this line is also plotted, showing the offset. Parameters ---------- x, y, x_err, y_err: iterable filename: string, PdfPages object or None PdfPages file object: plot is appended to the pdf string: new plot file with the given filename is created None: the plot is printed to screen
[ "Takes", "point", "data", "(", "x", "y", ")", "with", "errors", "(", "x", "y", ")", "and", "fits", "a", "straight", "line", ".", "The", "deviation", "to", "this", "line", "is", "also", "plotted", "showing", "the", "offset", ".", "Parameters", "----------", "x", "y", "x_err", "y_err", ":", "iterable", "filename", ":", "string", "PdfPages", "object", "or", "None", "PdfPages", "file", "object", ":", "plot", "is", "appended", "to", "the", "pdf", "string", ":", "new", "plot", "file", "with", "the", "given", "filename", "is", "created", "None", ":", "the", "plot", "is", "printed", "to", "screen" ]
python
train
37.557143
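A sketch that fits noisy linear data and appends the figure to a PdfPages file; assumes plot_linear_relation above is importable.

import numpy as np
from matplotlib.backends.backend_pdf import PdfPages

x = np.arange(1.0, 11.0)
y = 2.0 * x + 1.0 + np.random.normal(0.0, 0.2, x.shape)
with PdfPages('fit.pdf') as pdf:
    plot_linear_relation(x, y, y_err=np.full(x.shape, 0.2),
                         title='Linear fit', x_label='x', y_label='y',
                         y_2_label='residual', filename=pdf)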
Nic30/hwt
hwt/simulator/hdlSimulator.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/simulator/hdlSimulator.py#L296-L307
def _scheduleCombUpdateDoneEv(self) -> Event:
        """
        Schedule combUpdateDoneEv event to let agents know that the current
        delta step is ending and values from combinational logic are stable
        """
        assert not self._combUpdateDonePlaned, self.now
        cud = Event(self)
        cud.process_to_wake.append(self.__deleteCombUpdateDoneEv())
        self._add_process(cud, PRIORITY_AGENTS_UPDATE_DONE)
        self._combUpdateDonePlaned = True
        self.combUpdateDoneEv = cud
        return cud
[ "def", "_scheduleCombUpdateDoneEv", "(", "self", ")", "->", "Event", ":", "assert", "not", "self", ".", "_combUpdateDonePlaned", ",", "self", ".", "now", "cud", "=", "Event", "(", "self", ")", "cud", ".", "process_to_wake", ".", "append", "(", "self", ".", "__deleteCombUpdateDoneEv", "(", ")", ")", "self", ".", "_add_process", "(", "cud", ",", "PRIORITY_AGENTS_UPDATE_DONE", ")", "self", ".", "_combUpdateDonePlaned", "=", "True", "self", ".", "combUpdateDoneEv", "=", "cud", "return", "cud" ]
Schedule combUpdateDoneEv event to let agents know that the current delta step is ending and values from combinational logic are stable
[ "Schedule", "combUpdateDoneEv", "event", "to", "let", "agents", "know", "that", "current", "delta", "step", "is", "ending", "and", "values", "from", "combinational", "logic", "are", "stable" ]
python
test
42.75
hendrix/hendrix
hendrix/contrib/cache/resource.py
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L72-L90
def handleResponseEnd(self): """ Extends handleResponseEnd to not care about the user closing/refreshing their browser before the response is finished. Also calls cacheContent in a thread that we don't care when it finishes. """ try: if not self._finished: reactor.callInThread( self.resource.cacheContent, self.father, self._response, self.buffer ) proxy.ProxyClient.handleResponseEnd(self) except RuntimeError: # because we don't care if the user hits # refresh before the request is done pass
[ "def", "handleResponseEnd", "(", "self", ")", ":", "try", ":", "if", "not", "self", ".", "_finished", ":", "reactor", ".", "callInThread", "(", "self", ".", "resource", ".", "cacheContent", ",", "self", ".", "father", ",", "self", ".", "_response", ",", "self", ".", "buffer", ")", "proxy", ".", "ProxyClient", ".", "handleResponseEnd", "(", "self", ")", "except", "RuntimeError", ":", "# because we don't care if the user hits", "# refresh before the request is done", "pass" ]
Extends handleResponseEnd to not care about the user closing/refreshing their browser before the response is finished. Also calls cacheContent in a thread that we don't care when it finishes.
[ "Extends", "handleResponseEnd", "to", "not", "care", "about", "the", "user", "closing", "/", "refreshing", "their", "browser", "before", "the", "response", "is", "finished", ".", "Also", "calls", "cacheContent", "in", "a", "thread", "that", "we", "don", "t", "care", "when", "it", "finishes", "." ]
python
train
37.105263
borisbabic/browser_cookie3
__init__.py
https://github.com/borisbabic/browser_cookie3/blob/e695777c54509c286991c5bb5ca65f043d748f55/__init__.py#L183-L202
def _decrypt(self, value, encrypted_value): """Decrypt encoded cookies """ if sys.platform == 'win32': return self._decrypt_windows_chrome(value, encrypted_value) if value or (encrypted_value[:3] != b'v10'): return value # Encrypted cookies should be prefixed with 'v10' according to the # Chromium code. Strip it off. encrypted_value = encrypted_value[3:] encrypted_value_half_len = int(len(encrypted_value) / 2) cipher = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(self.key, self.iv)) decrypted = cipher.feed(encrypted_value[:encrypted_value_half_len]) decrypted += cipher.feed(encrypted_value[encrypted_value_half_len:]) decrypted += cipher.feed() return decrypted.decode("utf-8")
[ "def", "_decrypt", "(", "self", ",", "value", ",", "encrypted_value", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "return", "self", ".", "_decrypt_windows_chrome", "(", "value", ",", "encrypted_value", ")", "if", "value", "or", "(", "encrypted_value", "[", ":", "3", "]", "!=", "b'v10'", ")", ":", "return", "value", "# Encrypted cookies should be prefixed with 'v10' according to the", "# Chromium code. Strip it off.", "encrypted_value", "=", "encrypted_value", "[", "3", ":", "]", "encrypted_value_half_len", "=", "int", "(", "len", "(", "encrypted_value", ")", "/", "2", ")", "cipher", "=", "pyaes", ".", "Decrypter", "(", "pyaes", ".", "AESModeOfOperationCBC", "(", "self", ".", "key", ",", "self", ".", "iv", ")", ")", "decrypted", "=", "cipher", ".", "feed", "(", "encrypted_value", "[", ":", "encrypted_value_half_len", "]", ")", "decrypted", "+=", "cipher", ".", "feed", "(", "encrypted_value", "[", "encrypted_value_half_len", ":", "]", ")", "decrypted", "+=", "cipher", ".", "feed", "(", ")", "return", "decrypted", ".", "decode", "(", "\"utf-8\"", ")" ]
Decrypt encoded cookies
[ "Decrypt", "encoded", "cookies" ]
python
valid
39.8
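Background for the 'v10' branch handled above: on Linux, Chromium derives the AES-128-CBC key roughly as below. The constants are the Chromium defaults, stated here as assumptions rather than read from this module.

import hashlib

password = b'peanuts'   # Linux fallback password when no keyring is used (assumption)
salt = b'saltysalt'
iv = b' ' * 16
key = hashlib.pbkdf2_hmac('sha1', password, salt, 1, dklen=16)
# _decrypt() above then strips the b'v10' prefix and feeds the remainder
# through AES-CBC with this key/iv pair via pyaes.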
wind-python/windpowerlib
example/modelchain_example.py
https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/example/modelchain_example.py#L216-L262
def plot_or_print(my_turbine, e126, dummy_turbine): r""" Plots or prints power output and power (coefficient) curves. Parameters ---------- my_turbine : WindTurbine WindTurbine object with self provided power curve. e126 : WindTurbine WindTurbine object with power curve from data file provided by the windpowerlib. dummy_turbine : WindTurbine WindTurbine object with power coefficient curve from example file. """ # plot or print turbine power output if plt: e126.power_output.plot(legend=True, label='Enercon E126') my_turbine.power_output.plot(legend=True, label='myTurbine') dummy_turbine.power_output.plot(legend=True, label='dummyTurbine') plt.show() else: print(e126.power_output) print(my_turbine.power_output) print(dummy_turbine.power_output) # plot or print power curve if plt: if e126.power_curve is not None: e126.power_curve.plot(x='wind_speed', y='value', style='*', title='Enercon E126 power curve') plt.show() if my_turbine.power_curve is not None: my_turbine.power_curve.plot(x='wind_speed', y='value', style='*', title='myTurbine power curve') plt.show() if dummy_turbine.power_coefficient_curve is not None: dummy_turbine.power_coefficient_curve.plot( x='wind_speed', y='value', style='*', title='dummyTurbine power coefficient curve') plt.show() else: if e126.power_coefficient_curve is not None: print(e126.power_coefficient_curve) if e126.power_curve is not None: print(e126.power_curve)
[ "def", "plot_or_print", "(", "my_turbine", ",", "e126", ",", "dummy_turbine", ")", ":", "# plot or print turbine power output", "if", "plt", ":", "e126", ".", "power_output", ".", "plot", "(", "legend", "=", "True", ",", "label", "=", "'Enercon E126'", ")", "my_turbine", ".", "power_output", ".", "plot", "(", "legend", "=", "True", ",", "label", "=", "'myTurbine'", ")", "dummy_turbine", ".", "power_output", ".", "plot", "(", "legend", "=", "True", ",", "label", "=", "'dummyTurbine'", ")", "plt", ".", "show", "(", ")", "else", ":", "print", "(", "e126", ".", "power_output", ")", "print", "(", "my_turbine", ".", "power_output", ")", "print", "(", "dummy_turbine", ".", "power_output", ")", "# plot or print power curve", "if", "plt", ":", "if", "e126", ".", "power_curve", "is", "not", "None", ":", "e126", ".", "power_curve", ".", "plot", "(", "x", "=", "'wind_speed'", ",", "y", "=", "'value'", ",", "style", "=", "'*'", ",", "title", "=", "'Enercon E126 power curve'", ")", "plt", ".", "show", "(", ")", "if", "my_turbine", ".", "power_curve", "is", "not", "None", ":", "my_turbine", ".", "power_curve", ".", "plot", "(", "x", "=", "'wind_speed'", ",", "y", "=", "'value'", ",", "style", "=", "'*'", ",", "title", "=", "'myTurbine power curve'", ")", "plt", ".", "show", "(", ")", "if", "dummy_turbine", ".", "power_coefficient_curve", "is", "not", "None", ":", "dummy_turbine", ".", "power_coefficient_curve", ".", "plot", "(", "x", "=", "'wind_speed'", ",", "y", "=", "'value'", ",", "style", "=", "'*'", ",", "title", "=", "'dummyTurbine power coefficient curve'", ")", "plt", ".", "show", "(", ")", "else", ":", "if", "e126", ".", "power_coefficient_curve", "is", "not", "None", ":", "print", "(", "e126", ".", "power_coefficient_curve", ")", "if", "e126", ".", "power_curve", "is", "not", "None", ":", "print", "(", "e126", ".", "power_curve", ")" ]
r""" Plots or prints power output and power (coefficient) curves. Parameters ---------- my_turbine : WindTurbine WindTurbine object with self provided power curve. e126 : WindTurbine WindTurbine object with power curve from data file provided by the windpowerlib. dummy_turbine : WindTurbine WindTurbine object with power coefficient curve from example file.
[ "r", "Plots", "or", "prints", "power", "output", "and", "power", "(", "coefficient", ")", "curves", "." ]
python
train
37.319149
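The function above degrades from plotting to printing based on the truthiness of `plt`; a minimal sketch of the optional-import pattern that enables this, assuming matplotlib may be absent:

try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None  # plot_or_print() then falls back to print()

if plt:
    print('matplotlib available: curves will be plotted')
else:
    print('matplotlib missing: falling back to text output')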
Karaage-Cluster/karaage
karaage/common/create_update.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/common/create_update.py#L24-L50
def get_model_and_form_class(model, form_class): """ Returns a model and form class based on the model and form_class parameters that were passed to the generic view. If ``form_class`` is given then its associated model will be returned along with ``form_class`` itself. Otherwise, if ``model`` is given, ``model`` itself will be returned along with a ``ModelForm`` class created from ``model``. """ if form_class: return form_class._meta.model, form_class if model: # The inner Meta class fails if model = model is used for some reason. tmp_model = model # TODO: we should be able to construct a ModelForm without creating # and passing in a temporary inner class. class Meta: model = tmp_model class_name = model.__name__ + 'Form' form_class = ModelFormMetaclass( class_name, (ModelForm,), {'Meta': Meta}) return model, form_class raise GenericViewError("Generic view must be called with either a model or" " form_class argument.")
[ "def", "get_model_and_form_class", "(", "model", ",", "form_class", ")", ":", "if", "form_class", ":", "return", "form_class", ".", "_meta", ".", "model", ",", "form_class", "if", "model", ":", "# The inner Meta class fails if model = model is used for some reason.", "tmp_model", "=", "model", "# TODO: we should be able to construct a ModelForm without creating", "# and passing in a temporary inner class.", "class", "Meta", ":", "model", "=", "tmp_model", "class_name", "=", "model", ".", "__name__", "+", "'Form'", "form_class", "=", "ModelFormMetaclass", "(", "class_name", ",", "(", "ModelForm", ",", ")", ",", "{", "'Meta'", ":", "Meta", "}", ")", "return", "model", ",", "form_class", "raise", "GenericViewError", "(", "\"Generic view must be called with either a model or\"", "\" form_class argument.\"", ")" ]
Returns a model and form class based on the model and form_class parameters that were passed to the generic view. If ``form_class`` is given then its associated model will be returned along with ``form_class`` itself. Otherwise, if ``model`` is given, ``model`` itself will be returned along with a ``ModelForm`` class created from ``model``.
[ "Returns", "a", "model", "and", "form", "class", "based", "on", "the", "model", "and", "form_class", "parameters", "that", "were", "passed", "to", "the", "generic", "view", "." ]
python
train
39.925926
briney/abutils
abutils/utils/mongodb.py
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/utils/mongodb.py#L239-L312
def mongoimport(json, database, ip='localhost', port=27017, user=None, password=None,
                delim='_', delim1=None, delim2=None,
                delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
    '''
    Performs mongoimport on one or more json files.

    Args:

        json: Can be one of several things:

            - path to a single JSON file
            - an iterable (list or tuple) of one or more JSON file paths
            - path to a directory containing one or more JSON files

        database (str): Name of the database into which the JSON files
            will be imported

        ip (str): IP address of the MongoDB server. Default is ``localhost``.

        port (int): Port of the MongoDB database. Default is ``27017``.

        user (str): Username for the MongoDB database, if authentication
            is enabled. Default is ``None``, which results in attempting
            connection without authentication.

        password (str): Password for the MongoDB database, if authentication
            is enabled. Default is ``None``, which results in attempting
            connection without authentication.

        delim (str): Delimiter, when generating collection names
            using a single delimiter. Default is ``_``

        delim_occurance (int): Occurrence at which to split filename
            when using a single delimiter. Default is ``1``

        delim1 (str): Left delimiter when splitting with two delimiters.
            Default is None.

        delim1_occurance (int): Occurrence of ``delim1`` at which to split filename.
            Default is ``1``

        delim2 (str): Right delimiter when splitting with two delimiters.
            Default is None.

        delim2_occurance (int): Occurrence of ``delim2`` at which to split filename.
            Default is ``1``
    '''
    logger = log.get_logger('mongodb')
    _print_mongoimport_info(logger)
    if type(json) in (list, tuple):
        pass
    elif os.path.isdir(json):
        from abtools.utils.pipeline import list_files
        json = list_files(json)
    else:
        json = [json, ]
    jsons = sorted([os.path.expanduser(j) for j in json if j.endswith('.json')])
    collections = _get_import_collections(jsons, delim, delim_occurance,
                                          delim1, delim1_occurance,
                                          delim2, delim2_occurance)
    logger.info('Found {} files to import'.format(len(jsons)))
    logger.info('')
    for i, (json_file, collection) in enumerate(zip(jsons, collections)):
        logger.info('[ {} ] {} --> {}'.format(i + 1, os.path.basename(json_file), collection))
        # logger.info("Performing mongoimport on {}.".format(os.path.basename(json_file)))
        # logger.info("Importing the file into collection {}.".format(collection))
        if all([user, password]):
            host = '--host {} --port {} -username {} -password {}'.format(ip, port, user, password)
        else:
            host = '--host {} --port {}'.format(ip, port)
        mongo_cmd = "mongoimport {} --db {} --collection {} --file {}".format(
            host, database, collection, json_file)
        mongo = sp.Popen(mongo_cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
        stdout, stderr = mongo.communicate()
[ "def", "mongoimport", "(", "json", ",", "database", ",", "ip", "=", "'localhost'", ",", "port", "=", "27017", ",", "user", "=", "None", ",", "password", "=", "None", ",", "delim", "=", "'_'", ",", "delim1", "=", "None", ",", "delim2", "=", "None", ",", "delim_occurance", "=", "1", ",", "delim1_occurance", "=", "1", ",", "delim2_occurance", "=", "1", ")", ":", "logger", "=", "log", ".", "get_logger", "(", "'mongodb'", ")", "_print_mongoimport_info", "(", "logger", ")", "if", "type", "(", "json", ")", "in", "(", "list", ",", "tuple", ")", ":", "pass", "elif", "os", ".", "path", ".", "isdir", "(", "json", ")", ":", "from", "abtools", ".", "utils", ".", "pipeline", "import", "list_files", "json", "=", "list_files", "(", "json", ")", "else", ":", "json", "=", "[", "json", ",", "]", "jsons", "=", "sorted", "(", "[", "os", ".", "path", ".", "expanduser", "(", "j", ")", "for", "j", "in", "json", "if", "j", ".", "endswith", "(", "'.json'", ")", "]", ")", "collections", "=", "_get_import_collections", "(", "jsons", ",", "delim", ",", "delim_occurance", ",", "delim1", ",", "delim1_occurance", ",", "delim2", ",", "delim2_occurance", ")", "logger", ".", "info", "(", "'Found {} files to import'", ".", "format", "(", "len", "(", "jsons", ")", ")", ")", "logger", ".", "info", "(", "''", ")", "for", "i", ",", "(", "json_file", ",", "collection", ")", "in", "enumerate", "(", "zip", "(", "jsons", ",", "collections", ")", ")", ":", "logger", ".", "info", "(", "'[ {} ] {} --> {}'", ".", "format", "(", "i", "+", "1", ",", "os", ".", "path", ".", "basename", "(", "json_file", ")", ",", "collection", ")", ")", "# logger.info(\"Performing mongoimport on {}.\".format(os.path.basename(json_file)))", "# logger.info(\"Importing the file into collection {}.\".format(collection))", "if", "all", "(", "[", "user", ",", "password", "]", ")", ":", "host", "=", "'--host {} --port {} -username {} -password {}'", ".", "format", "(", "ip", ",", "port", ",", "user", ",", "password", ")", "else", ":", "host", "=", "'--host {} --port {}'", ".", "format", "(", "ip", ",", "port", ")", "mongo_cmd", "=", "\"mongoimport {} --db {} --collection {} --file {}\"", ".", "format", "(", "host", ",", "database", ",", "collection", ",", "json_file", ")", "mongo", "=", "sp", ".", "Popen", "(", "mongo_cmd", ",", "shell", "=", "True", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ")", "stdout", ",", "stderr", "=", "mongo", ".", "communicate", "(", ")" ]
Performs mongoimport on one or more json files.

Args:

    json: Can be one of several things:

        - path to a single JSON file
        - an iterable (list or tuple) of one or more JSON file paths
        - path to a directory containing one or more JSON files

    database (str): Name of the database into which the JSON files
        will be imported

    ip (str): IP address of the MongoDB server. Default is ``localhost``.

    port (int): Port of the MongoDB database. Default is ``27017``.

    user (str): Username for the MongoDB database, if authentication
        is enabled. Default is ``None``, which results in attempting
        connection without authentication.

    password (str): Password for the MongoDB database, if authentication
        is enabled. Default is ``None``, which results in attempting
        connection without authentication.

    delim (str): Delimiter, when generating collection names
        using a single delimiter. Default is ``_``

    delim_occurance (int): Occurrence at which to split filename
        when using a single delimiter. Default is ``1``

    delim1 (str): Left delimiter when splitting with two delimiters.
        Default is None.

    delim1_occurance (int): Occurrence of ``delim1`` at which to split filename.
        Default is ``1``

    delim2 (str): Right delimiter when splitting with two delimiters.
        Default is None.

    delim2_occurance (int): Occurrence of ``delim2`` at which to split filename.
        Default is ``1``
[ "Performs", "mongoimport", "on", "one", "or", "more", "json", "files", "." ]
python
train
43.743243
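A hypothetical invocation of the `mongoimport` helper above; the directory and database name are made up for illustration:

# Import every *.json file in a directory; collection names are derived
# from the file names by splitting at the first '_' (the defaults).
mongoimport('/data/abstar_output', 'antibody_db',
            ip='localhost', port=27017,
            delim='_', delim_occurance=1)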
Robpol86/colorclass
colorclass/core.py
https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/core.py#L189-L196
def index(self, sub, start=None, end=None): """Like S.find() but raise ValueError when the substring is not found. :param str sub: Substring to search. :param int start: Beginning position. :param int end: Stop comparison at this position. """ return self.value_no_colors.index(sub, start, end)
[ "def", "index", "(", "self", ",", "sub", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "return", "self", ".", "value_no_colors", ".", "index", "(", "sub", ",", "start", ",", "end", ")" ]
Like S.find() but raise ValueError when the substring is not found. :param str sub: Substring to search. :param int start: Beginning position. :param int end: Stop comparison at this position.
[ "Like", "S", ".", "find", "()", "but", "raise", "ValueError", "when", "the", "substring", "is", "not", "found", "." ]
python
train
42
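A short usage sketch for the `index` method above, assuming the standard colorclass markup; positions are counted against the color-stripped value:

from colorclass import Color

msg = Color('{red}error{/red}: disk full')
print(msg.index('disk'))          # 7, as if the string were 'error: disk full'
print(msg.index('error', 0, 10))  # 0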
google/grumpy
third_party/stdlib/bisect.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/bisect.py#L3-L20
def insort_right(a, x, lo=0, hi=None): """Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 a.insert(lo, x)
[ "def", "insort_right", "(", "a", ",", "x", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "lo", "<", "0", ":", "raise", "ValueError", "(", "'lo must be non-negative'", ")", "if", "hi", "is", "None", ":", "hi", "=", "len", "(", "a", ")", "while", "lo", "<", "hi", ":", "mid", "=", "(", "lo", "+", "hi", ")", "//", "2", "if", "x", "<", "a", "[", "mid", "]", ":", "hi", "=", "mid", "else", ":", "lo", "=", "mid", "+", "1", "a", ".", "insert", "(", "lo", ",", "x", ")" ]
Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
[ "Insert", "item", "x", "in", "list", "a", "and", "keep", "it", "sorted", "assuming", "a", "is", "sorted", "." ]
python
valid
27.722222
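A quick worked example of `insort_right`; equal elements are inserted to the right of the existing ones:

a = [1, 3, 3, 5, 8]
insort_right(a, 3)
print(a)  # [1, 3, 3, 3, 5, 8], the new 3 lands after the existing 3s

insort_right(a, 6, lo=2)  # the searched slice can optionally be bounded
print(a)  # [1, 3, 3, 3, 5, 6, 8]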
jaraco/tempora
tempora/__init__.py
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L282-L294
def get_nearest_year_for_day(day): """ Returns the nearest year to now inferred from a Julian date. """ now = time.gmtime() result = now.tm_year # if the day is far greater than today, it must be from last year if day - now.tm_yday > 365 // 2: result -= 1 # if the day is far less than today, it must be for next year. if now.tm_yday - day > 365 // 2: result += 1 return result
[ "def", "get_nearest_year_for_day", "(", "day", ")", ":", "now", "=", "time", ".", "gmtime", "(", ")", "result", "=", "now", ".", "tm_year", "# if the day is far greater than today, it must be from last year", "if", "day", "-", "now", ".", "tm_yday", ">", "365", "//", "2", ":", "result", "-=", "1", "# if the day is far less than today, it must be for next year.", "if", "now", ".", "tm_yday", "-", "day", ">", "365", "//", "2", ":", "result", "+=", "1", "return", "result" ]
Returns the nearest year to now inferred from a Julian date.
[ "Returns", "the", "nearest", "year", "to", "now", "inferred", "from", "a", "Julian", "date", "." ]
python
valid
29.153846
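A worked example for `get_nearest_year_for_day`; the result depends on the current UTC date, so the expected outputs below assume the call is made in early January:

# Assuming now is around day-of-year 5:
# day 360 is more than half a year ahead of today, so it is read as
# late *last* year, while day 20 stays in the current year.
print(get_nearest_year_for_day(360))  # e.g. 2023 when now is early 2024
print(get_nearest_year_for_day(20))   # e.g. 2024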
apache/airflow
airflow/contrib/hooks/gcp_mlengine_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L165-L183
def create_version(self, project_id, model_name, version_spec): """ Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise. """ parent_name = 'projects/{}/models/{}'.format(project_id, model_name) create_request = self._mlengine.projects().models().versions().create( parent=parent_name, body=version_spec) response = create_request.execute() get_request = self._mlengine.projects().operations().get( name=response['name']) return _poll_with_exponential_delay( request=get_request, max_n=9, is_done_func=lambda resp: resp.get('done', False), is_error_func=lambda resp: resp.get('error', None) is not None)
[ "def", "create_version", "(", "self", ",", "project_id", ",", "model_name", ",", "version_spec", ")", ":", "parent_name", "=", "'projects/{}/models/{}'", ".", "format", "(", "project_id", ",", "model_name", ")", "create_request", "=", "self", ".", "_mlengine", ".", "projects", "(", ")", ".", "models", "(", ")", ".", "versions", "(", ")", ".", "create", "(", "parent", "=", "parent_name", ",", "body", "=", "version_spec", ")", "response", "=", "create_request", ".", "execute", "(", ")", "get_request", "=", "self", ".", "_mlengine", ".", "projects", "(", ")", ".", "operations", "(", ")", ".", "get", "(", "name", "=", "response", "[", "'name'", "]", ")", "return", "_poll_with_exponential_delay", "(", "request", "=", "get_request", ",", "max_n", "=", "9", ",", "is_done_func", "=", "lambda", "resp", ":", "resp", ".", "get", "(", "'done'", ",", "False", ")", ",", "is_error_func", "=", "lambda", "resp", ":", "resp", ".", "get", "(", "'error'", ",", "None", ")", "is", "not", "None", ")" ]
Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise.
[ "Creates", "the", "Version", "on", "Google", "Cloud", "ML", "Engine", "." ]
python
test
43.421053
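A sketch of a `version_spec` payload for the hook above; the field names follow the public Cloud ML Engine Version resource, and the project, model, and bucket names are hypothetical:

version_spec = {
    'name': 'v1',
    'deploymentUri': 'gs://my-bucket/exported_model',  # hypothetical GCS path
    'runtimeVersion': '1.8',
}
# operation = hook.create_version('my-project', 'my_model', version_spec)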
caktus/django-timepiece
timepiece/management/commands/check_entries.py
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/management/commands/check_entries.py#L135-L155
def find_users(self, *args): """ Returns the users to search given names as args. Return all users if there are no args provided. """ if args: names = reduce(lambda query, arg: query | (Q(first_name__icontains=arg) | Q(last_name__icontains=arg)), args, Q()) # noqa users = User.objects.filter(names) # If no args given, check every user else: users = User.objects.all() # Display errors if no user was found if not users.count() and args: if len(args) == 1: raise CommandError('No user was found with the name %s' % args[0]) else: arg_list = ', '.join(args) raise CommandError('No users found with the names: %s' % arg_list) return users
[ "def", "find_users", "(", "self", ",", "*", "args", ")", ":", "if", "args", ":", "names", "=", "reduce", "(", "lambda", "query", ",", "arg", ":", "query", "|", "(", "Q", "(", "first_name__icontains", "=", "arg", ")", "|", "Q", "(", "last_name__icontains", "=", "arg", ")", ")", ",", "args", ",", "Q", "(", ")", ")", "# noqa", "users", "=", "User", ".", "objects", ".", "filter", "(", "names", ")", "# If no args given, check every user", "else", ":", "users", "=", "User", ".", "objects", ".", "all", "(", ")", "# Display errors if no user was found", "if", "not", "users", ".", "count", "(", ")", "and", "args", ":", "if", "len", "(", "args", ")", "==", "1", ":", "raise", "CommandError", "(", "'No user was found with the name %s'", "%", "args", "[", "0", "]", ")", "else", ":", "arg_list", "=", "', '", ".", "join", "(", "args", ")", "raise", "CommandError", "(", "'No users found with the names: %s'", "%", "arg_list", ")", "return", "users" ]
Returns the users to search given names as args. Return all users if there are no args provided.
[ "Returns", "the", "users", "to", "search", "given", "names", "as", "args", ".", "Return", "all", "users", "if", "there", "are", "no", "args", "provided", "." ]
python
train
39.904762
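A minimal sketch of the reduce-over-Q idiom used above; each argument contributes one ORed pair of name filters (Django model layer assumed):

from functools import reduce
from django.db.models import Q

args = ('alice', 'bob')
names = reduce(
    lambda query, arg: query | (Q(first_name__icontains=arg) |
                                Q(last_name__icontains=arg)),
    args, Q())
# Equivalent to: first_name ~ 'alice' OR last_name ~ 'alice'
#             OR first_name ~ 'bob'   OR last_name ~ 'bob'
# users = User.objects.filter(names)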
saltstack/salt
salt/proxy/nxos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/nxos.py#L521-L539
def _nxapi_request(commands, method='cli_conf', **kwargs):
    '''
    Executes an nxapi_request request over NX-API.

    commands
        The exec or config commands to be sent.

    method: ``cli_show``
        ``cli_show_ascii``: Return raw text or unstructured output.
        ``cli_show``: Return structured output.
        ``cli_conf``: Send configuration commands to the device.
        Defaults to ``cli_conf``.
    '''
    if CONNECTION == 'ssh':
        return '_nxapi_request is not available for ssh proxy'
    conn_args = DEVICE_DETAILS['conn_args']
    conn_args.update(kwargs)
    data = __utils__['nxos.nxapi_request'](commands, method=method, **conn_args)
    return data
[ "def", "_nxapi_request", "(", "commands", ",", "method", "=", "'cli_conf'", ",", "*", "*", "kwargs", ")", ":", "if", "CONNECTION", "==", "'ssh'", ":", "return", "'_nxapi_request is not available for ssh proxy'", "conn_args", "=", "DEVICE_DETAILS", "[", "'conn_args'", "]", "conn_args", ".", "update", "(", "kwargs", ")", "data", "=", "__utils__", "[", "'nxos.nxapi_request'", "]", "(", "commands", ",", "method", "=", "method", ",", "*", "*", "conn_args", ")", "return", "data" ]
Executes an nxapi_request request over NX-API.

commands
    The exec or config commands to be sent.

method: ``cli_show``
    ``cli_show_ascii``: Return raw text or unstructured output.
    ``cli_show``: Return structured output.
    ``cli_conf``: Send configuration commands to the device.
    Defaults to ``cli_conf``.
[ "Executes", "an", "nxapi_request", "request", "over", "NX", "-", "API", "." ]
python
train
35.315789
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/sql.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/sql.py#L286-L309
def _transactional(self, method, *argv, **argd):
    """
    Begins a transaction and calls the given DAO method.

    If the method executes successfully the transaction is committed.
    If the method fails, the transaction is rolled back.

    @type  method: callable
    @param method: Bound method of this class or one of its subclasses.
        The first argument will always be C{self}.

    @return: The return value of the method call.

    @raise Exception: Any exception raised by the method.
    """
    self._session.begin(subtransactions = True)
    try:
        result = method(self, *argv, **argd)
        self._session.commit()
        return result
    except:
        self._session.rollback()
        raise
[ "def", "_transactional", "(", "self", ",", "method", ",", "*", "argv", ",", "*", "*", "argd", ")", ":", "self", ".", "_session", ".", "begin", "(", "subtransactions", "=", "True", ")", "try", ":", "result", "=", "method", "(", "self", ",", "*", "argv", ",", "*", "*", "argd", ")", "self", ".", "_session", ".", "commit", "(", ")", "return", "result", "except", ":", "self", ".", "_session", ".", "rollback", "(", ")", "raise" ]
Begins a transaction and calls the given DAO method.

If the method executes successfully the transaction is committed.
If the method fails, the transaction is rolled back.

@type  method: callable
@param method: Bound method of this class or one of its subclasses.
    The first argument will always be C{self}.

@return: The return value of the method call.

@raise Exception: Any exception raised by the method.
[ "Begins", "a", "transaction", "and", "calls", "the", "given", "DAO", "method", "." ]
python
train
32.25
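A self-contained sketch of the begin/commit/rollback pattern above, with a stand-in session object in place of the SQLAlchemy plumbing (names are illustrative):

class FakeSession(object):
    # Stand-in session that only prints what a real session would do.
    def begin(self): print('BEGIN')
    def commit(self): print('COMMIT')
    def rollback(self): print('ROLLBACK')

def transactional(session, method, *argv, **argd):
    session.begin()
    try:
        result = method(*argv, **argd)
        session.commit()  # only reached if the method did not raise
        return result
    except Exception:
        session.rollback()
        raise

print(transactional(FakeSession(), lambda: 42))  # BEGIN, COMMIT, 42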
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/notification.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/notification.py#L95-L109
def _observers_for_notification(self, ntype, sender):
    """Find all registered observers that should receive notification"""
    keys = (
               (ntype,sender),
               (ntype, None),
               (None, sender),
               (None,None)
               )
    obs = set()
    for k in keys:
        obs.update(self.observers.get(k, set()))

    return obs
[ "def", "_observers_for_notification", "(", "self", ",", "ntype", ",", "sender", ")", ":", "keys", "=", "(", "(", "ntype", ",", "sender", ")", ",", "(", "ntype", ",", "None", ")", ",", "(", "None", ",", "sender", ")", ",", "(", "None", ",", "None", ")", ")", "obs", "=", "set", "(", ")", "for", "k", "in", "keys", ":", "obs", ".", "update", "(", "self", ".", "observers", ".", "get", "(", "k", ",", "set", "(", ")", ")", ")", "return", "obs" ]
Find all registered observers that should receive notification
[ "Find", "all", "registered", "observers", "that", "should", "recieve", "notification" ]
python
test
26.866667
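A self-contained sketch of the four-key lookup lattice used above: exact (type, sender) match, type-only, sender-only, and the (None, None) catch-all:

observers = {
    ('saved', 'doc1'): {'exact_observer'},
    ('saved', None): {'any_saved_observer'},
    (None, 'doc1'): {'any_doc1_observer'},
    (None, None): {'wildcard_observer'},
}

def observers_for(ntype, sender):
    keys = ((ntype, sender), (ntype, None), (None, sender), (None, None))
    obs = set()
    for k in keys:
        obs.update(observers.get(k, set()))
    return obs

print(sorted(observers_for('saved', 'doc1')))
# ['any_doc1_observer', 'any_saved_observer', 'exact_observer', 'wildcard_observer']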
PyFilesystem/pyfilesystem2
fs/ftpfs.py
https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/ftpfs.py#L440-L448
def ftp_url(self): # type: () -> Text """Get the FTP url this filesystem will open.""" url = ( "ftp://{}".format(self.host) if self.port == 21 else "ftp://{}:{}".format(self.host, self.port) ) return url
[ "def", "ftp_url", "(", "self", ")", ":", "# type: () -> Text", "url", "=", "(", "\"ftp://{}\"", ".", "format", "(", "self", ".", "host", ")", "if", "self", ".", "port", "==", "21", "else", "\"ftp://{}:{}\"", ".", "format", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "return", "url" ]
Get the FTP url this filesystem will open.
[ "Get", "the", "FTP", "url", "this", "filesystem", "will", "open", "." ]
python
train
30.111111
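A quick check of the URL construction above: the default FTP port 21 is omitted, any other port is appended:

host, port = 'ftp.example.org', 2121  # hypothetical server
url = 'ftp://{}'.format(host) if port == 21 else 'ftp://{}:{}'.format(host, port)
print(url)  # ftp://ftp.example.org:2121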
peopledoc/workalendar
workalendar/core.py
https://github.com/peopledoc/workalendar/blob/d044d5dfc1709ec388db34dab583dd554cc66c4e/workalendar/core.py#L566-L633
def get_chinese_new_year(self, year):
    """
    Compute Chinese New Year days. Returns a list of holidays.

    By default, it'll at least return the Chinese New Year holidays chosen
    using the following options:

    * ``include_chinese_new_year_eve``
    * ``include_chinese_new_year`` (on by default)
    * ``include_chinese_second_day``

    If the ``shift_sunday_holidays`` option is on, the rules are the
    following.

    * If the CNY1 falls on MON-FRI, there's no shift.
    * If the CNY1 falls on SAT, the CNY2 is shifted to the Monday after.
    * If the CNY1 falls on SUN, the CNY1 is shifted to the Monday after,
      and CNY2 is shifted to the Tuesday after.
    """
    days = []

    lunar_first_day = ChineseNewYearCalendar.lunar(year, 1, 1)
    # Chinese new year's eve
    if self.include_chinese_new_year_eve:
        days.append((
            lunar_first_day - timedelta(days=1),
            self.chinese_new_year_eve_label
        ))
    # Chinese new year (is included by default)
    if self.include_chinese_new_year:
        days.append((lunar_first_day, self.chinese_new_year_label))

    if self.include_chinese_second_day:
        lunar_second_day = lunar_first_day + timedelta(days=1)
        days.append((
            lunar_second_day,
            self.chinese_second_day_label
        ))
    if self.include_chinese_third_day:
        lunar_third_day = lunar_first_day + timedelta(days=2)
        days.append((
            lunar_third_day,
            self.chinese_third_day_label
        ))

    if self.shift_sunday_holidays:
        if lunar_first_day.weekday() == SUN:
            if self.shift_start_cny_sunday:
                days.append(
                    (lunar_first_day - timedelta(days=1),
                     "Chinese Lunar New Year shift"),
                )
            else:
                if self.include_chinese_third_day:
                    shift_day = lunar_third_day
                else:
                    shift_day = lunar_second_day
                days.append(
                    (shift_day + timedelta(days=1),
                     "Chinese Lunar New Year shift"),
                )
        if (lunar_second_day.weekday() == SUN
                and self.include_chinese_third_day):
            days.append(
                (lunar_third_day + timedelta(days=1),
                 "Chinese Lunar New Year shift"),
            )
    return days
[ "def", "get_chinese_new_year", "(", "self", ",", "year", ")", ":", "days", "=", "[", "]", "lunar_first_day", "=", "ChineseNewYearCalendar", ".", "lunar", "(", "year", ",", "1", ",", "1", ")", "# Chinese new year's eve", "if", "self", ".", "include_chinese_new_year_eve", ":", "days", ".", "append", "(", "(", "lunar_first_day", "-", "timedelta", "(", "days", "=", "1", ")", ",", "self", ".", "chinese_new_year_eve_label", ")", ")", "# Chinese new year (is included by default)", "if", "self", ".", "include_chinese_new_year", ":", "days", ".", "append", "(", "(", "lunar_first_day", ",", "self", ".", "chinese_new_year_label", ")", ")", "if", "self", ".", "include_chinese_second_day", ":", "lunar_second_day", "=", "lunar_first_day", "+", "timedelta", "(", "days", "=", "1", ")", "days", ".", "append", "(", "(", "lunar_second_day", ",", "self", ".", "chinese_second_day_label", ")", ")", "if", "self", ".", "include_chinese_third_day", ":", "lunar_third_day", "=", "lunar_first_day", "+", "timedelta", "(", "days", "=", "2", ")", "days", ".", "append", "(", "(", "lunar_third_day", ",", "self", ".", "chinese_third_day_label", ")", ")", "if", "self", ".", "shift_sunday_holidays", ":", "if", "lunar_first_day", ".", "weekday", "(", ")", "==", "SUN", ":", "if", "self", ".", "shift_start_cny_sunday", ":", "days", ".", "append", "(", "(", "lunar_first_day", "-", "timedelta", "(", "days", "=", "1", ")", ",", "\"Chinese Lunar New Year shift\"", ")", ",", ")", "else", ":", "if", "self", ".", "include_chinese_third_day", ":", "shift_day", "=", "lunar_third_day", "else", ":", "shift_day", "=", "lunar_second_day", "days", ".", "append", "(", "(", "shift_day", "+", "timedelta", "(", "days", "=", "1", ")", ",", "\"Chinese Lunar New Year shift\"", ")", ",", ")", "if", "(", "lunar_second_day", ".", "weekday", "(", ")", "==", "SUN", "and", "self", ".", "include_chinese_third_day", ")", ":", "days", ".", "append", "(", "(", "lunar_third_day", "+", "timedelta", "(", "days", "=", "1", ")", ",", "\"Chinese Lunar New Year shift\"", ")", ",", ")", "return", "days" ]
Compute Chinese New Year days. Returns a list of holidays.

By default, it'll at least return the Chinese New Year holidays chosen
using the following options:

* ``include_chinese_new_year_eve``
* ``include_chinese_new_year`` (on by default)
* ``include_chinese_second_day``

If the ``shift_sunday_holidays`` option is on, the rules are the
following.

* If the CNY1 falls on MON-FRI, there's no shift.
* If the CNY1 falls on SAT, the CNY2 is shifted to the Monday after.
* If the CNY1 falls on SUN, the CNY1 is shifted to the Monday after,
  and CNY2 is shifted to the Tuesday after.
[ "Compute", "Chinese", "New", "Year", "days", ".", "To", "return", "a", "list", "of", "holidays", "." ]
python
train
38.058824
maxcountryman/atomos
atomos/atomic.py
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L59-L73
def compare_and_set(self, expect, update): ''' Atomically sets the value to `update` if the current value is equal to `expect`. :param expect: The expected current value. :param update: The value to set if and only if `expect` equals the current value. ''' with self._lock.exclusive: if self._value == expect: self._value = update return True return False
[ "def", "compare_and_set", "(", "self", ",", "expect", ",", "update", ")", ":", "with", "self", ".", "_lock", ".", "exclusive", ":", "if", "self", ".", "_value", "==", "expect", ":", "self", ".", "_value", "=", "update", "return", "True", "return", "False" ]
Atomically sets the value to `update` if the current value is equal to `expect`. :param expect: The expected current value. :param update: The value to set if and only if `expect` equals the current value.
[ "Atomically", "sets", "the", "value", "to", "update", "if", "the", "current", "value", "is", "equal", "to", "expect", "." ]
python
train
31.133333
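A self-contained sketch of the retry loop that compare-and-set enables; the tiny AtomicBox below mimics the lock-guarded semantics of the class above:

import threading

class AtomicBox(object):
    # Minimal stand-in with the same compare_and_set contract.
    def __init__(self, value):
        self._value = value
        self._lock = threading.Lock()

    def get(self):
        with self._lock:
            return self._value

    def compare_and_set(self, expect, update):
        with self._lock:
            if self._value == expect:
                self._value = update
                return True
            return False

counter = AtomicBox(0)
while True:  # classic CAS increment loop
    current = counter.get()
    if counter.compare_and_set(current, current + 1):
        break
print(counter.get())  # 1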
bukun/TorCMS
torcms/handlers/evaluation_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/evaluation_handler.py#L34-L48
def add_or_update(self, app_id, value):
    '''
    Adding or updating the evaluation.
    :param app_id: the ID of the post.
    :param value: the evaluation
    :return: in JSON format.
    '''
    MEvaluation.add_or_update(self.userinfo.uid, app_id, value)
    out_dic = {
        'eval0': MEvaluation.app_evaluation_count(app_id, 0),
        'eval1': MEvaluation.app_evaluation_count(app_id, 1)
    }
    return json.dump(out_dic, self)
[ "def", "add_or_update", "(", "self", ",", "app_id", ",", "value", ")", ":", "MEvaluation", ".", "add_or_update", "(", "self", ".", "userinfo", ".", "uid", ",", "app_id", ",", "value", ")", "out_dic", "=", "{", "'eval0'", ":", "MEvaluation", ".", "app_evaluation_count", "(", "app_id", ",", "0", ")", ",", "'eval1'", ":", "MEvaluation", ".", "app_evaluation_count", "(", "app_id", ",", "1", ")", "}", "return", "json", ".", "dump", "(", "out_dic", ",", "self", ")" ]
Adding or updating the evaluation.
:param app_id: the ID of the post.
:param value: the evaluation
:return: in JSON format.
[ "Adding", "or", "updating", "the", "evalution", ".", ":", "param", "app_id", ":", "the", "ID", "of", "the", "post", ".", ":", "param", "value", ":", "the", "evaluation", ":", "return", ":", "in", "JSON", "format", "." ]
python
train
31.8
genialis/resolwe
resolwe/elastic/builder.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/builder.py#L57-L76
def set(self, obj, build_kwargs): """Set cached value.""" if build_kwargs is None: build_kwargs = {} cached = {} if 'queryset' in build_kwargs: cached = { 'model': build_kwargs['queryset'].model, 'pks': list(build_kwargs['queryset'].values_list('pk', flat=True)), } elif 'obj' in build_kwargs: cached = { 'obj': build_kwargs['obj'], } if not hasattr(self._thread_local, 'cache'): self._thread_local.cache = {} self._thread_local.cache[self._get_cache_key(obj)] = cached
[ "def", "set", "(", "self", ",", "obj", ",", "build_kwargs", ")", ":", "if", "build_kwargs", "is", "None", ":", "build_kwargs", "=", "{", "}", "cached", "=", "{", "}", "if", "'queryset'", "in", "build_kwargs", ":", "cached", "=", "{", "'model'", ":", "build_kwargs", "[", "'queryset'", "]", ".", "model", ",", "'pks'", ":", "list", "(", "build_kwargs", "[", "'queryset'", "]", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")", ",", "}", "elif", "'obj'", "in", "build_kwargs", ":", "cached", "=", "{", "'obj'", ":", "build_kwargs", "[", "'obj'", "]", ",", "}", "if", "not", "hasattr", "(", "self", ".", "_thread_local", ",", "'cache'", ")", ":", "self", ".", "_thread_local", ".", "cache", "=", "{", "}", "self", ".", "_thread_local", ".", "cache", "[", "self", ".", "_get_cache_key", "(", "obj", ")", "]", "=", "cached" ]
Set cached value.
[ "Set", "cached", "value", "." ]
python
train
31.45
MycroftAI/mycroft-precise
precise/util.py
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/util.py#L68-L77
def play_audio(filename: str): """ Args: filename: Audio filename """ import platform from subprocess import Popen player = 'play' if platform.system() == 'Darwin' else 'aplay' Popen([player, '-q', filename])
[ "def", "play_audio", "(", "filename", ":", "str", ")", ":", "import", "platform", "from", "subprocess", "import", "Popen", "player", "=", "'play'", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", "else", "'aplay'", "Popen", "(", "[", "player", ",", "'-q'", ",", "filename", "]", ")" ]
Args: filename: Audio filename
[ "Args", ":", "filename", ":", "Audio", "filename" ]
python
train
23.6
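A hypothetical call to the helper above; the file path is made up, and the player binary is picked per platform ('play' on macOS, 'aplay' elsewhere):

# On Linux this runs:  aplay -q /tmp/activation_chime.wav
# On macOS it runs:    play -q /tmp/activation_chime.wav
play_audio('/tmp/activation_chime.wav')  # hypothetical file path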
ahwillia/tensortools
tensortools/diagnostics.py
https://github.com/ahwillia/tensortools/blob/f375633ec621caa96665a56205dcf932590d4a6e/tensortools/diagnostics.py#L9-L95
def kruskal_align(U, V, permute_U=False, permute_V=False):
    """Aligns two KTensors and returns a similarity score.

    Parameters
    ----------
    U : KTensor
        First kruskal tensor to align.
    V : KTensor
        Second kruskal tensor to align.
    permute_U : bool
        If True, modifies 'U' to align the KTensors (default is False).
    permute_V : bool
        If True, modifies 'V' to align the KTensors (default is False).

    Notes
    -----
    If `permute_U` and `permute_V` are both set to True, then the
    factors are ordered from most to least similar. If only one is
    True then the factors on the modified KTensor are re-ordered to
    match the factors in the un-aligned KTensor.

    Returns
    -------
    similarity : float
        Similarity score between zero and one.
    """

    # Compute similarity matrices.
    unrm = [f / np.linalg.norm(f, axis=0) for f in U.factors]
    vnrm = [f / np.linalg.norm(f, axis=0) for f in V.factors]
    sim_matrices = [np.dot(u.T, v) for u, v in zip(unrm, vnrm)]
    cost = 1 - np.mean(np.abs(sim_matrices), axis=0)

    # Solve matching problem via Hungarian algorithm.
    indices = Munkres().compute(cost.copy())
    prmU, prmV = zip(*indices)

    # Compute mean factor similarity given the optimal matching.
    similarity = np.mean(1 - cost[prmU, prmV])

    # If U and V are of different ranks, identify unmatched factors.
    unmatched_U = list(set(range(U.rank)) - set(prmU))
    unmatched_V = list(set(range(V.rank)) - set(prmV))

    # If permuting both U and V, order factors from most to least similar.
    if permute_U and permute_V:
        idx = np.argsort(cost[prmU, prmV])

    # If permute_U is False, then order the factors such that the ordering
    # for U is unchanged.
    elif permute_V:
        idx = np.argsort(prmU)

    # If permute_V is False, then order the factors such that the ordering
    # for V is unchanged.
    elif permute_U:
        idx = np.argsort(prmV)

    # If permute_U and permute_V are both False, then we are done and can
    # simply return the similarity.
    else:
        return similarity

    # Re-order the factor permutations.
    prmU = [prmU[i] for i in idx]
    prmV = [prmV[i] for i in idx]

    # Permute the factors.
    if permute_U:
        U.permute(prmU)
    if permute_V:
        V.permute(prmV)

    # Flip the signs of factors.
    flips = np.sign([F[prmU, prmV] for F in sim_matrices])
    flips[0] *= np.prod(flips, axis=0)  # always flip an even number of factors

    if permute_U:
        for i, f in enumerate(flips):
            U.factors[i] *= f

    elif permute_V:
        for i, f in enumerate(flips):
            V.factors[i] *= f

    # Return the similarity score
    return similarity
[ "def", "kruskal_align", "(", "U", ",", "V", ",", "permute_U", "=", "False", ",", "permute_V", "=", "False", ")", ":", "# Compute similarity matrices.", "unrm", "=", "[", "f", "/", "np", ".", "linalg", ".", "norm", "(", "f", ",", "axis", "=", "0", ")", "for", "f", "in", "U", ".", "factors", "]", "vnrm", "=", "[", "f", "/", "np", ".", "linalg", ".", "norm", "(", "f", ",", "axis", "=", "0", ")", "for", "f", "in", "V", ".", "factors", "]", "sim_matrices", "=", "[", "np", ".", "dot", "(", "u", ".", "T", ",", "v", ")", "for", "u", ",", "v", "in", "zip", "(", "unrm", ",", "vnrm", ")", "]", "cost", "=", "1", "-", "np", ".", "mean", "(", "np", ".", "abs", "(", "sim_matrices", ")", ",", "axis", "=", "0", ")", "# Solve matching problem via Hungarian algorithm.", "indices", "=", "Munkres", "(", ")", ".", "compute", "(", "cost", ".", "copy", "(", ")", ")", "prmU", ",", "prmV", "=", "zip", "(", "*", "indices", ")", "# Compute mean factor similarity given the optimal matching.", "similarity", "=", "np", ".", "mean", "(", "1", "-", "cost", "[", "prmU", ",", "prmV", "]", ")", "# If U and V are of different ranks, identify unmatched factors.", "unmatched_U", "=", "list", "(", "set", "(", "range", "(", "U", ".", "rank", ")", ")", "-", "set", "(", "prmU", ")", ")", "unmatched_V", "=", "list", "(", "set", "(", "range", "(", "V", ".", "rank", ")", ")", "-", "set", "(", "prmV", ")", ")", "# If permuting both U and V, order factors from most to least similar.", "if", "permute_U", "and", "permute_V", ":", "idx", "=", "np", ".", "argsort", "(", "cost", "[", "prmU", ",", "prmV", "]", ")", "# If permute_U is False, then order the factors such that the ordering", "# for U is unchanged.", "elif", "permute_V", ":", "idx", "=", "np", ".", "argsort", "(", "prmU", ")", "# If permute_V is False, then order the factors such that the ordering", "# for V is unchanged.", "elif", "permute_U", ":", "idx", "=", "np", ".", "argsort", "(", "prmV", ")", "# If permute_U and permute_V are both False, then we are done and can", "# simply return the similarity.", "else", ":", "return", "similarity", "# Re-order the factor permutations.", "prmU", "=", "[", "prmU", "[", "i", "]", "for", "i", "in", "idx", "]", "prmV", "=", "[", "prmV", "[", "i", "]", "for", "i", "in", "idx", "]", "# Permute the factors.", "if", "permute_U", ":", "U", ".", "permute", "(", "prmU", ")", "if", "permute_V", ":", "V", ".", "permute", "(", "prmV", ")", "# Flip the signs of factors.", "flips", "=", "np", ".", "sign", "(", "[", "F", "[", "prmU", ",", "prmV", "]", "for", "F", "in", "sim_matrices", "]", ")", "flips", "[", "0", "]", "*=", "np", ".", "prod", "(", "flips", ",", "axis", "=", "0", ")", "# always flip an even number of factors", "if", "permute_U", ":", "for", "i", ",", "f", "in", "enumerate", "(", "flips", ")", ":", "U", ".", "factors", "[", "i", "]", "*=", "f", "elif", "permute_V", ":", "for", "i", ",", "f", "in", "enumerate", "(", "flips", ")", ":", "V", ".", "factors", "[", "i", "]", "*=", "f", "# Return the similarity score", "return", "similarity" ]
Aligns two KTensors and returns a similarity score.

Parameters
----------
U : KTensor
    First kruskal tensor to align.
V : KTensor
    Second kruskal tensor to align.
permute_U : bool
    If True, modifies 'U' to align the KTensors (default is False).
permute_V : bool
    If True, modifies 'V' to align the KTensors (default is False).

Notes
-----
If `permute_U` and `permute_V` are both set to True, then the
factors are ordered from most to least similar. If only one is
True then the factors on the modified KTensor are re-ordered to
match the factors in the un-aligned KTensor.

Returns
-------
similarity : float
    Similarity score between zero and one.
[ "Aligns", "two", "KTensors", "and", "returns", "a", "similarity", "score", "." ]
python
train
30.689655
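A numpy-only sketch of the core matching step above: build the mean absolute-cosine cost between factor columns, then solve the assignment. scipy's linear_sum_assignment stands in for the Munkres solver that tensortools uses:

import numpy as np
from scipy.optimize import linear_sum_assignment

rng = np.random.RandomState(0)
U = [rng.randn(10, 3) for _ in range(3)]  # one factor matrix per tensor mode
perm = [2, 0, 1]
V = [f[:, perm] * -1.0 for f in U]        # permuted, sign-flipped copy of U

unrm = [f / np.linalg.norm(f, axis=0) for f in U]
vnrm = [f / np.linalg.norm(f, axis=0) for f in V]
cost = 1 - np.mean([np.abs(u.T.dot(v)) for u, v in zip(unrm, vnrm)], axis=0)

rows, cols = linear_sum_assignment(cost)  # optimal factor matching
print(cols)                               # [1 2 0]: each U factor's match in V
print(np.mean(1 - cost[rows, cols]))      # ~1.0, sign flips ignored via abs()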
crate/crate-python
src/crate/client/http.py
https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/http.py#L318-L331
def sql(self, stmt, parameters=None, bulk_parameters=None): """ Execute SQL stmt against the crate server. """ if stmt is None: return None data = _create_sql_payload(stmt, parameters, bulk_parameters) logger.debug( 'Sending request to %s with payload: %s', self.path, data) content = self._json_request('POST', self.path, data=data) logger.debug("JSON response for stmt(%s): %s", stmt, content) return content
[ "def", "sql", "(", "self", ",", "stmt", ",", "parameters", "=", "None", ",", "bulk_parameters", "=", "None", ")", ":", "if", "stmt", "is", "None", ":", "return", "None", "data", "=", "_create_sql_payload", "(", "stmt", ",", "parameters", ",", "bulk_parameters", ")", "logger", ".", "debug", "(", "'Sending request to %s with payload: %s'", ",", "self", ".", "path", ",", "data", ")", "content", "=", "self", ".", "_json_request", "(", "'POST'", ",", "self", ".", "path", ",", "data", "=", "data", ")", "logger", ".", "debug", "(", "\"JSON response for stmt(%s): %s\"", ",", "stmt", ",", "content", ")", "return", "content" ]
Execute SQL stmt against the crate server.
[ "Execute", "SQL", "stmt", "against", "the", "crate", "server", "." ]
python
train
35.357143
delph-in/pydelphin
delphin/mrs/components.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/components.py#L556-L565
def realpred(cls, lemma, pos, sense=None): """Instantiate a Pred from its components.""" string_tokens = [lemma] if pos is not None: string_tokens.append(pos) if sense is not None: sense = str(sense) string_tokens.append(sense) predstr = '_'.join([''] + string_tokens + ['rel']) return cls(Pred.REALPRED, lemma, pos, sense, predstr)
[ "def", "realpred", "(", "cls", ",", "lemma", ",", "pos", ",", "sense", "=", "None", ")", ":", "string_tokens", "=", "[", "lemma", "]", "if", "pos", "is", "not", "None", ":", "string_tokens", ".", "append", "(", "pos", ")", "if", "sense", "is", "not", "None", ":", "sense", "=", "str", "(", "sense", ")", "string_tokens", ".", "append", "(", "sense", ")", "predstr", "=", "'_'", ".", "join", "(", "[", "''", "]", "+", "string_tokens", "+", "[", "'rel'", "]", ")", "return", "cls", "(", "Pred", ".", "REALPRED", ",", "lemma", ",", "pos", ",", "sense", ",", "predstr", ")" ]
Instantiate a Pred from its components.
[ "Instantiate", "a", "Pred", "from", "its", "components", "." ]
python
train
40.7
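A worked example of the predicate-string assembly in `realpred` above; the underscore join produces the usual DELPH-IN real-predicate form:

# lemma='dog', pos='n', sense=1:
#   string_tokens == ['dog', 'n', '1']
#   predstr == '_'.join([''] + ['dog', 'n', '1'] + ['rel']) == '_dog_n_1_rel'
p = Pred.realpred('dog', 'n', sense=1)

# Without a sense the token is simply omitted: '_sleep_v_rel'
q = Pred.realpred('sleep', 'v')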
ninuxorg/nodeshot
nodeshot/interop/oldimporter/db.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/interop/oldimporter/db.py#L24-L30
def allow_relation(self, obj1, obj2, **hints): """ Relations between objects are allowed between nodeshot2 objects only """ if obj1._meta.app_label != 'oldimporter' and obj2._meta.app_label != 'oldimporter': return True return None
[ "def", "allow_relation", "(", "self", ",", "obj1", ",", "obj2", ",", "*", "*", "hints", ")", ":", "if", "obj1", ".", "_meta", ".", "app_label", "!=", "'oldimporter'", "and", "obj2", ".", "_meta", ".", "app_label", "!=", "'oldimporter'", ":", "return", "True", "return", "None" ]
Relations between objects are allowed between nodeshot2 objects only
[ "Relations", "between", "objects", "are", "allowed", "between", "nodeshot2", "objects", "only" ]
python
train
39.571429
redhat-cip/dci-control-server
dci/auth_mechanism.py
https://github.com/redhat-cip/dci-control-server/blob/b416cf935ec93e4fdd5741f61a21cabecf8454d2/dci/auth_mechanism.py#L135-L149
def get_user_and_check_auth(self, username, password):
    """Check that the username/password combination is valid in the
    database.
    """
    constraint = sql.or_(
        models.USERS.c.name == username,
        models.USERS.c.email == username
    )
    user = self.identity_from_db(models.USERS, constraint)
    if user is None:
        raise dci_exc.DCIException('User %s does not exist.' % username,
                                   status_code=401)
    return user, auth.check_passwords_equal(password, user.password)
[ "def", "get_user_and_check_auth", "(", "self", ",", "username", ",", "password", ")", ":", "constraint", "=", "sql", ".", "or_", "(", "models", ".", "USERS", ".", "c", ".", "name", "==", "username", ",", "models", ".", "USERS", ".", "c", ".", "email", "==", "username", ")", "user", "=", "self", ".", "identity_from_db", "(", "models", ".", "USERS", ",", "constraint", ")", "if", "user", "is", "None", ":", "raise", "dci_exc", ".", "DCIException", "(", "'User %s does not exists.'", "%", "username", ",", "status_code", "=", "401", ")", "return", "user", ",", "auth", ".", "check_passwords_equal", "(", "password", ",", "user", ".", "password", ")" ]
Check that the username/password combination is valid in the database.
[ "Check", "the", "combination", "username", "/", "password", "that", "is", "valid", "on", "the", "database", "." ]
python
train
37.933333
gmcguire/django-db-pool
dbpool/db/backends/postgresql_psycopg2/base.py
https://github.com/gmcguire/django-db-pool/blob/d4e0aa6a150fd7bd2024e079cd3b7147ea341e63/dbpool/db/backends/postgresql_psycopg2/base.py#L85-L98
def _set_up_pool_config(self): ''' Helper to configure pool options during DatabaseWrapper initialization. ''' self._max_conns = self.settings_dict['OPTIONS'].get('MAX_CONNS', pool_config_defaults['MAX_CONNS']) self._min_conns = self.settings_dict['OPTIONS'].get('MIN_CONNS', self._max_conns) self._test_on_borrow = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW', pool_config_defaults['TEST_ON_BORROW']) if self._test_on_borrow: self._test_on_borrow_query = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW_QUERY', pool_config_defaults['TEST_ON_BORROW_QUERY']) else: self._test_on_borrow_query = None
[ "def", "_set_up_pool_config", "(", "self", ")", ":", "self", ".", "_max_conns", "=", "self", ".", "settings_dict", "[", "'OPTIONS'", "]", ".", "get", "(", "'MAX_CONNS'", ",", "pool_config_defaults", "[", "'MAX_CONNS'", "]", ")", "self", ".", "_min_conns", "=", "self", ".", "settings_dict", "[", "'OPTIONS'", "]", ".", "get", "(", "'MIN_CONNS'", ",", "self", ".", "_max_conns", ")", "self", ".", "_test_on_borrow", "=", "self", ".", "settings_dict", "[", "\"OPTIONS\"", "]", ".", "get", "(", "'TEST_ON_BORROW'", ",", "pool_config_defaults", "[", "'TEST_ON_BORROW'", "]", ")", "if", "self", ".", "_test_on_borrow", ":", "self", ".", "_test_on_borrow_query", "=", "self", ".", "settings_dict", "[", "\"OPTIONS\"", "]", ".", "get", "(", "'TEST_ON_BORROW_QUERY'", ",", "pool_config_defaults", "[", "'TEST_ON_BORROW_QUERY'", "]", ")", "else", ":", "self", ".", "_test_on_borrow_query", "=", "None" ]
Helper to configure pool options during DatabaseWrapper initialization.
[ "Helper", "to", "configure", "pool", "options", "during", "DatabaseWrapper", "initialization", "." ]
python
train
56.571429
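A hypothetical Django DATABASES entry showing the OPTIONS keys this helper reads; the key names come straight from the code above, while the values and ENGINE path are illustrative:

DATABASES = {
    'default': {
        'ENGINE': 'dbpool.db.backends.postgresql_psycopg2',
        'NAME': 'mydb',
        'OPTIONS': {
            'MAX_CONNS': 10,                     # pool upper bound
            'MIN_CONNS': 2,                      # defaults to MAX_CONNS if omitted
            'TEST_ON_BORROW': True,              # validate connections on checkout
            'TEST_ON_BORROW_QUERY': 'SELECT 1',  # only read when testing on borrow
        },
    },
}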
h2non/pook
pook/engine.py
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L166-L173
def remove_mock(self, mock): """ Removes a specific mock instance by object reference. Arguments: mock (pook.Mock): mock instance to remove. """ self.mocks = [m for m in self.mocks if m is not mock]
[ "def", "remove_mock", "(", "self", ",", "mock", ")", ":", "self", ".", "mocks", "=", "[", "m", "for", "m", "in", "self", ".", "mocks", "if", "m", "is", "not", "mock", "]" ]
Removes a specific mock instance by object reference. Arguments: mock (pook.Mock): mock instance to remove.
[ "Removes", "a", "specific", "mock", "instance", "by", "object", "reference", "." ]
python
test
30.5
brutus/wdiffhtml
wdiffhtml/utils.py
https://github.com/brutus/wdiffhtml/blob/e97b524a7945f7a626e33ec141343120c524d9fa/wdiffhtml/utils.py#L108-L121
def wrap_content(content, settings, hard_breaks=False):
    """
    Returns *content* wrapped in an HTML structure.

    If *hard_breaks* is set, line breaks are converted to `<br />` tags.

    """
    settings.context['content'] = wrap_paragraphs(content, hard_breaks)
    template = Template(settings.template)
    try:
        return template.render(**settings.context)
    except KeyError as error:
        msg = "missing context setting: {}".format(error)
        raise ContextError(msg)
[ "def", "wrap_content", "(", "content", ",", "settings", ",", "hard_breaks", "=", "False", ")", ":", "settings", ".", "context", "[", "'content'", "]", "=", "wrap_paragraphs", "(", "content", ",", "hard_breaks", ")", "template", "=", "Template", "(", "settings", ".", "template", ")", "try", ":", "return", "template", ".", "render", "(", "*", "*", "settings", ".", "context", ")", "except", "KeyError", "as", "error", ":", "msg", "=", "\"missing context setting: {}\"", ".", "format", "(", "error", ")", "raise", "ContextError", "(", "msg", ")" ]
Returns *content* wrapped in an HTML structure.

If *hard_breaks* is set, line breaks are converted to `<br />` tags.
[ "Returns", "*", "content", "*", "wrapped", "in", "a", "HTML", "structure", "." ]
python
train
32.214286
iamteem/redisco
redisco/containers.py
https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/containers.py#L86-L88
def isdisjoint(self, other): """Return True if the set has no elements in common with other.""" return not bool(self.db.sinter([self.key, other.key]))
[ "def", "isdisjoint", "(", "self", ",", "other", ")", ":", "return", "not", "bool", "(", "self", ".", "db", ".", "sinter", "(", "[", "self", ".", "key", ",", "other", ".", "key", "]", ")", ")" ]
Return True if the set has no elements in common with other.
[ "Return", "True", "if", "the", "set", "has", "no", "elements", "in", "common", "with", "other", "." ]
python
train
54.666667
ga4gh/ga4gh-server
ga4gh/server/response_builder.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/response_builder.py#L61-L70
def addValue(self, protocolElement): """ Appends the specified protocolElement to the value list for this response. """ self._numElements += 1 self._bufferSize += protocolElement.ByteSize() attr = getattr(self._protoObject, self._valueListName) obj = attr.add() obj.CopyFrom(protocolElement)
[ "def", "addValue", "(", "self", ",", "protocolElement", ")", ":", "self", ".", "_numElements", "+=", "1", "self", ".", "_bufferSize", "+=", "protocolElement", ".", "ByteSize", "(", ")", "attr", "=", "getattr", "(", "self", ".", "_protoObject", ",", "self", ".", "_valueListName", ")", "obj", "=", "attr", ".", "add", "(", ")", "obj", ".", "CopyFrom", "(", "protocolElement", ")" ]
Appends the specified protocolElement to the value list for this response.
[ "Appends", "the", "specified", "protocolElement", "to", "the", "value", "list", "for", "this", "response", "." ]
python
train
35.4
llazzaro/django-scheduler
schedule/views.py
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/views.py#L251-L274
def get_occurrence(event_id, occurrence_id=None, year=None, month=None,
                   day=None, hour=None, minute=None, second=None,
                   tzinfo=None):
    """
    Because occurrences don't have to be persisted, there must be two ways to
    retrieve them. Both need an event, but if it's persisted the occurrence can
    be retrieved with an id. If it is not persisted it takes a date to
    retrieve it.  This function returns an event and occurrence regardless of
    which method is used.
    """
    if(occurrence_id):
        occurrence = get_object_or_404(Occurrence, id=occurrence_id)
        event = occurrence.event
    elif None not in (year, month, day, hour, minute, second):
        event = get_object_or_404(Event, id=event_id)
        date = timezone.make_aware(datetime.datetime(int(year), int(month),
                                                     int(day), int(hour),
                                                     int(minute),
                                                     int(second)), tzinfo)
        occurrence = event.get_occurrence(date)
        if occurrence is None:
            raise Http404
    else:
        raise Http404
    return event, occurrence
[ "def", "get_occurrence", "(", "event_id", ",", "occurrence_id", "=", "None", ",", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "hour", "=", "None", ",", "minute", "=", "None", ",", "second", "=", "None", ",", "tzinfo", "=", "None", ")", ":", "if", "(", "occurrence_id", ")", ":", "occurrence", "=", "get_object_or_404", "(", "Occurrence", ",", "id", "=", "occurrence_id", ")", "event", "=", "occurrence", ".", "event", "elif", "None", "not", "in", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ")", ":", "event", "=", "get_object_or_404", "(", "Event", ",", "id", "=", "event_id", ")", "date", "=", "timezone", ".", "make_aware", "(", "datetime", ".", "datetime", "(", "int", "(", "year", ")", ",", "int", "(", "month", ")", ",", "int", "(", "day", ")", ",", "int", "(", "hour", ")", ",", "int", "(", "minute", ")", ",", "int", "(", "second", ")", ")", ",", "tzinfo", ")", "occurrence", "=", "event", ".", "get_occurrence", "(", "date", ")", "if", "occurrence", "is", "None", ":", "raise", "Http404", "else", ":", "raise", "Http404", "return", "event", ",", "occurrence" ]
Because occurrences don't have to be persisted, there must be two ways to
retrieve them. Both need an event, but if it's persisted the occurrence can
be retrieved with an id. If it is not persisted it takes a date to
retrieve it.  This function returns an event and occurrence regardless of
which method is used.
[ "Because", "occurrences", "don", "t", "have", "to", "be", "persisted", "there", "must", "be", "two", "ways", "to", "retrieve", "them", ".", "both", "need", "an", "event", "but", "if", "its", "persisted", "the", "occurrence", "can", "be", "retrieved", "with", "an", "id", ".", "If", "it", "is", "not", "persisted", "it", "takes", "a", "date", "to", "retrieve", "it", ".", "This", "function", "returns", "an", "event", "and", "occurrence", "regardless", "of", "which", "method", "is", "used", "." ]
python
train
46.041667
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L964-L974
def get_source_metadata(self): """Gets the metadata for the source. return: (osid.Metadata) - metadata for the source *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['source']) metadata.update({'existing_id_values': self._my_map['sourceId']}) return Metadata(**metadata)
[ "def", "get_source_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'source'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_id_values'", ":", "self", ".", "_my_map", "[", "'sourceId'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
Gets the metadata for the source. return: (osid.Metadata) - metadata for the source *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "metadata", "for", "the", "source", "." ]
python
train
41.545455
dogoncouch/logdissect
logdissect/utils.py
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/utils.py#L171-L200
def get_local_tzone():
    """Get the current time zone on the local host"""
    if localtime().tm_isdst:
        if altzone < 0:
            tzone = '+' + \
                    str(int(float(abs(altzone)) / 60 // 60)).rjust(2, '0') + \
                    str(int(float(
                        abs(altzone)) / 60 % 60)).ljust(2, '0')
        else:
            tzone = '-' + \
                    str(int(float(altzone) / 60 // 60)).rjust(2, '0') + \
                    str(int(float(
                        altzone) / 60 % 60)).ljust(2, '0')
    else:
        if timezone < 0:
            tzone = \
                '+' + str(int(float(abs(timezone)) / 60 // 60)).rjust(2, '0') + \
                str(int(float(
                    abs(timezone)) / 60 % 60)).ljust(2, '0')
        else:
            tzone = \
                '-' + str(int(float(timezone) / 60 // 60)).rjust(2, '0') + \
                str(int(float(
                    timezone) / 60 % 60)).ljust(2, '0')
    return tzone
[ "def", "get_local_tzone", "(", ")", ":", "if", "localtime", "(", ")", ".", "tm_isdst", ":", "if", "altzone", "<", "0", ":", "tzone", "=", "'+'", "+", "str", "(", "int", "(", "float", "(", "altzone", ")", "/", "60", "//", "60", ")", ")", ".", "rjust", "(", "2", ",", "'0'", ")", "+", "str", "(", "int", "(", "float", "(", "altzone", ")", "/", "60", "%", "60", ")", ")", ".", "ljust", "(", "2", ",", "'0'", ")", "else", ":", "tzone", "=", "'-'", "+", "str", "(", "int", "(", "float", "(", "altzone", ")", "/", "60", "//", "60", ")", ")", ".", "rjust", "(", "2", ",", "'0'", ")", "+", "str", "(", "int", "(", "float", "(", "altzone", ")", "/", "60", "%", "60", ")", ")", ".", "ljust", "(", "2", ",", "'0'", ")", "else", ":", "if", "altzone", "<", "0", ":", "tzone", "=", "'+'", "+", "str", "(", "int", "(", "float", "(", "timezone", ")", "/", "60", "//", "60", ")", ")", ".", "rjust", "(", "2", ",", "'0'", ")", "+", "str", "(", "int", "(", "float", "(", "timezone", ")", "/", "60", "%", "60", ")", ")", ".", "ljust", "(", "2", ",", "'0'", ")", "else", ":", "tzone", "=", "'-'", "+", "str", "(", "int", "(", "float", "(", "timezone", ")", "/", "60", "//", "60", ")", ")", ".", "rjust", "(", "2", ",", "'0'", ")", "+", "str", "(", "int", "(", "float", "(", "timezone", ")", "/", "60", "%", "60", ")", ")", ".", "ljust", "(", "2", ",", "'0'", ")", "return", "tzone" ]
Get the current time zone on the local host
[ "Get", "the", "current", "time", "zone", "on", "the", "local", "host" ]
python
train
38.133333
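The same ±HHMM arithmetic can be written compactly with divmod; this is a sketch under POSIX semantics, where time.timezone and time.altzone are seconds *west* of UTC (hence the inverted sign). Note two quirks in the source this sketch sidesteps via abs(): the non-DST branch picks the sign from altzone while formatting timezone, and str(...).rjust(2, '0') keeps the minus sign for zones east of UTC.

from time import altzone, localtime, timezone

def local_tzone_sketch():
    offset = altzone if localtime().tm_isdst else timezone  # seconds west of UTC
    sign = '+' if offset <= 0 else '-'
    hours, minutes = divmod(abs(offset) // 60, 60)
    return '%s%02d%02d' % (sign, hours, minutes)

print(local_tzone_sketch())  # e.g. '-0500' for US Eastern standard time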
numenta/htmresearch
htmresearch/frameworks/thalamus/thalamus.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus.py#L282-L292
def relayIndextoCoord(self, i): """ Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate """ x = i % self.relayWidth y = i / self.relayWidth return x, y
[ "def", "relayIndextoCoord", "(", "self", ",", "i", ")", ":", "x", "=", "i", "%", "self", ".", "relayWidth", "y", "=", "i", "/", "self", ".", "relayWidth", "return", "x", ",", "y" ]
Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate
[ "Map", "1D", "cell", "index", "to", "a", "2D", "coordinate" ]
python
train
20.454545
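Usage sketch with a hypothetical relay width of 32. Under Python 3 the `/` in the source returns a float, so an integer-only variant uses floor division:

relayWidth = 32          # hypothetical grid width
i = 70                   # flat cell index
x, y = i % relayWidth, i // relayWidth
print(x, y)              # 6 2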
DLR-RM/RAFCON
source/rafcon/gui/controllers/utils/tree_view_controller.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L647-L721
def tree_view_keypress_callback(self, widget, event): """Tab back and forward tab-key motion in list widget and the scrollbar motion to follow key cursor motions The method introduce motion and edit functionality by using "tab"- or "shift-tab"-key for a Gtk.TreeView. It is designed to work with a Gtk.TreeView which model is a Gtk.ListStore and only uses text cell renderer. Additional, the TreeView is assumed to be used as a list not as a tree. With the "tab"-key the cell on the right site of the actual focused cell is started to be edit. Changes in the Gtk.Entry-Widget are confirmed by emitting a 'edited'-signal. If the row ends the edit process continues with the first cell of the next row. With the "shift-tab"-key the inverse functionality of the "tab"-key is provided. The Controller over steps not editable cells. :param Gtk.TreeView widget: The tree view the controller use :param Gdk.Event event: The key press event :return: """ # self._logger.info("key_value: " + str(event.keyval if event is not None else '')) if event and "GDK_KEY_PRESS" == event.type.value_name \ and (event.keyval == Gdk.KEY_Tab or event.keyval == Gdk.KEY_ISO_Left_Tab): [path, focus_column] = self.tree_view.get_cursor() if not path: return False self.tree_view_keypress_callback.__func__.core_element_id = self.store[path][self.ID_STORAGE_ID] # finish active edit process if self.active_entry_widget is not None: text = self.active_entry_widget.get_buffer().get_text() if focus_column in self.widget_columns: focus_column.get_cells()[0].emit('edited', path[0], text) # row could be updated by other call_backs caused by emitting 'edited' signal but selection stays an editable neighbor path = self.get_path_for_core_element(self.tree_view_keypress_callback.__func__.core_element_id) if event.keyval == Gdk.KEY_Tab: # logger.info("move right") direction = +1 else: # logger.info("move left") direction = -1 # get next row_id for focus if direction < 0 and focus_column is self.widget_columns[0] \ or direction > 0 and focus_column is self.widget_columns[-1]: if direction < 0 < path[0] or direction > 0 and not path[0] + 1 > len(self.store): next_row = path[0] + direction else: return False else: next_row = path[0] # get next column_id for focus focus_column_id = self.widget_columns.index(focus_column) if focus_column_id is not None: # search all columns for next editable cell renderer next_focus_column_id = 0 for index in range(len(self.tree_view.get_model())): test_id = focus_column_id + direction * index + direction next_focus_column_id = test_id % len(self.widget_columns) if test_id > len(self.widget_columns) - 1 or test_id < 0: next_row = path[0] + direction if next_row < 0 or next_row > len(self.tree_view.get_model()) - 1: return False if self.widget_columns[next_focus_column_id].get_cells()[0].get_property('editable'): break else: return False del self.tree_view_keypress_callback.__func__.core_element_id # self._logger.info("self.tree_view.scroll_to_cell(next_row={0}, self.widget_columns[{1}] , use_align={2})" # "".format(next_row, next_focus_column_id, False)) # self.tree_view.scroll_to_cell(next_row, self.widget_columns[next_focus_column_id], use_align=False) self.tree_view.set_cursor_on_cell(Gtk.TreePath.new_from_indices([next_row]), self.widget_columns[ next_focus_column_id], focus_cell=None, start_editing=True) return True else: super(ListViewController, self).tree_view_keypress_callback(widget, event)
[ "def", "tree_view_keypress_callback", "(", "self", ",", "widget", ",", "event", ")", ":", "# self._logger.info(\"key_value: \" + str(event.keyval if event is not None else ''))", "if", "event", "and", "\"GDK_KEY_PRESS\"", "==", "event", ".", "type", ".", "value_name", "and", "(", "event", ".", "keyval", "==", "Gdk", ".", "KEY_Tab", "or", "event", ".", "keyval", "==", "Gdk", ".", "KEY_ISO_Left_Tab", ")", ":", "[", "path", ",", "focus_column", "]", "=", "self", ".", "tree_view", ".", "get_cursor", "(", ")", "if", "not", "path", ":", "return", "False", "self", ".", "tree_view_keypress_callback", ".", "__func__", ".", "core_element_id", "=", "self", ".", "store", "[", "path", "]", "[", "self", ".", "ID_STORAGE_ID", "]", "# finish active edit process", "if", "self", ".", "active_entry_widget", "is", "not", "None", ":", "text", "=", "self", ".", "active_entry_widget", ".", "get_buffer", "(", ")", ".", "get_text", "(", ")", "if", "focus_column", "in", "self", ".", "widget_columns", ":", "focus_column", ".", "get_cells", "(", ")", "[", "0", "]", ".", "emit", "(", "'edited'", ",", "path", "[", "0", "]", ",", "text", ")", "# row could be updated by other call_backs caused by emitting 'edited' signal but selection stays an editable neighbor", "path", "=", "self", ".", "get_path_for_core_element", "(", "self", ".", "tree_view_keypress_callback", ".", "__func__", ".", "core_element_id", ")", "if", "event", ".", "keyval", "==", "Gdk", ".", "KEY_Tab", ":", "# logger.info(\"move right\")", "direction", "=", "+", "1", "else", ":", "# logger.info(\"move left\")", "direction", "=", "-", "1", "# get next row_id for focus", "if", "direction", "<", "0", "and", "focus_column", "is", "self", ".", "widget_columns", "[", "0", "]", "or", "direction", ">", "0", "and", "focus_column", "is", "self", ".", "widget_columns", "[", "-", "1", "]", ":", "if", "direction", "<", "0", "<", "path", "[", "0", "]", "or", "direction", ">", "0", "and", "not", "path", "[", "0", "]", "+", "1", ">", "len", "(", "self", ".", "store", ")", ":", "next_row", "=", "path", "[", "0", "]", "+", "direction", "else", ":", "return", "False", "else", ":", "next_row", "=", "path", "[", "0", "]", "# get next column_id for focus", "focus_column_id", "=", "self", ".", "widget_columns", ".", "index", "(", "focus_column", ")", "if", "focus_column_id", "is", "not", "None", ":", "# search all columns for next editable cell renderer", "next_focus_column_id", "=", "0", "for", "index", "in", "range", "(", "len", "(", "self", ".", "tree_view", ".", "get_model", "(", ")", ")", ")", ":", "test_id", "=", "focus_column_id", "+", "direction", "*", "index", "+", "direction", "next_focus_column_id", "=", "test_id", "%", "len", "(", "self", ".", "widget_columns", ")", "if", "test_id", ">", "len", "(", "self", ".", "widget_columns", ")", "-", "1", "or", "test_id", "<", "0", ":", "next_row", "=", "path", "[", "0", "]", "+", "direction", "if", "next_row", "<", "0", "or", "next_row", ">", "len", "(", "self", ".", "tree_view", ".", "get_model", "(", ")", ")", "-", "1", ":", "return", "False", "if", "self", ".", "widget_columns", "[", "next_focus_column_id", "]", ".", "get_cells", "(", ")", "[", "0", "]", ".", "get_property", "(", "'editable'", ")", ":", "break", "else", ":", "return", "False", "del", "self", ".", "tree_view_keypress_callback", ".", "__func__", ".", "core_element_id", "# self._logger.info(\"self.tree_view.scroll_to_cell(next_row={0}, self.widget_columns[{1}] , use_align={2})\"", "# \"\".format(next_row, next_focus_column_id, False))", "# 
self.tree_view.scroll_to_cell(next_row, self.widget_columns[next_focus_column_id], use_align=False)", "self", ".", "tree_view", ".", "set_cursor_on_cell", "(", "Gtk", ".", "TreePath", ".", "new_from_indices", "(", "[", "next_row", "]", ")", ",", "self", ".", "widget_columns", "[", "next_focus_column_id", "]", ",", "focus_cell", "=", "None", ",", "start_editing", "=", "True", ")", "return", "True", "else", ":", "super", "(", "ListViewController", ",", "self", ")", ".", "tree_view_keypress_callback", "(", "widget", ",", "event", ")" ]
Tab back and forward tab-key motion in list widget and the scrollbar motion to follow key cursor motions The method introduce motion and edit functionality by using "tab"- or "shift-tab"-key for a Gtk.TreeView. It is designed to work with a Gtk.TreeView which model is a Gtk.ListStore and only uses text cell renderer. Additional, the TreeView is assumed to be used as a list not as a tree. With the "tab"-key the cell on the right site of the actual focused cell is started to be edit. Changes in the Gtk.Entry-Widget are confirmed by emitting a 'edited'-signal. If the row ends the edit process continues with the first cell of the next row. With the "shift-tab"-key the inverse functionality of the "tab"-key is provided. The Controller over steps not editable cells. :param Gtk.TreeView widget: The tree view the controller use :param Gdk.Event event: The key press event :return:
[ "Tab", "back", "and", "forward", "tab", "-", "key", "motion", "in", "list", "widget", "and", "the", "scrollbar", "motion", "to", "follow", "key", "cursor", "motions" ]
python
train
57.386667
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodeconnection.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodeconnection.py#L1135-L1150
def prepareToRemove(self): """ Handles any code that needs to run to cleanup the connection \ before it gets removed from the scene. :return <bool> success """ # disconnect the signals from the input and output nodes for node in (self._outputNode, self._inputNode): self.disconnectSignals(node) # clear the pointers to the nodes self._inputNode = None self._outputNode = None return True
[ "def", "prepareToRemove", "(", "self", ")", ":", "# disconnect the signals from the input and output nodes", "for", "node", "in", "(", "self", ".", "_outputNode", ",", "self", ".", "_inputNode", ")", ":", "self", ".", "disconnectSignals", "(", "node", ")", "# clear the pointers to the nodes", "self", ".", "_inputNode", "=", "None", "self", ".", "_outputNode", "=", "None", "return", "True" ]
Handles any code that needs to run to cleanup the connection \ before it gets removed from the scene. :return <bool> success
[ "Handles", "any", "code", "that", "needs", "to", "run", "to", "cleanup", "the", "connection", "\\", "before", "it", "gets", "removed", "from", "the", "scene", ".", ":", "return", "<bool", ">", "success" ]
python
train
30.4375
numenta/nupic
src/nupic/algorithms/anomaly_likelihood.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/anomaly_likelihood.py#L521-L610
def updateAnomalyLikelihoods(anomalyScores, params, verbosity=0): """ Compute updated probabilities for anomalyScores using the given params. :param anomalyScores: a list of records. Each record is a list with the following three elements: [timestamp, value, score] Example:: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] :param params: the JSON dict returned by estimateAnomalyLikelihoods :param verbosity: integer controlling extent of printouts for debugging :type verbosity: int :returns: 3-tuple consisting of: - likelihoods numpy array of likelihoods, one for each aggregated point - avgRecordList list of averaged input records - params an updated JSON object containing the state of this metric. """ if verbosity > 3: print("In updateAnomalyLikelihoods.") print("Number of anomaly scores:", len(anomalyScores)) print("First 20:", anomalyScores[0:min(20, len(anomalyScores))]) print("Params:", params) if len(anomalyScores) == 0: raise ValueError("Must have at least one anomalyScore") if not isValidEstimatorParams(params): raise ValueError("'params' is not a valid params structure") # For backward compatibility. if "historicalLikelihoods" not in params: params["historicalLikelihoods"] = [1.0] # Compute moving averages of these new scores using the previous values # as well as likelihood for these scores using the old estimator historicalValues = params["movingAverage"]["historicalValues"] total = params["movingAverage"]["total"] windowSize = params["movingAverage"]["windowSize"] aggRecordList = numpy.zeros(len(anomalyScores), dtype=float) likelihoods = numpy.zeros(len(anomalyScores), dtype=float) for i, v in enumerate(anomalyScores): newAverage, historicalValues, total = ( MovingAverage.compute(historicalValues, total, v[2], windowSize) ) aggRecordList[i] = newAverage likelihoods[i] = tailProbability(newAverage, params["distribution"]) # Filter the likelihood values. First we prepend the historical likelihoods # to the current set. Then we filter the values. We peel off the likelihoods # to return and the last windowSize values to store for later. likelihoods2 = params["historicalLikelihoods"] + list(likelihoods) filteredLikelihoods = _filterLikelihoods(likelihoods2) likelihoods[:] = filteredLikelihoods[-len(likelihoods):] historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):] # Update the estimator newParams = { "distribution": params["distribution"], "movingAverage": { "historicalValues": historicalValues, "total": total, "windowSize": windowSize, }, "historicalLikelihoods": historicalLikelihoods, } assert len(newParams["historicalLikelihoods"]) <= windowSize if verbosity > 3: print("Number of likelihoods:", len(likelihoods)) print("First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))]) print("Leaving updateAnomalyLikelihoods.") return (likelihoods, aggRecordList, newParams)
[ "def", "updateAnomalyLikelihoods", "(", "anomalyScores", ",", "params", ",", "verbosity", "=", "0", ")", ":", "if", "verbosity", ">", "3", ":", "print", "(", "\"In updateAnomalyLikelihoods.\"", ")", "print", "(", "\"Number of anomaly scores:\"", ",", "len", "(", "anomalyScores", ")", ")", "print", "(", "\"First 20:\"", ",", "anomalyScores", "[", "0", ":", "min", "(", "20", ",", "len", "(", "anomalyScores", ")", ")", "]", ")", "print", "(", "\"Params:\"", ",", "params", ")", "if", "len", "(", "anomalyScores", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Must have at least one anomalyScore\"", ")", "if", "not", "isValidEstimatorParams", "(", "params", ")", ":", "raise", "ValueError", "(", "\"'params' is not a valid params structure\"", ")", "# For backward compatibility.", "if", "\"historicalLikelihoods\"", "not", "in", "params", ":", "params", "[", "\"historicalLikelihoods\"", "]", "=", "[", "1.0", "]", "# Compute moving averages of these new scores using the previous values", "# as well as likelihood for these scores using the old estimator", "historicalValues", "=", "params", "[", "\"movingAverage\"", "]", "[", "\"historicalValues\"", "]", "total", "=", "params", "[", "\"movingAverage\"", "]", "[", "\"total\"", "]", "windowSize", "=", "params", "[", "\"movingAverage\"", "]", "[", "\"windowSize\"", "]", "aggRecordList", "=", "numpy", ".", "zeros", "(", "len", "(", "anomalyScores", ")", ",", "dtype", "=", "float", ")", "likelihoods", "=", "numpy", ".", "zeros", "(", "len", "(", "anomalyScores", ")", ",", "dtype", "=", "float", ")", "for", "i", ",", "v", "in", "enumerate", "(", "anomalyScores", ")", ":", "newAverage", ",", "historicalValues", ",", "total", "=", "(", "MovingAverage", ".", "compute", "(", "historicalValues", ",", "total", ",", "v", "[", "2", "]", ",", "windowSize", ")", ")", "aggRecordList", "[", "i", "]", "=", "newAverage", "likelihoods", "[", "i", "]", "=", "tailProbability", "(", "newAverage", ",", "params", "[", "\"distribution\"", "]", ")", "# Filter the likelihood values. First we prepend the historical likelihoods", "# to the current set. Then we filter the values. We peel off the likelihoods", "# to return and the last windowSize values to store for later.", "likelihoods2", "=", "params", "[", "\"historicalLikelihoods\"", "]", "+", "list", "(", "likelihoods", ")", "filteredLikelihoods", "=", "_filterLikelihoods", "(", "likelihoods2", ")", "likelihoods", "[", ":", "]", "=", "filteredLikelihoods", "[", "-", "len", "(", "likelihoods", ")", ":", "]", "historicalLikelihoods", "=", "likelihoods2", "[", "-", "min", "(", "windowSize", ",", "len", "(", "likelihoods2", ")", ")", ":", "]", "# Update the estimator", "newParams", "=", "{", "\"distribution\"", ":", "params", "[", "\"distribution\"", "]", ",", "\"movingAverage\"", ":", "{", "\"historicalValues\"", ":", "historicalValues", ",", "\"total\"", ":", "total", ",", "\"windowSize\"", ":", "windowSize", ",", "}", ",", "\"historicalLikelihoods\"", ":", "historicalLikelihoods", ",", "}", "assert", "len", "(", "newParams", "[", "\"historicalLikelihoods\"", "]", ")", "<=", "windowSize", "if", "verbosity", ">", "3", ":", "print", "(", "\"Number of likelihoods:\"", ",", "len", "(", "likelihoods", ")", ")", "print", "(", "\"First 20 likelihoods:\"", ",", "likelihoods", "[", "0", ":", "min", "(", "20", ",", "len", "(", "likelihoods", ")", ")", "]", ")", "print", "(", "\"Leaving updateAnomalyLikelihoods.\"", ")", "return", "(", "likelihoods", ",", "aggRecordList", ",", "newParams", ")" ]
Compute updated probabilities for anomalyScores using the given params. :param anomalyScores: a list of records. Each record is a list with the following three elements: [timestamp, value, score] Example:: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] :param params: the JSON dict returned by estimateAnomalyLikelihoods :param verbosity: integer controlling extent of printouts for debugging :type verbosity: int :returns: 3-tuple consisting of: - likelihoods numpy array of likelihoods, one for each aggregated point - avgRecordList list of averaged input records - params an updated JSON object containing the state of this metric.
[ "Compute", "updated", "probabilities", "for", "anomalyScores", "using", "the", "given", "params", "." ]
python
valid
35.411111
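A hedged usage sketch, assuming the nupic package is installed and that params came from an earlier estimateAnomalyLikelihoods() call over historical (timestamp, value, rawScore) triples; all values here are invented.

import datetime
from nupic.algorithms.anomaly_likelihood import (
    estimateAnomalyLikelihoods, updateAnomalyLikelihoods)

history = [(datetime.datetime(2013, 8, 10, 23, m), 6.0, 0.1) for m in range(40)]
_, _, params = estimateAnomalyLikelihoods(history)

fresh = [(datetime.datetime(2013, 8, 11, 0, 0), 9.0, 0.9)]
likelihoods, aggregated, params = updateAnomalyLikelihoods(fresh, params)
print(likelihoods[0])   # filtered likelihood for the new point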
hugapi/hug
hug/use.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/use.py#L53-L55
def request(self, method, url, url_params=empty.dict, headers=empty.dict, timeout=None, **params): """Calls the service at the specified URL using the "CALL" method""" raise NotImplementedError("Concrete services must define the request method")
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "url_params", "=", "empty", ".", "dict", ",", "headers", "=", "empty", ".", "dict", ",", "timeout", "=", "None", ",", "*", "*", "params", ")", ":", "raise", "NotImplementedError", "(", "\"Concrete services must define the request method\"", ")" ]
Calls the service at the specified URL using the "CALL" method
[ "Calls", "the", "service", "at", "the", "specified", "URL", "using", "the", "CALL", "method" ]
python
train
86.333333
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/icc.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/icc.py#L38-L50
def generate(env): """Add Builders and construction variables for the OS/2 to an Environment.""" cc.generate(env) env['CC'] = 'icc' env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CXXCOM'] = '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CPPDEFPREFIX'] = '/D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '/I' env['INCSUFFIX'] = '' env['CFILESUFFIX'] = '.c' env['CXXFILESUFFIX'] = '.cc'
[ "def", "generate", "(", "env", ")", ":", "cc", ".", "generate", "(", "env", ")", "env", "[", "'CC'", "]", "=", "'icc'", "env", "[", "'CCCOM'", "]", "=", "'$CC $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET'", "env", "[", "'CXXCOM'", "]", "=", "'$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET'", "env", "[", "'CPPDEFPREFIX'", "]", "=", "'/D'", "env", "[", "'CPPDEFSUFFIX'", "]", "=", "''", "env", "[", "'INCPREFIX'", "]", "=", "'/I'", "env", "[", "'INCSUFFIX'", "]", "=", "''", "env", "[", "'CFILESUFFIX'", "]", "=", "'.c'", "env", "[", "'CXXFILESUFFIX'", "]", "=", "'.cc'" ]
Add Builders and construction variables for the OS/2 to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "the", "OS", "/", "2", "to", "an", "Environment", "." ]
python
train
40.846154
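In an SConstruct, the tool would typically be requested by name; a sketch assuming SCons is importable (the icc binary itself only matters once a build actually runs):

from SCons.Environment import Environment

env = Environment(tools=['icc'])
print(env['CC'], env['CPPDEFPREFIX'])   # icc /D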
edeposit/edeposit.amqp.pdfgen
src/edeposit/amqp/pdfgen/specialization.py
https://github.com/edeposit/edeposit.amqp.pdfgen/blob/1022d6d01196f4928d664a71e49273c2d8c67e63/src/edeposit/amqp/pdfgen/specialization.py#L43-L85
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen): """ Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file. """ contract_fn = _resource_context( "Licencni_smlouva_o_dodavani_elektronickych_publikaci" "_a_jejich_uziti.rst" ) # load contract with open(contract_fn) as f: contract = f.read()#.decode("utf-8").encode("utf-8") # make sure that `firma` has its heading mark firma = firma.strip() firma = firma + "\n" + ((len(firma) + 1) * "-") # patch template contract = Template(contract).substitute( firma=firma, pravni_forma=pravni_forma.strip(), sidlo=sidlo.strip(), ic=ic.strip(), dic=dic.strip(), zastoupen=zastoupen.strip(), resources_path=RES_PATH ) return gen_pdf( contract, open(_resource_context("style.json")).read(), )
[ "def", "get_contract", "(", "firma", ",", "pravni_forma", ",", "sidlo", ",", "ic", ",", "dic", ",", "zastoupen", ")", ":", "contract_fn", "=", "_resource_context", "(", "\"Licencni_smlouva_o_dodavani_elektronickych_publikaci\"", "\"_a_jejich_uziti.rst\"", ")", "# load contract", "with", "open", "(", "contract_fn", ")", "as", "f", ":", "contract", "=", "f", ".", "read", "(", ")", "#.decode(\"utf-8\").encode(\"utf-8\")", "# make sure that `firma` has its heading mark", "firma", "=", "firma", ".", "strip", "(", ")", "firma", "=", "firma", "+", "\"\\n\"", "+", "(", "(", "len", "(", "firma", ")", "+", "1", ")", "*", "\"-\"", ")", "# patch template", "contract", "=", "Template", "(", "contract", ")", ".", "substitute", "(", "firma", "=", "firma", ",", "pravni_forma", "=", "pravni_forma", ".", "strip", "(", ")", ",", "sidlo", "=", "sidlo", ".", "strip", "(", ")", ",", "ic", "=", "ic", ".", "strip", "(", ")", ",", "dic", "=", "dic", ".", "strip", "(", ")", ",", "zastoupen", "=", "zastoupen", ".", "strip", "(", ")", ",", "resources_path", "=", "RES_PATH", ")", "return", "gen_pdf", "(", "contract", ",", "open", "(", "_resource_context", "(", "\"style.json\"", ")", ")", ".", "read", "(", ")", ",", ")" ]
Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file.
[ "Compose", "contract", "and", "create", "PDF", "." ]
python
train
25.465116
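The heading-mark step, isolated as a runnable sketch: the company name gets an RST underline one character longer than itself before string.Template substitution. The company name and template text below are invented, not the project's real contract.

from string import Template

firma = 'Example s.r.o.'.strip()
firma = firma + '\n' + ((len(firma) + 1) * '-')
print(Template('$firma\nIC: $ic').substitute(firma=firma, ic='12345678'))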
shrubberysoft/django-future
src/django_future/models.py
https://github.com/shrubberysoft/django-future/blob/5e19f68fc5a5e1c00297222067697182c7ecd94a/src/django_future/models.py#L79-L98
def reschedule(self, date, callable_name=None, content_object=None, expires='7d', args=None, kwargs=None): """Schedule a clone of this job.""" # Resolve date relative to the expected start of the current job. if isinstance(date, basestring): date = parse_timedelta(date) if isinstance(date, datetime.timedelta): date = self.time_slot_start + date if callable_name is None: callable_name = self.callable_name if content_object is None: content_object = self.content_object if args is None: args = self.args or [] if kwargs is None: kwargs = self.kwargs or {} from django_future import schedule_job return schedule_job(date, callable_name, content_object=content_object, expires=expires, args=args, kwargs=kwargs)
[ "def", "reschedule", "(", "self", ",", "date", ",", "callable_name", "=", "None", ",", "content_object", "=", "None", ",", "expires", "=", "'7d'", ",", "args", "=", "None", ",", "kwargs", "=", "None", ")", ":", "# Resolve date relative to the expected start of the current job.", "if", "isinstance", "(", "date", ",", "basestring", ")", ":", "date", "=", "parse_timedelta", "(", "date", ")", "if", "isinstance", "(", "date", ",", "datetime", ".", "timedelta", ")", ":", "date", "=", "self", ".", "time_slot_start", "+", "date", "if", "callable_name", "is", "None", ":", "callable_name", "=", "self", ".", "callable_name", "if", "content_object", "is", "None", ":", "content_object", "=", "self", ".", "content_object", "if", "args", "is", "None", ":", "args", "=", "self", ".", "args", "or", "[", "]", "if", "kwargs", "is", "None", ":", "kwargs", "=", "self", ".", "kwargs", "or", "{", "}", "from", "django_future", "import", "schedule_job", "return", "schedule_job", "(", "date", ",", "callable_name", ",", "content_object", "=", "content_object", ",", "expires", "=", "expires", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")" ]
Schedule a clone of this job.
[ "Schedule", "a", "clone", "of", "this", "job", "." ]
python
train
44.6
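The relative-date resolution at the top of reschedule(), reproduced standalone (basestring marks this as Python 2; the string-parsing branch is skipped and the timedelta path shown with invented values):

import datetime

time_slot_start = datetime.datetime(2024, 1, 1, 12, 0)   # hypothetical job start
date = datetime.timedelta(days=7)                        # what '7d' would parse to
if isinstance(date, datetime.timedelta):
    date = time_slot_start + date
print(date)   # 2024-01-08 12:00:00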
graphql-python/graphql-core-next
graphql/pyutils/suggestion_list.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/pyutils/suggestion_list.py#L6-L21
def suggestion_list(input_: str, options: Collection[str]): """Get list with suggestions for a given input. Given an invalid input string and list of valid options, returns a filtered list of valid options sorted based on their similarity with the input. """ options_by_distance = {} input_threshold = len(input_) // 2 for option in options: distance = lexical_distance(input_, option) threshold = max(input_threshold, len(option) // 2, 1) if distance <= threshold: options_by_distance[option] = distance return sorted(options_by_distance, key=options_by_distance.get)
[ "def", "suggestion_list", "(", "input_", ":", "str", ",", "options", ":", "Collection", "[", "str", "]", ")", ":", "options_by_distance", "=", "{", "}", "input_threshold", "=", "len", "(", "input_", ")", "//", "2", "for", "option", "in", "options", ":", "distance", "=", "lexical_distance", "(", "input_", ",", "option", ")", "threshold", "=", "max", "(", "input_threshold", ",", "len", "(", "option", ")", "//", "2", ",", "1", ")", "if", "distance", "<=", "threshold", ":", "options_by_distance", "[", "option", "]", "=", "distance", "return", "sorted", "(", "options_by_distance", ",", "key", "=", "options_by_distance", ".", "get", ")" ]
Get list with suggestions for a given input. Given an invalid input string and list of valid options, returns a filtered list of valid options sorted based on their similarity with the input.
[ "Get", "list", "with", "suggestions", "for", "a", "given", "input", "." ]
python
train
39
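Hedged usage sketch, assuming graphql-core-next is importable as graphql: 'dog' is dropped because its lexical distance exceeds the per-option threshold, while 'cats' survives and the result is sorted by distance.

from graphql.pyutils.suggestion_list import suggestion_list

print(suggestion_list('cat', ['cat', 'cats', 'dog']))   # ['cat', 'cats']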
gebn/nibble
nibble/expression/lexer.py
https://github.com/gebn/nibble/blob/e82a2c43509ed38f3d039040591cc630fa676cb0/nibble/expression/lexer.py#L47-L63
def t_ID(self, t): r'[a-zA-Z]+' if t.value in self._RESERVED.keys(): t.type = self._RESERVED[t.value] return t if Information.is_valid_symbol(t.value) or \ Information.is_valid_category(t.value): t.type = self._INFORMATION_UNIT return t if Duration.is_valid_symbol(t.value): t.type = self._DURATION_UNIT return t raise LexingError('Unrecognised token or unit \'{0.value}\' at ' 'position {0.lexpos}'.format(t))
[ "def", "t_ID", "(", "self", ",", "t", ")", ":", "if", "t", ".", "value", "in", "self", ".", "_RESERVED", ".", "keys", "(", ")", ":", "t", ".", "type", "=", "self", ".", "_RESERVED", "[", "t", ".", "value", "]", "return", "t", "if", "Information", ".", "is_valid_symbol", "(", "t", ".", "value", ")", "or", "Information", ".", "is_valid_category", "(", "t", ".", "value", ")", ":", "t", ".", "type", "=", "self", ".", "_INFORMATION_UNIT", "return", "t", "if", "Duration", ".", "is_valid_symbol", "(", "t", ".", "value", ")", ":", "t", ".", "type", "=", "self", ".", "_DURATION_UNIT", "return", "t", "raise", "LexingError", "(", "'Unrecognised token or unit \\'{0.value}\\' at '", "'position {0.lexpos}'", ".", "format", "(", "t", ")", ")" ]
r'[a-zA-Z]+
[ "r", "[", "a", "-", "zA", "-", "Z", "]", "+" ]
python
train
32.411765
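The docstring above is literally the token's regular expression: PLY reads each t_* rule's docstring as its pattern, which is why the extracted docstring for this record is just r'[a-zA-Z]+'. A minimal sketch of the convention, assuming the ply package (toy grammar, no reserved words or unit checks):

import ply.lex as lex

tokens = ('ID',)

def t_ID(t):
    r'[a-zA-Z]+'
    return t

def t_error(t):           # PLY expects an error rule
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('hello')
tok = lexer.token()
print(tok.type, tok.value)   # ID hello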
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L776-L782
def get_user_events(self, id, **data): """ GET /users/:id/events/ Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to """ return self.get("/users/{0}/events/".format(id), data=data)
[ "def", "get_user_events", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "get", "(", "\"/users/{0}/events/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
GET /users/:id/events/ Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to
[ "GET", "/", "users", "/", ":", "id", "/", "events", "/", "Returns", "a", ":", "ref", ":", "paginated", "<pagination", ">", "response", "of", ":", "format", ":", "events", "<event", ">", "under", "the", "key", "events", "of", "all", "events", "the", "user", "has", "access", "to" ]
python
train
44.714286
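Hedged usage sketch with the Eventbrite SDK (the token is a placeholder; get_user() returning the current user's dict is the SDK's documented entry point, and extra keyword arguments to get_user_events are forwarded as query data):

from eventbrite import Eventbrite

client = Eventbrite('YOUR_OAUTH_TOKEN')   # placeholder token
me = client.get_user()
events = client.get_user_events(me['id'])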
rameshg87/pyremotevbox
pyremotevbox/ZSI/writer.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/writer.py#L59-L81
def serialize_header(self, pyobj, typecode=None, **kw): '''Serialize a Python object in SOAP-ENV:Header, make sure everything in Header unique (no #href). Must call serialize first to create a document. Parameters: pyobjs -- instances to serialize in SOAP Header typecode -- default typecode ''' kw['unique'] = True soap_env = _reserved_ns['SOAP-ENV'] #header = self.dom.getElement(soap_env, 'Header') header = self._header if header is None: header = self._header = self.dom.createAppendElement(soap_env, 'Header') typecode = getattr(pyobj, 'typecode', typecode) if typecode is None: raise RuntimeError( 'typecode is required to serialize pyobj in header') helt = typecode.serialize(header, self, pyobj, **kw)
[ "def", "serialize_header", "(", "self", ",", "pyobj", ",", "typecode", "=", "None", ",", "*", "*", "kw", ")", ":", "kw", "[", "'unique'", "]", "=", "True", "soap_env", "=", "_reserved_ns", "[", "'SOAP-ENV'", "]", "#header = self.dom.getElement(soap_env, 'Header')", "header", "=", "self", ".", "_header", "if", "header", "is", "None", ":", "header", "=", "self", ".", "_header", "=", "self", ".", "dom", ".", "createAppendElement", "(", "soap_env", ",", "'Header'", ")", "typecode", "=", "getattr", "(", "pyobj", ",", "'typecode'", ",", "typecode", ")", "if", "typecode", "is", "None", ":", "raise", "RuntimeError", "(", "'typecode is required to serialize pyobj in header'", ")", "helt", "=", "typecode", ".", "serialize", "(", "header", ",", "self", ",", "pyobj", ",", "*", "*", "kw", ")" ]
Serialize a Python object in SOAP-ENV:Header, make sure everything in Header unique (no #href). Must call serialize first to create a document. Parameters: pyobjs -- instances to serialize in SOAP Header typecode -- default typecode
[ "Serialize", "a", "Python", "object", "in", "SOAP", "-", "ENV", ":", "Header", "make", "sure", "everything", "in", "Header", "unique", "(", "no", "#href", ")", ".", "Must", "call", "serialize", "first", "to", "create", "a", "document", "." ]
python
train
40.434783
djgagne/hagelslag
hagelslag/processing/ObjectMatcher.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/ObjectMatcher.py#L190-L224
def match(self, set_a, set_b): """ For each step in each track from set_a, identify all steps in all tracks from set_b that meet all cost function criteria Args: set_a: List of STObjects set_b: List of STObjects Returns: track_pairings: pandas.DataFrame """ track_step_matches = [[] * len(set_a)] costs = self.cost_matrix(set_a, set_b) valid_costs = np.all(costs < 1, axis=2) set_a_matches, set_b_matches = np.where(valid_costs) s = 0 track_pairings = pd.DataFrame(index=np.arange(costs.shape[0]), columns=["Track", "Step", "Time", "Matched", "Pairings"], dtype=object) set_b_info = [] for trb, track_b in enumerate(set_b): for t, time in enumerate(track_b.times): set_b_info.append((trb, t)) set_b_info_arr = np.array(set_b_info, dtype=int) for tr, track_a in enumerate(set_a): for t, time in enumerate(track_a.times): track_pairings.loc[s, ["Track", "Step", "Time"]] = [tr, t, time] track_pairings.loc[s, "Matched"] = 1 if np.count_nonzero(set_a_matches == s) > 0 else 0 if track_pairings.loc[s, "Matched"] == 1: track_pairings.loc[s, "Pairings"] = set_b_info_arr[set_b_matches[set_a_matches == s]] else: track_pairings.loc[s, "Pairings"] = np.array([]) s += 1 return track_pairings
[ "def", "match", "(", "self", ",", "set_a", ",", "set_b", ")", ":", "track_step_matches", "=", "[", "[", "]", "*", "len", "(", "set_a", ")", "]", "costs", "=", "self", ".", "cost_matrix", "(", "set_a", ",", "set_b", ")", "valid_costs", "=", "np", ".", "all", "(", "costs", "<", "1", ",", "axis", "=", "2", ")", "set_a_matches", ",", "set_b_matches", "=", "np", ".", "where", "(", "valid_costs", ")", "s", "=", "0", "track_pairings", "=", "pd", ".", "DataFrame", "(", "index", "=", "np", ".", "arange", "(", "costs", ".", "shape", "[", "0", "]", ")", ",", "columns", "=", "[", "\"Track\"", ",", "\"Step\"", ",", "\"Time\"", ",", "\"Matched\"", ",", "\"Pairings\"", "]", ",", "dtype", "=", "object", ")", "set_b_info", "=", "[", "]", "for", "trb", ",", "track_b", "in", "enumerate", "(", "set_b", ")", ":", "for", "t", ",", "time", "in", "enumerate", "(", "track_b", ".", "times", ")", ":", "set_b_info", ".", "append", "(", "(", "trb", ",", "t", ")", ")", "set_b_info_arr", "=", "np", ".", "array", "(", "set_b_info", ",", "dtype", "=", "int", ")", "for", "tr", ",", "track_a", "in", "enumerate", "(", "set_a", ")", ":", "for", "t", ",", "time", "in", "enumerate", "(", "track_a", ".", "times", ")", ":", "track_pairings", ".", "loc", "[", "s", ",", "[", "\"Track\"", ",", "\"Step\"", ",", "\"Time\"", "]", "]", "=", "[", "tr", ",", "t", ",", "time", "]", "track_pairings", ".", "loc", "[", "s", ",", "\"Matched\"", "]", "=", "1", "if", "np", ".", "count_nonzero", "(", "set_a_matches", "==", "s", ")", ">", "0", "else", "0", "if", "track_pairings", ".", "loc", "[", "s", ",", "\"Matched\"", "]", "==", "1", ":", "track_pairings", ".", "loc", "[", "s", ",", "\"Pairings\"", "]", "=", "set_b_info_arr", "[", "set_b_matches", "[", "set_a_matches", "==", "s", "]", "]", "else", ":", "track_pairings", ".", "loc", "[", "s", ",", "\"Pairings\"", "]", "=", "np", ".", "array", "(", "[", "]", ")", "s", "+=", "1", "return", "track_pairings" ]
For each step in each track from set_a, identify all steps in all tracks from set_b that meet all cost function criteria Args: set_a: List of STObjects set_b: List of STObjects Returns: track_pairings: pandas.DataFrame
[ "For", "each", "step", "in", "each", "track", "from", "set_a", "identify", "all", "steps", "in", "all", "tracks", "from", "set_b", "that", "meet", "all", "cost", "function", "criteria", "Args", ":", "set_a", ":", "List", "of", "STObjects", "set_b", ":", "List", "of", "STObjects" ]
python
train
43.771429
napalm-automation/napalm-junos
napalm_junos/junos.py
https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L244-L248
def discard_config(self): """Discard changes (rollback 0).""" self.device.cu.rollback(rb_id=0) if not self.config_lock: self._unlock()
[ "def", "discard_config", "(", "self", ")", ":", "self", ".", "device", ".", "cu", ".", "rollback", "(", "rb_id", "=", "0", ")", "if", "not", "self", ".", "config_lock", ":", "self", ".", "_unlock", "(", ")" ]
Discard changes (rollback 0).
[ "Discard", "changes", "(", "rollback", "0", ")", "." ]
python
train
33.2
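Hedged usage sketch for the napalm-junos driver, assuming the package exposes JunOSDriver at top level and the device is reachable (hostname and credentials are placeholders; load_merge_candidate and discard_config are part of NAPALM's base API):

from napalm_junos import JunOSDriver

device = JunOSDriver('192.0.2.1', 'admin', 'secret')   # placeholder credentials
device.open()
device.load_merge_candidate(config='set system host-name lab')
device.discard_config()   # rollback 0; also unlocks unless config_lock is set
device.close()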
olitheolix/qtmacs
qtmacs/logging_handler.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/logging_handler.py#L127-L165
def fetch(self, start=None, stop=None): """ Fetch log records and return them as a list. |Args| * ``start`` (**int**): non-negative index of the first log record to return. * ``stop`` (**int**): non-negative index of the last log record to return. |Returns| * **list**: list of log records (see ``logger`` module for definition of log record). |Raises| * **None** """ # Set defaults if no explicit indices were provided. if not start: start = 0 if not stop: stop = len(self.log) # Sanity check: indices must be valid. if start < 0: start = 0 if stop > len(self.log): stop = len(self.log) # Clear the fetch flag. It will be set again in the emit() # method once new data arrives. self.waitForFetch = False # Return the specified range of log records. return self.log[start:stop]
[ "def", "fetch", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "# Set defaults if no explicit indices were provided.", "if", "not", "start", ":", "start", "=", "0", "if", "not", "stop", ":", "stop", "=", "len", "(", "self", ".", "log", ")", "# Sanity check: indices must be valid.", "if", "start", "<", "0", ":", "start", "=", "0", "if", "stop", ">", "len", "(", "self", ".", "log", ")", ":", "stop", "=", "len", "(", "self", ".", "log", ")", "# Clear the fetch flag. It will be set again in the emit()", "# method once new data arrives.", "self", ".", "waitForFetch", "=", "False", "# Return the specified range of log records.", "return", "self", ".", "log", "[", "start", ":", "stop", "]" ]
Fetch log records and return them as a list. |Args| * ``start`` (**int**): non-negative index of the first log record to return. * ``stop`` (**int**): non-negative index of the last log record to return. |Returns| * **list**: list of log records (see ``logger`` module for definition of log record). |Raises| * **None**
[ "Fetch", "log", "records", "and", "return", "them", "as", "a", "list", "." ]
python
train
25.538462
zaturox/glin
glin/app.py
https://github.com/zaturox/glin/blob/55214a579c4e4b4d74765f3f6aa2eb815bac1c3b/glin/app.py#L283-L287
def publish_scene_remove(self, scene_id): """publish the removal of a scene""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_remove(self.sequence_number, scene_id)) return self.sequence_number
[ "def", "publish_scene_remove", "(", "self", ",", "scene_id", ")", ":", "self", ".", "sequence_number", "+=", "1", "self", ".", "publisher", ".", "send_multipart", "(", "msgs", ".", "MessageBuilder", ".", "scene_remove", "(", "self", ".", "sequence_number", ",", "scene_id", ")", ")", "return", "self", ".", "sequence_number" ]
publish the removal of a scene
[ "publish", "the", "removal", "of", "a", "scene" ]
python
train
51.2
casouri/launchdman
launchdman/__init__.py
https://github.com/casouri/launchdman/blob/c83840e640cb075fab2534049f1e25fac6933c64/launchdman/__init__.py#L309-L319
def add(self, *value): '''convert value and add to self.value Subclass must overwrite this method. Subclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value`` Args: *value: the value to be added ''' flattenedValueList = list(flatten(value)) return self._add(flattenedValueList, self.value)
[ "def", "add", "(", "self", ",", "*", "value", ")", ":", "flattenedValueList", "=", "list", "(", "flatten", "(", "value", ")", ")", "return", "self", ".", "_add", "(", "flattenedValueList", ",", "self", ".", "value", ")" ]
convert value and add to self.value Subclass must overwrite this method. Subclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value`` Args: *value: the value to be added
[ "convert", "value", "and", "add", "to", "self", ".", "value" ]
python
train
39.454545
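A standalone sketch of the flatten-then-_add contract: arbitrarily nested *value arguments collapse into one flat list before _add() sees them. The flatten generator below is a stand-in; launchdman ships its own.

def flatten(items):                      # stand-in for launchdman's flatten
    for item in items:
        if isinstance(item, (list, tuple)):
            for sub in flatten(item):
                yield sub
        else:
            yield item

value = (1, [2, [3, 4]], 5)              # what *value captures
print(list(flatten(value)))              # [1, 2, 3, 4, 5]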
cuducos/getgist
getgist/local.py
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/local.py#L43-L53
def backup(self): """Backups files with the same name of the instance filename""" count = 0 name = "{}.bkp".format(self.filename) backup = os.path.join(self.cwd, name) while os.path.exists(backup): count += 1 name = "{}.bkp{}".format(self.filename, count) backup = os.path.join(self.cwd, name) self.hey("Moving existing {} to {}".format(self.filename, name)) os.rename(os.path.join(self.cwd, self.filename), backup)
[ "def", "backup", "(", "self", ")", ":", "count", "=", "0", "name", "=", "\"{}.bkp\"", ".", "format", "(", "self", ".", "filename", ")", "backup", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cwd", ",", "name", ")", "while", "os", ".", "path", ".", "exists", "(", "backup", ")", ":", "count", "+=", "1", "name", "=", "\"{}.bkp{}\"", ".", "format", "(", "self", ".", "filename", ",", "count", ")", "backup", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cwd", ",", "name", ")", "self", ".", "hey", "(", "\"Moving existing {} to {}\"", ".", "format", "(", "self", ".", "filename", ",", "name", ")", ")", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "self", ".", "cwd", ",", "self", ".", "filename", ")", ",", "backup", ")" ]
Backups files with the same name of the instance filename
[ "Backups", "files", "with", "the", "same", "name", "of", "the", "instance", "filename" ]
python
train
45.181818
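The numbered-backup naming loop, reproduced as a runnable sketch in a temp directory so no real files are touched (file names are invented):

import os
import tempfile

cwd = tempfile.mkdtemp()
filename = 'notes.txt'
open(os.path.join(cwd, filename), 'w').close()
open(os.path.join(cwd, filename + '.bkp'), 'w').close()   # pre-existing backup

count, name = 0, '{}.bkp'.format(filename)
while os.path.exists(os.path.join(cwd, name)):
    count += 1
    name = '{}.bkp{}'.format(filename, count)
os.rename(os.path.join(cwd, filename), os.path.join(cwd, name))
print(name)   # notes.txt.bkp1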