Dataset schema (column : dtype : observed range)

repo              string    length 7 - 55
path              string    length 4 - 223
url               string    length 87 - 315
code              string    length 75 - 104k
code_tokens       list
docstring         string    length 1 - 46.9k
docstring_tokens  list
language          string    1 distinct value
partition         string    3 distinct values
avg_line_len      float64   7.91 - 980
cloudsight/cloudsight-python
cloudsight/api.py
https://github.com/cloudsight/cloudsight-python/blob/f9bb43dfd468d5f5d50cc89bfcfb12d5c4abdb1e/cloudsight/api.py#L161-L179
def repost(self, token):
    """
    Repost the job if it has timed out (:py:data:`cloudsight.STATUS_TIMEOUT`).

    :param token: Job token as returned from :py:meth:`cloudsight.API.image_request`
                  or :py:meth:`cloudsight.API.remote_image_request`
    """
    url = '%s/%s/repost' % (REQUESTS_URL, token)
    response = requests.post(url, headers={
        'Authorization': self.auth.authorize('POST', url),
        'User-Agent': USER_AGENT,
    })

    if response.status_code == 200:
        return

    return self._unwrap_error(response)
[ "def", "repost", "(", "self", ",", "token", ")", ":", "url", "=", "'%s/%s/repost'", "%", "(", "REQUESTS_URL", ",", "token", ")", "response", "=", "requests", ".", "post", "(", "url", ",", "headers", "=", "{", "'Authorization'", ":", "self", ".", "auth", ".", "authorize", "(", "'POST'", ",", "url", ")", ",", "'User-Agent'", ":", "USER_AGENT", ",", "}", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "return", "self", ".", "_unwrap_error", "(", "response", ")" ]
Repost the job if it has timed out (:py:data:`cloudsight.STATUS_TIMEOUT`).

:param token: Job token as returned from :py:meth:`cloudsight.API.image_request`
              or :py:meth:`cloudsight.API.remote_image_request`
[ "Repost", "the", "job", "if", "it", "has", "timed", "out", "(", ":", "py", ":", "data", ":", "cloudsight", ".", "STATUS_TIMEOUT", ")", "." ]
python
train
33
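A hedged usage sketch for repost() above, assuming the cloudsight-python client wiring from its docs (SimpleAuth, API, image_response); the API key and image URL are placeholders, and the response shape is taken on faith from the docstring:

import cloudsight

auth = cloudsight.SimpleAuth('your-api-key')  # hypothetical key
api = cloudsight.API(auth)

# Submit a remote image, then poll its status by token.
response = api.remote_image_request('https://example.com/image.jpg')
token = response['token']
status = api.image_response(token)

# repost() is only meaningful once the job has timed out.
if status.get('status') == cloudsight.STATUS_TIMEOUT:
    api.repost(token)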
PyconUK/ConferenceScheduler
src/conference_scheduler/converter.py
https://github.com/PyconUK/ConferenceScheduler/blob/fb139f0ef2eab5ac8f4919aa4994d94d4e040030/src/conference_scheduler/converter.py#L43-L66
def solution_to_schedule(solution, events, slots):
    """Convert a schedule from solution to schedule form

    Parameters
    ----------
    solution : list or tuple
        of tuples of event index and slot index for each scheduled item
    events : list or tuple
        of :py:class:`resources.Event` instances
    slots : list or tuple
        of :py:class:`resources.Slot` instances

    Returns
    -------
    list
        A list of instances of :py:class:`resources.ScheduledItem`
    """
    return [
        ScheduledItem(event=events[item[0]], slot=slots[item[1]])
        for item in solution
    ]
[ "def", "solution_to_schedule", "(", "solution", ",", "events", ",", "slots", ")", ":", "return", "[", "ScheduledItem", "(", "event", "=", "events", "[", "item", "[", "0", "]", "]", ",", "slot", "=", "slots", "[", "item", "[", "1", "]", "]", ")", "for", "item", "in", "solution", "]" ]
Convert a schedule from solution to schedule form

Parameters
----------
solution : list or tuple
    of tuples of event index and slot index for each scheduled item
events : list or tuple
    of :py:class:`resources.Event` instances
slots : list or tuple
    of :py:class:`resources.Slot` instances

Returns
-------
list
    A list of instances of :py:class:`resources.ScheduledItem`
[ "Convert", "a", "schedule", "from", "solution", "to", "schedule", "form" ]
python
train
25.958333
mikeboers/sitetools
sitetools/utils.py
https://github.com/mikeboers/sitetools/blob/1ec4eea6902b4a276f868a711b783dd965c123b7/sitetools/utils.py#L33-L42
def unique_list(input_, key=lambda x: x):
    """Return the unique elements from the input, in order."""
    seen = set()
    output = []
    for x in input_:
        keyx = key(x)
        if keyx not in seen:
            seen.add(keyx)
            output.append(x)
    return output
[ "def", "unique_list", "(", "input_", ",", "key", "=", "lambda", "x", ":", "x", ")", ":", "seen", "=", "set", "(", ")", "output", "=", "[", "]", "for", "x", "in", "input_", ":", "keyx", "=", "key", "(", "x", ")", "if", "keyx", "not", "in", "seen", ":", "seen", ".", "add", "(", "keyx", ")", "output", ".", "append", "(", "x", ")", "return", "output" ]
Return the unique elements from the input, in order.
[ "Return", "the", "unique", "elements", "from", "the", "input", "in", "order", "." ]
python
train
27.3
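unique_list() is a pure function, so a quick illustrative check (made-up inputs) shows the order-preserving dedup and the key hook:

# Duplicates keep their first occurrence, in input order.
print(unique_list([3, 1, 3, 2, 1]))  # [3, 1, 2]

# key controls what counts as a duplicate: case-insensitive here,
# so 'B' and 'A' are dropped in favour of the earlier 'a' and 'b'.
print(unique_list(['a', 'b', 'B', 'A'], key=str.lower))  # ['a', 'b']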
saltstack/salt
salt/modules/zk_concurrency.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zk_concurrency.py#L267-L320
def unlock(path,
           zk_hosts=None,  # in case you need to unlock without having run lock (failed execution for example)
           identifier=None,
           max_concurrency=1,
           ephemeral_lease=False,
           scheme=None,
           profile=None,
           username=None,
           password=None,
           default_acl=None):
    '''
    Remove lease from semaphore

    path
        The path in zookeeper where the lock is

    zk_hosts
        zookeeper connect string

    identifier
        Name to identify this minion, if unspecified defaults to hostname

    max_concurrency
        Maximum number of lock holders

    timeout
        timeout to wait for the lock. A None timeout will block forever

    ephemeral_lease
        Whether the locks in zookeeper should be ephemeral

    Example:

    .. code-block:: bash

        salt minion zk_concurrency.unlock /lock/path host1:1234,host2:1234
    '''
    # if someone passed in zk_hosts, and the path isn't in
    # __context__['semaphore_map'], let's see if we can find it
    zk = _get_zk_conn(profile=profile, hosts=zk_hosts, scheme=scheme,
                      username=username, password=password,
                      default_acl=default_acl)
    if path not in __context__['semaphore_map']:
        __context__['semaphore_map'][path] = _Semaphore(
            zk, path, identifier,
            max_leases=max_concurrency,
            ephemeral_lease=ephemeral_lease)

    if path in __context__['semaphore_map']:
        __context__['semaphore_map'][path].release()
        del __context__['semaphore_map'][path]
        return True
    else:
        logging.error('Unable to find lease for path %s', path)
        return False
[ "def", "unlock", "(", "path", ",", "zk_hosts", "=", "None", ",", "# in case you need to unlock without having run lock (failed execution for example)", "identifier", "=", "None", ",", "max_concurrency", "=", "1", ",", "ephemeral_lease", "=", "False", ",", "scheme", "=", "None", ",", "profile", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "default_acl", "=", "None", ")", ":", "# if someone passed in zk_hosts, and the path isn't in __context__['semaphore_map'], lets", "# see if we can find it", "zk", "=", "_get_zk_conn", "(", "profile", "=", "profile", ",", "hosts", "=", "zk_hosts", ",", "scheme", "=", "scheme", ",", "username", "=", "username", ",", "password", "=", "password", ",", "default_acl", "=", "default_acl", ")", "if", "path", "not", "in", "__context__", "[", "'semaphore_map'", "]", ":", "__context__", "[", "'semaphore_map'", "]", "[", "path", "]", "=", "_Semaphore", "(", "zk", ",", "path", ",", "identifier", ",", "max_leases", "=", "max_concurrency", ",", "ephemeral_lease", "=", "ephemeral_lease", ")", "if", "path", "in", "__context__", "[", "'semaphore_map'", "]", ":", "__context__", "[", "'semaphore_map'", "]", "[", "path", "]", ".", "release", "(", ")", "del", "__context__", "[", "'semaphore_map'", "]", "[", "path", "]", "return", "True", "else", ":", "logging", ".", "error", "(", "'Unable to find lease for path %s'", ",", "path", ")", "return", "False" ]
Remove lease from semaphore

path
    The path in zookeeper where the lock is

zk_hosts
    zookeeper connect string

identifier
    Name to identify this minion, if unspecified defaults to hostname

max_concurrency
    Maximum number of lock holders

timeout
    timeout to wait for the lock. A None timeout will block forever

ephemeral_lease
    Whether the locks in zookeeper should be ephemeral

Example:

.. code-block:: bash

    salt minion zk_concurrency.unlock /lock/path host1:1234,host2:1234
[ "Remove", "lease", "from", "semaphore" ]
python
train
32.018519
mitsei/dlkit
dlkit/handcar/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L2178-L2212
def assign_objective_requisites(self, objective_id=None, requisite_objective_ids=None):
    """Creates a requirement dependency between Objective + a list of objectives.

    NON-standard method impl by cjshaw

    arg:    objective_id (osid.id.Id): the Id of the dependent Objective
    arg:    requisite_objective_id (osid.id.Id): the Id of the required Objective
    raise:  AlreadyExists - objective_id already mapped to requisite_objective_id
    raise:  NotFound - objective_id or requisite_objective_id not found
    raise:  NullArgument - objective_id or requisite_objective_id is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if objective_id is None or requisite_objective_ids is None:
        raise NullArgument()
    ors = ObjectiveRequisiteSession(self._objective_bank_id, runtime=self._runtime)
    ids_arg = {'ids': [str(i) for i in requisite_objective_ids]}
    url_path = construct_url('requisiteids',
                             bank_id=self._catalog_idstr,
                             obj_id=objective_id)
    try:
        result = self._put_request(url_path, ids_arg)
    except Exception:
        raise
    id_list = list()
    for identifier in result['ids']:
        id_list.append(Id(idstr=identifier))
    return id_objects.IdList(id_list)
[ "def", "assign_objective_requisites", "(", "self", ",", "objective_id", "=", "None", ",", "requisite_objective_ids", "=", "None", ")", ":", "if", "objective_id", "is", "None", "or", "requisite_objective_ids", "is", "None", ":", "raise", "NullArgument", "(", ")", "ors", "=", "ObjectiveRequisiteSession", "(", "self", ".", "_objective_bank_id", ",", "runtime", "=", "self", ".", "_runtime", ")", "ids_arg", "=", "{", "'ids'", ":", "[", "str", "(", "i", ")", "for", "i", "in", "requisite_objective_ids", "]", "}", "url_path", "=", "construct_url", "(", "'requisiteids'", ",", "bank_id", "=", "self", ".", "_catalog_idstr", ",", "obj_id", "=", "objective_id", ")", "try", ":", "result", "=", "self", ".", "_put_request", "(", "url_path", ",", "ids_arg", ")", "except", "Exception", ":", "raise", "id_list", "=", "list", "(", ")", "for", "identifier", "in", "result", "[", "'ids'", "]", ":", "id_list", ".", "append", "(", "Id", "(", "idstr", "=", "identifier", ")", ")", "return", "id_objects", ".", "IdList", "(", "id_list", ")" ]
Creates a requirement dependency between Objective + a list of objectives.

NON-standard method impl by cjshaw

arg:    objective_id (osid.id.Id): the Id of the dependent Objective
arg:    requisite_objective_id (osid.id.Id): the Id of the required Objective
raise:  AlreadyExists - objective_id already mapped to requisite_objective_id
raise:  NotFound - objective_id or requisite_objective_id not found
raise:  NullArgument - objective_id or requisite_objective_id is null
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
[ "Creates", "a", "requirement", "dependency", "between", "Objective", "+", "a", "list", "of", "objectives", ".", "NON", "-", "standard", "method", "impl", "by", "cjshaw" ]
python
train
44.514286
hvac/hvac
hvac/v1/__init__.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/v1/__init__.py#L1663-L1674
def transit_delete_key(self, name, mount_point='transit'):
    """DELETE /<mount_point>/keys/<name>

    :param name:
    :type name:
    :param mount_point:
    :type mount_point:
    :return:
    :rtype:
    """
    url = '/v1/{0}/keys/{1}'.format(mount_point, name)
    return self._adapter.delete(url)
[ "def", "transit_delete_key", "(", "self", ",", "name", ",", "mount_point", "=", "'transit'", ")", ":", "url", "=", "'/v1/{0}/keys/{1}'", ".", "format", "(", "mount_point", ",", "name", ")", "return", "self", ".", "_adapter", ".", "delete", "(", "url", ")" ]
DELETE /<mount_point>/keys/<name>

:param name:
:type name:
:param mount_point:
:type mount_point:
:return:
:rtype:
[ "DELETE", "/", "<mount_point", ">", "/", "keys", "/", "<name", ">" ]
python
train
27.833333
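A minimal sketch of calling transit_delete_key(), assuming an hvac 0.x-style flat Client namespace (matching the /v1 path above; newer hvac versions moved Transit under client.secrets.transit). The Vault address and token are placeholders:

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='s.placeholder')

# Deletes the named Transit key; Vault refuses unless the key's config
# has deletion_allowed set.
client.transit_delete_key('my-encryption-key')

# Same call against a non-default Transit mount.
client.transit_delete_key('my-encryption-key', mount_point='transit-eu')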
gbiggs/rtctree
rtctree/exec_context.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/exec_context.py#L152-L161
def kind(self):
    '''The kind of this execution context.'''
    with self._mutex:
        kind = self._obj.get_kind()
        if kind == RTC.PERIODIC:
            return self.PERIODIC
        elif kind == RTC.EVENT_DRIVEN:
            return self.EVENT_DRIVEN
        else:
            return self.OTHER
[ "def", "kind", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "kind", "=", "self", ".", "_obj", ".", "get_kind", "(", ")", "if", "kind", "==", "RTC", ".", "PERIODIC", ":", "return", "self", ".", "PERIODIC", "elif", "kind", "==", "RTC", ".", "EVENT_DRIVEN", ":", "return", "self", ".", "EVENT_DRIVEN", "else", ":", "return", "self", ".", "OTHER" ]
The kind of this execution context.
[ "The", "kind", "of", "this", "execution", "context", "." ]
python
train
33.2
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L316-L331
def simxGetVisionSensorDepthBuffer(clientID, sensorHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    c_buffer = ct.POINTER(ct.c_float)()
    resolution = (ct.c_int * 2)()
    ret = c_GetVisionSensorDepthBuffer(clientID, sensorHandle, resolution,
                                       ct.byref(c_buffer), operationMode)
    reso = []
    buffer = []
    if (ret == 0):
        buffer = [None] * resolution[0] * resolution[1]
        for i in range(resolution[0] * resolution[1]):
            buffer[i] = c_buffer[i]
        for i in range(2):
            reso.append(resolution[i])
    return ret, reso, buffer
[ "def", "simxGetVisionSensorDepthBuffer", "(", "clientID", ",", "sensorHandle", ",", "operationMode", ")", ":", "c_buffer", "=", "ct", ".", "POINTER", "(", "ct", ".", "c_float", ")", "(", ")", "resolution", "=", "(", "ct", ".", "c_int", "*", "2", ")", "(", ")", "ret", "=", "c_GetVisionSensorDepthBuffer", "(", "clientID", ",", "sensorHandle", ",", "resolution", ",", "ct", ".", "byref", "(", "c_buffer", ")", ",", "operationMode", ")", "reso", "=", "[", "]", "buffer", "=", "[", "]", "if", "(", "ret", "==", "0", ")", ":", "buffer", "=", "[", "None", "]", "*", "resolution", "[", "0", "]", "*", "resolution", "[", "1", "]", "for", "i", "in", "range", "(", "resolution", "[", "0", "]", "*", "resolution", "[", "1", "]", ")", ":", "buffer", "[", "i", "]", "=", "c_buffer", "[", "i", "]", "for", "i", "in", "range", "(", "2", ")", ":", "reso", ".", "append", "(", "resolution", "[", "i", "]", ")", "return", "ret", ",", "reso", ",", "buffer" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
39.6875
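The depth-buffer call follows the usual V-REP remote-API rhythm: connect, resolve a handle, prime a streaming call, then read from the buffer. A sketch under those assumptions; the import path mirrors this repo's layout and the scene object name is hypothetical:

from pypot.vrep.remoteApiBindings import vrep

client_id = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
if client_id != -1:
    err, sensor = vrep.simxGetObjectHandle(client_id, 'Vision_sensor',
                                           vrep.simx_opmode_oneshot_wait)
    # First call starts the server-side stream; later calls read the buffer.
    vrep.simxGetVisionSensorDepthBuffer(client_id, sensor,
                                        vrep.simx_opmode_streaming)
    ret, reso, depth = vrep.simxGetVisionSensorDepthBuffer(
        client_id, sensor, vrep.simx_opmode_buffer)
    if ret == vrep.simx_return_ok:
        print(reso, len(depth))  # [width, height] and width*height floats
    vrep.simxFinish(client_id)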
nornir-automation/nornir
nornir/core/connections.py
https://github.com/nornir-automation/nornir/blob/3425c47fd870db896cb80f619bae23bd98d50c74/nornir/core/connections.py#L85-L98
def deregister(cls, name: str) -> None:
    """Deregisters a registered connection plugin by its name

    Args:
        name: name of the connection plugin to deregister

    Raises:
        :obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`
    """
    if name not in cls.available:
        raise ConnectionPluginNotRegistered(
            f"Connection {name!r} is not registered"
        )
    cls.available.pop(name)
[ "def", "deregister", "(", "cls", ",", "name", ":", "str", ")", "->", "None", ":", "if", "name", "not", "in", "cls", ".", "available", ":", "raise", "ConnectionPluginNotRegistered", "(", "f\"Connection {name!r} is not registered\"", ")", "cls", ".", "available", ".", "pop", "(", "name", ")" ]
Deregisters a registered connection plugin by its name

Args:
    name: name of the connection plugin to deregister

Raises:
    :obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`
[ "Deregisters", "a", "registered", "connection", "plugin", "by", "its", "name" ]
python
train
32.857143
neptune-ml/steppy
steppy/adapter.py
https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/adapter.py#L106-L122
def adapt(self, all_ouputs: AllOutputs) -> DataPacket:
    """Adapt inputs for the transformer included in the step.

    Args:
        all_ouputs: Dict of outputs from parent steps. The keys should
            match the names of these steps and the values should be
            their respective outputs.

    Returns:
        Dictionary with the same keys as `adapting_recipes` and values
        constructed according to the respective recipes.
    """
    adapted = {}
    for name, recipe in self.adapting_recipes.items():
        adapted[name] = self._construct(all_ouputs, recipe)
    return adapted
[ "def", "adapt", "(", "self", ",", "all_ouputs", ":", "AllOutputs", ")", "->", "DataPacket", ":", "adapted", "=", "{", "}", "for", "name", ",", "recipe", "in", "self", ".", "adapting_recipes", ".", "items", "(", ")", ":", "adapted", "[", "name", "]", "=", "self", ".", "_construct", "(", "all_ouputs", ",", "recipe", ")", "return", "adapted" ]
Adapt inputs for the transformer included in the step.

Args:
    all_ouputs: Dict of outputs from parent steps. The keys should
        match the names of these steps and the values should be
        their respective outputs.

Returns:
    Dictionary with the same keys as `adapting_recipes` and values
    constructed according to the respective recipes.
[ "Adapt", "inputs", "for", "the", "transformer", "included", "in", "the", "step", "." ]
python
train
37.764706
abseil/abseil-py
absl/flags/_flagvalues.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_flagvalues.py#L290-L314
def find_module_defining_flag(self, flagname, default=None):
    """Return the name of the module defining this flag, or default.

    Args:
        flagname: str, name of the flag to lookup.
        default: Value to return if flagname is not defined. Defaults
            to None.

    Returns:
        The name of the module which registered the flag with this name.
        If no such module exists (i.e. no flag with this name exists),
        we return default.
    """
    registered_flag = self._flags().get(flagname)
    if registered_flag is None:
        return default
    for module, flags in six.iteritems(self.flags_by_module_dict()):
        for flag in flags:
            # It must compare the flag with the one in _flags. This is because a
            # flag might be overridden only for its long name (or short name),
            # and only its short name (or long name) is considered registered.
            if (flag.name == registered_flag.name and
                    flag.short_name == registered_flag.short_name):
                return module
    return default
[ "def", "find_module_defining_flag", "(", "self", ",", "flagname", ",", "default", "=", "None", ")", ":", "registered_flag", "=", "self", ".", "_flags", "(", ")", ".", "get", "(", "flagname", ")", "if", "registered_flag", "is", "None", ":", "return", "default", "for", "module", ",", "flags", "in", "six", ".", "iteritems", "(", "self", ".", "flags_by_module_dict", "(", ")", ")", ":", "for", "flag", "in", "flags", ":", "# It must compare the flag with the one in _flags. This is because a", "# flag might be overridden only for its long name (or short name),", "# and only its short name (or long name) is considered registered.", "if", "(", "flag", ".", "name", "==", "registered_flag", ".", "name", "and", "flag", ".", "short_name", "==", "registered_flag", ".", "short_name", ")", ":", "return", "module", "return", "default" ]
Return the name of the module defining this flag, or default.

Args:
    flagname: str, name of the flag to lookup.
    default: Value to return if flagname is not defined. Defaults to None.

Returns:
    The name of the module which registered the flag with this name.
    If no such module exists (i.e. no flag with this name exists),
    we return default.
[ "Return", "the", "name", "of", "the", "module", "defining", "this", "flag", "or", "default", "." ]
python
train
40.64
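A small demonstration of the lookup; the flag name is made up, and when run as a script the defining module is reported as __main__:

from absl import flags

flags.DEFINE_string('greeting', 'hello', 'An example flag.')
FLAGS = flags.FLAGS

# The module that registered the flag, or the default for unknown names.
print(FLAGS.find_module_defining_flag('greeting'))                   # __main__
print(FLAGS.find_module_defining_flag('no_such_flag', default='?'))  # ?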
kadrlica/pymodeler
pymodeler/parameter.py
https://github.com/kadrlica/pymodeler/blob/f426c01416fd4b8fc3afeeb6d3b5d1cb0cb8f8e3/pymodeler/parameter.py#L627-L641
def set(self, **kwargs):
    """Set the value, bounds, free, errors based on corresponding kwargs

    This invokes hooks for type-checking and bounds-checking that
    may be implemented by sub-classes.
    """
    # Probably want to reset bounds if set fails
    if 'bounds' in kwargs:
        self.set_bounds(kwargs.pop('bounds'))
    if 'free' in kwargs:
        self.set_free(kwargs.pop('free'))
    if 'errors' in kwargs:
        self.set_errors(kwargs.pop('errors'))
    if 'value' in kwargs:
        self.set_value(kwargs.pop('value'))
[ "def", "set", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Probably want to reset bounds if set fails", "if", "'bounds'", "in", "kwargs", ":", "self", ".", "set_bounds", "(", "kwargs", ".", "pop", "(", "'bounds'", ")", ")", "if", "'free'", "in", "kwargs", ":", "self", ".", "set_free", "(", "kwargs", ".", "pop", "(", "'free'", ")", ")", "if", "'errors'", "in", "kwargs", ":", "self", ".", "set_errors", "(", "kwargs", ".", "pop", "(", "'errors'", ")", ")", "if", "'value'", "in", "kwargs", ":", "self", ".", "set_value", "(", "kwargs", ".", "pop", "(", "'value'", ")", ")" ]
Set the value, bounds, free, errors based on corresponding kwargs

This invokes hooks for type-checking and bounds-checking that
may be implemented by sub-classes.
[ "Set", "the", "value", "bounds", "free", "errors", "based", "on", "corresponding", "kwargs" ]
python
test
38.466667
tanghaibao/goatools
goatools/wr_tbl_class.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl_class.py#L55-L62
def wr_hdrs(self, worksheet, row_idx):
    """Print row of column headers"""
    for col_idx, hdr in enumerate(self.hdrs):
        # print("ROW({R}) COL({C}) HDR({H}) FMT({F})\n".format(
        #     R=row_idx, C=col_idx, H=hdr, F=self.fmt_hdr))
        worksheet.write(row_idx, col_idx, hdr, self.fmt_hdr)
    row_idx += 1
    return row_idx
[ "def", "wr_hdrs", "(", "self", ",", "worksheet", ",", "row_idx", ")", ":", "for", "col_idx", ",", "hdr", "in", "enumerate", "(", "self", ".", "hdrs", ")", ":", "# print(\"ROW({R}) COL({C}) HDR({H}) FMT({F})\\n\".format(", "# R=row_idx, C=col_idx, H=hdr, F=self.fmt_hdr))", "worksheet", ".", "write", "(", "row_idx", ",", "col_idx", ",", "hdr", ",", "self", ".", "fmt_hdr", ")", "row_idx", "+=", "1", "return", "row_idx" ]
Print row of column headers
[ "Print", "row", "of", "column", "headers" ]
python
train
45.5
dopefishh/pympi
pympi/Elan.py
https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Elan.py#L1276-L1288
def rename_tier(self, id_from, id_to):
    """Rename a tier. Note that this renames also the child tiers that have
    the tier as a parent.

    :param str id_from: Original name of the tier.
    :param str id_to: Target name of the tier.
    :throws KeyError: If the tier doesn't exist.
    """
    childs = self.get_child_tiers_for(id_from)
    self.tiers[id_to] = self.tiers.pop(id_from)
    self.tiers[id_to][2]['TIER_ID'] = id_to
    for child in childs:
        self.tiers[child][2]['PARENT_REF'] = id_to
[ "def", "rename_tier", "(", "self", ",", "id_from", ",", "id_to", ")", ":", "childs", "=", "self", ".", "get_child_tiers_for", "(", "id_from", ")", "self", ".", "tiers", "[", "id_to", "]", "=", "self", ".", "tiers", ".", "pop", "(", "id_from", ")", "self", ".", "tiers", "[", "id_to", "]", "[", "2", "]", "[", "'TIER_ID'", "]", "=", "id_to", "for", "child", "in", "childs", ":", "self", ".", "tiers", "[", "child", "]", "[", "2", "]", "[", "'PARENT_REF'", "]", "=", "id_to" ]
Rename a tier. Note that this renames also the child tiers that have
the tier as a parent.

:param str id_from: Original name of the tier.
:param str id_to: Target name of the tier.
:throws KeyError: If the tier doesn't exist.
[ "Rename", "a", "tier", ".", "Note", "that", "this", "renames", "also", "the", "child", "tiers", "that", "have", "the", "tier", "as", "a", "parent", "." ]
python
test
41.769231
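A usage sketch for rename_tier(), assuming pympi's Eaf class builds an empty document when constructed without a file and that add_tier() accepts a parent tier; the tier names are illustrative:

from pympi.Elan import Eaf

eaf = Eaf()  # new, empty EAF document (assumed default constructor)
eaf.add_tier('words')
eaf.add_tier('phones', parent='words')

# Renaming also rewrites PARENT_REF on the child tiers, per the docstring.
eaf.rename_tier('words', 'tokens')
print(eaf.get_tier_names())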
EventTeam/beliefs
src/beliefs/cells/colors.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/colors.py#L70-L78
def membership_score(self, element):
    """ Fuzzy set gradable membership score

    See http://code.google.com/p/python-colormath/wiki/ColorDifferences
    """
    other = self.coerce(element)
    if self.value and other.value:
        return 1 - (self.value.delta_e(other.value, mode='cmc', pl=1, pc=1) / 200.0)
    else:
        return 0.0
[ "def", "membership_score", "(", "self", ",", "element", ")", ":", "other", "=", "self", ".", "coerce", "(", "element", ")", "if", "self", ".", "value", "and", "other", ".", "value", ":", "return", "1", "-", "(", "self", ".", "value", ".", "delta_e", "(", "other", ".", "value", ",", "mode", "=", "'cmc'", ",", "pl", "=", "1", ",", "pc", "=", "1", ")", "/", "200.0", ")", "else", ":", "return", "0.0" ]
Fuzzy set gradable membership score

See http://code.google.com/p/python-colormath/wiki/ColorDifferences
[ "Fuzzy", "set", "gradable", "membership", "score", "See", "http", ":", "//", "code", ".", "google", ".", "com", "/", "p", "/", "python", "-", "colormath", "/", "wiki", "/", "ColorDifferences" ]
python
train
40.666667
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_clock.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_clock.py#L12-L22
def clock_sa_clock_timezone(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    clock_sa = ET.SubElement(config, "clock-sa",
                             xmlns="urn:brocade.com:mgmt:brocade-clock")
    clock = ET.SubElement(clock_sa, "clock")
    timezone = ET.SubElement(clock, "timezone")
    timezone.text = kwargs.pop('timezone')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "clock_sa_clock_timezone", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "clock_sa", "=", "ET", ".", "SubElement", "(", "config", ",", "\"clock-sa\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-clock\"", ")", "clock", "=", "ET", ".", "SubElement", "(", "clock_sa", ",", "\"clock\"", ")", "timezone", "=", "ET", ".", "SubElement", "(", "clock", ",", "\"timezone\"", ")", "timezone", ".", "text", "=", "kwargs", ".", "pop", "(", "'timezone'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
41
vallis/libstempo
libstempo/toasim.py
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L677-L701
def extrap1d(interpolator):
    """
    Function to extend an interpolation function to an extrapolation function.

    :param interpolator: scipy interp1d object

    :returns ufunclike: extension of function to extrapolation
    """
    xs = interpolator.x
    ys = interpolator.y

    def pointwise(x):
        if x < xs[0]:
            return ys[0]  # +(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
        elif x > xs[-1]:
            return ys[-1]  # +(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
        else:
            return interpolator(x)

    def ufunclike(xs):
        return N.array(map(pointwise, N.array(xs)))

    return ufunclike
[ "def", "extrap1d", "(", "interpolator", ")", ":", "xs", "=", "interpolator", ".", "x", "ys", "=", "interpolator", ".", "y", "def", "pointwise", "(", "x", ")", ":", "if", "x", "<", "xs", "[", "0", "]", ":", "return", "ys", "[", "0", "]", "# +(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])", "elif", "x", ">", "xs", "[", "-", "1", "]", ":", "return", "ys", "[", "-", "1", "]", "# +(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])", "else", ":", "return", "interpolator", "(", "x", ")", "def", "ufunclike", "(", "xs", ")", ":", "return", "N", ".", "array", "(", "map", "(", "pointwise", ",", "N", ".", "array", "(", "xs", ")", ")", ")", "return", "ufunclike" ]
Function to extend an interpolation function to an extrapolation function.

:param interpolator: scipy interp1d object

:returns ufunclike: extension of function to extrapolation
[ "Function", "to", "extend", "an", "interpolation", "function", "to", "an", "extrapolation", "function", "." ]
python
train
24.88
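A quick check of extrap1d() with SciPy, assuming N is the module's numpy alias. One caveat: on Python 3 the inner map() is lazy, so ufunclike would need list(map(...)); the values below assume that fix (or Python 2):

import numpy as np
from scipy.interpolate import interp1d

xs = np.array([0.0, 1.0, 2.0])
ys = np.array([0.0, 10.0, 20.0])
f = extrap1d(interp1d(xs, ys))

# In range it defers to interp1d; out of range it clamps to the boundary
# values (the commented-out terms would make the extrapolation linear).
print(f([0.5, -1.0, 5.0]))  # [ 5.  0. 20.]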
rodynnz/xccdf
src/xccdf/models/status.py
https://github.com/rodynnz/xccdf/blob/1b9dc2f06b5cce8db2a54c5f95a8f6bcf5cb6981/src/xccdf/models/status.py#L69-L80
def str_to_date(self):
    """
    Returns the date attribute as a date object.

    :returns: Date of the status if it exists.
    :rtype: date or NoneType
    """
    if hasattr(self, 'date'):
        return date(*list(map(int, self.date.split('-'))))
    else:
        return None
[ "def", "str_to_date", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'date'", ")", ":", "return", "date", "(", "*", "list", "(", "map", "(", "int", ",", "self", ".", "date", ".", "split", "(", "'-'", ")", ")", ")", ")", "else", ":", "return", "None" ]
Returns the date attribute as a date object.

:returns: Date of the status if it exists.
:rtype: date or NoneType
[ "Returns", "the", "date", "attribute", "as", "a", "date", "object", "." ]
python
train
25.75
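The conversion in str_to_date() is plain stdlib work; an equivalent standalone line is handy for sanity-checking the expected ISO year-month-day format:

from datetime import date

# Mirrors str_to_date: split the string and splat the ints into date().
print(date(*map(int, '2019-03-01'.split('-'))))  # 2019-03-01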
jssimporter/python-jss
jss/jssobject.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobject.py#L310-L352
def save(self):
    """Update or create a new object on the JSS.

    If this object is not yet on the JSS, this method will create a new
    object with POST, otherwise, it will try to update the existing
    object with PUT.

    Data validation is up to the client; The JSS in most cases will at
    least give you some hints as to what is invalid.
    """
    # Object probably exists if it has an ID (user can't assign
    # one). The only objects that don't have an ID are those that
    # cannot list.
    if self.can_put and (not self.can_list or self.id):
        # The JSS will reject PUT requests for objects that do not have
        # a category. The JSS assigns a name of "No category assigned",
        # which it will reject. Therefore, if that is the category
        # name, change it to "", which is accepted.
        categories = [elem for elem in self.findall("category")]
        categories.extend([elem for elem in self.findall("category/name")])
        for cat_tag in categories:
            if cat_tag.text == "No category assigned":
                cat_tag.text = ""

        try:
            self.jss.put(self.url, self)
            updated_data = self.jss.get(self.url)
        except JSSPutError as put_error:
            # Something went wrong.
            raise JSSPutError(put_error)
    elif self.can_post:
        url = self.get_post_url()
        try:
            updated_data = self.jss.post(self.__class__, url, self)
        except JSSPostError as err:
            raise JSSPostError(err)
    else:
        raise JSSMethodNotAllowedError(self.__class__.__name__)

    # Replace current instance's data with new, JSS-validated data.
    self.clear()
    for child in updated_data.getchildren():
        self._children.append(child)
[ "def", "save", "(", "self", ")", ":", "# Object probably exists if it has an ID (user can't assign", "# one). The only objects that don't have an ID are those that", "# cannot list.", "if", "self", ".", "can_put", "and", "(", "not", "self", ".", "can_list", "or", "self", ".", "id", ")", ":", "# The JSS will reject PUT requests for objects that do not have", "# a category. The JSS assigns a name of \"No category assigned\",", "# which it will reject. Therefore, if that is the category", "# name, changed it to \"\", which is accepted.", "categories", "=", "[", "elem", "for", "elem", "in", "self", ".", "findall", "(", "\"category\"", ")", "]", "categories", ".", "extend", "(", "[", "elem", "for", "elem", "in", "self", ".", "findall", "(", "\"category/name\"", ")", "]", ")", "for", "cat_tag", "in", "categories", ":", "if", "cat_tag", ".", "text", "==", "\"No category assigned\"", ":", "cat_tag", ".", "text", "=", "\"\"", "try", ":", "self", ".", "jss", ".", "put", "(", "self", ".", "url", ",", "self", ")", "updated_data", "=", "self", ".", "jss", ".", "get", "(", "self", ".", "url", ")", "except", "JSSPutError", "as", "put_error", ":", "# Something when wrong.", "raise", "JSSPutError", "(", "put_error", ")", "elif", "self", ".", "can_post", ":", "url", "=", "self", ".", "get_post_url", "(", ")", "try", ":", "updated_data", "=", "self", ".", "jss", ".", "post", "(", "self", ".", "__class__", ",", "url", ",", "self", ")", "except", "JSSPostError", "as", "err", ":", "raise", "JSSPostError", "(", "err", ")", "else", ":", "raise", "JSSMethodNotAllowedError", "(", "self", ".", "__class__", ".", "__name__", ")", "# Replace current instance's data with new, JSS-validated data.", "self", ".", "clear", "(", ")", "for", "child", "in", "updated_data", ".", "getchildren", "(", ")", ":", "self", ".", "_children", ".", "append", "(", "child", ")" ]
Update or create a new object on the JSS.

If this object is not yet on the JSS, this method will create a new
object with POST, otherwise, it will try to update the existing
object with PUT.

Data validation is up to the client; The JSS in most cases will at
least give you some hints as to what is invalid.
[ "Update", "or", "create", "a", "new", "object", "on", "the", "JSS", "." ]
python
train
43.72093
linkedin/naarad
src/naarad/reporting/diff.py
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/reporting/diff.py#L297-L311
def check_sla(self, sla, diff_metric):
    """
    Check whether the SLA has passed or failed
    """
    try:
        if sla.display is '%':
            diff_val = float(diff_metric['percent_diff'])
        else:
            diff_val = float(diff_metric['absolute_diff'])
    except ValueError:
        return False

    if not (sla.check_sla_passed(diff_val)):
        self.sla_failures += 1
        self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric))
    return True
[ "def", "check_sla", "(", "self", ",", "sla", ",", "diff_metric", ")", ":", "try", ":", "if", "sla", ".", "display", "is", "'%'", ":", "diff_val", "=", "float", "(", "diff_metric", "[", "'percent_diff'", "]", ")", "else", ":", "diff_val", "=", "float", "(", "diff_metric", "[", "'absolute_diff'", "]", ")", "except", "ValueError", ":", "return", "False", "if", "not", "(", "sla", ".", "check_sla_passed", "(", "diff_val", ")", ")", ":", "self", ".", "sla_failures", "+=", "1", "self", ".", "sla_failure_list", ".", "append", "(", "DiffSLAFailure", "(", "sla", ",", "diff_metric", ")", ")", "return", "True" ]
Check whether the SLA has passed or failed
[ "Check", "whether", "the", "SLA", "has", "passed", "or", "failed" ]
python
valid
29.8
sephii/zipch
zipch/zipcodes.py
https://github.com/sephii/zipch/blob/a64720e8cb55d00edeab30c426791cf87bcca82a/zipch/zipcodes.py#L115-L124
def get_zipcodes_for_canton(self, canton):
    """
    Return the list of zipcodes for the given canton code.
    """
    zipcodes = [
        zipcode for zipcode, location in self.get_locations().items()
        if location.canton == canton
    ]

    return zipcodes
[ "def", "get_zipcodes_for_canton", "(", "self", ",", "canton", ")", ":", "zipcodes", "=", "[", "zipcode", "for", "zipcode", ",", "location", "in", "self", ".", "get_locations", "(", ")", ".", "items", "(", ")", "if", "location", ".", "canton", "==", "canton", "]", "return", "zipcodes" ]
Return the list of zipcodes for the given canton code.
[ "Return", "the", "list", "of", "zipcodes", "for", "the", "given", "canton", "code", "." ]
python
train
29.1
spyder-ide/spyder
spyder/widgets/mixins.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/mixins.py#L862-L868
def mouseReleaseEvent(self, event):
    """Go to error"""
    self.QT_CLASS.mouseReleaseEvent(self, event)
    text = self.get_line_at(event.pos())
    if get_error_match(text) and not self.has_selected_text():
        if self.go_to_error is not None:
            self.go_to_error.emit(text)
[ "def", "mouseReleaseEvent", "(", "self", ",", "event", ")", ":", "self", ".", "QT_CLASS", ".", "mouseReleaseEvent", "(", "self", ",", "event", ")", "text", "=", "self", ".", "get_line_at", "(", "event", ".", "pos", "(", ")", ")", "if", "get_error_match", "(", "text", ")", "and", "not", "self", ".", "has_selected_text", "(", ")", ":", "if", "self", ".", "go_to_error", "is", "not", "None", ":", "self", ".", "go_to_error", ".", "emit", "(", "text", ")" ]
Go to error
[ "Go", "to", "error" ]
python
train
45
ming060/robotframework-uiautomatorlibrary
uiautomatorlibrary/Mobile.py
https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L288-L294
def swipe_top(self, steps=10, *args, **selectors):
    """
    Swipe the UI object with *selectors* from center to top

    See `Swipe Left` for more details.
    """
    self.device(**selectors).swipe.up(steps=steps)
[ "def", "swipe_top", "(", "self", ",", "steps", "=", "10", ",", "*", "args", ",", "*", "*", "selectors", ")", ":", "self", ".", "device", "(", "*", "*", "selectors", ")", ".", "swipe", ".", "up", "(", "steps", "=", "steps", ")" ]
Swipe the UI object with *selectors* from center to top See `Swipe Left` for more details.
[ "Swipe", "the", "UI", "object", "with", "*", "selectors", "*", "from", "center", "to", "top" ]
python
train
33
eumis/pyviews
pyviews/rendering/pipeline.py
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/pipeline.py#L37-L43
def get_pipeline(node: Node) -> RenderingPipeline:
    """Gets rendering pipeline for passed node"""
    pipeline = _get_registered_pipeline(node)
    if pipeline is None:
        msg = _get_pipeline_registration_error_message(node)
        raise RenderingError(msg)
    return pipeline
[ "def", "get_pipeline", "(", "node", ":", "Node", ")", "->", "RenderingPipeline", ":", "pipeline", "=", "_get_registered_pipeline", "(", "node", ")", "if", "pipeline", "is", "None", ":", "msg", "=", "_get_pipeline_registration_error_message", "(", "node", ")", "raise", "RenderingError", "(", "msg", ")", "return", "pipeline" ]
Gets rendering pipeline for passed node
[ "Gets", "rendering", "pipeline", "for", "passed", "node" ]
python
train
40
inveniosoftware/invenio-migrator
invenio_migrator/cli.py
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/cli.py#L227-L232
def loadusers(sources):
    """Load users."""
    from .tasks.users import load_user
    # Cannot be executed asynchronously due to duplicate emails and usernames
    # which can create a race condition.
    loadcommon(sources, load_user, asynchronous=False)
[ "def", "loadusers", "(", "sources", ")", ":", "from", ".", "tasks", ".", "users", "import", "load_user", "# Cannot be executed asynchronously due to duplicate emails and usernames", "# which can create a racing condition.", "loadcommon", "(", "sources", ",", "load_user", ",", "asynchronous", "=", "False", ")" ]
Load users.
[ "Load", "users", "." ]
python
test
42.5
gabstopper/smc-python
smc/core/route.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/route.py#L671-L711
def remove_route_gateway(self, element, network=None):
    """
    Remove a route element by href or Element. Use this if you want to
    remove a netlink or a routing element such as BGP or OSPF. Removing
    is done from within the routing interface context.
    ::

        interface0 = engine.routing.get(0)
        interface0.remove_route_gateway(StaticNetlink('mynetlink'))

    Only from a specific network on a multi-address interface::

        interface0.remove_route_gateway(
            StaticNetlink('mynetlink'), network='172.18.1.0/24')

    :param str,Element element: element to remove from this routing node
    :param str network: if network specified, only add OSPF to this
        network on interface
    :raises ModificationAborted: Change must be made at the interface level
    :raises UpdateElementFailed: failure to update routing table
    :return: Status of whether the entry was removed (i.e. or not found)
    :rtype: bool
    """
    if self.level not in ('interface',):
        raise ModificationAborted('You must make this change from the '
                                  'interface routing level. Current node: {}'.format(self))
    node_changed = False
    element = element_resolver(element)
    for network in self:
        # Tunnel Interface binds gateways to the interface
        if network.level == 'gateway' and network.data.get('href') == element:
            network.delete()
            node_changed = True
            break
        for gateway in network:
            if gateway.data.get('href') == element:
                gateway.delete()
                node_changed = True
    return node_changed
[ "def", "remove_route_gateway", "(", "self", ",", "element", ",", "network", "=", "None", ")", ":", "if", "self", ".", "level", "not", "in", "(", "'interface'", ",", ")", ":", "raise", "ModificationAborted", "(", "'You must make this change from the '", "'interface routing level. Current node: {}'", ".", "format", "(", "self", ")", ")", "node_changed", "=", "False", "element", "=", "element_resolver", "(", "element", ")", "for", "network", "in", "self", ":", "# Tunnel Interface binds gateways to the interface", "if", "network", ".", "level", "==", "'gateway'", "and", "network", ".", "data", ".", "get", "(", "'href'", ")", "==", "element", ":", "network", ".", "delete", "(", ")", "node_changed", "=", "True", "break", "for", "gateway", "in", "network", ":", "if", "gateway", ".", "data", ".", "get", "(", "'href'", ")", "==", "element", ":", "gateway", ".", "delete", "(", ")", "node_changed", "=", "True", "return", "node_changed" ]
Remove a route element by href or Element. Use this if you want to
remove a netlink or a routing element such as BGP or OSPF. Removing
is done from within the routing interface context.
::

    interface0 = engine.routing.get(0)
    interface0.remove_route_gateway(StaticNetlink('mynetlink'))

Only from a specific network on a multi-address interface::

    interface0.remove_route_gateway(
        StaticNetlink('mynetlink'), network='172.18.1.0/24')

:param str,Element element: element to remove from this routing node
:param str network: if network specified, only add OSPF to this
    network on interface
:raises ModificationAborted: Change must be made at the interface level
:raises UpdateElementFailed: failure to update routing table
:return: Status of whether the entry was removed (i.e. or not found)
:rtype: bool
[ "Remove", "a", "route", "element", "by", "href", "or", "Element", ".", "Use", "this", "if", "you", "want", "to", "remove", "a", "netlink", "or", "a", "routing", "element", "such", "as", "BGP", "or", "OSPF", ".", "Removing", "is", "done", "from", "within", "the", "routing", "interface", "context", ".", "::", "interface0", "=", "engine", ".", "routing", ".", "get", "(", "0", ")", "interface0", ".", "remove_route_gateway", "(", "StaticNetlink", "(", "mynetlink", "))", "Only", "from", "a", "specific", "network", "on", "a", "multi", "-", "address", "interface", "::", "interface0", ".", "remove_route_gateway", "(", "StaticNetlink", "(", "mynetlink", ")", "network", "=", "172", ".", "18", ".", "1", ".", "0", "/", "24", ")", ":", "param", "str", "Element", "element", ":", "element", "to", "remove", "from", "this", "routing", "node", ":", "param", "str", "network", ":", "if", "network", "specified", "only", "add", "OSPF", "to", "this", "network", "on", "interface", ":", "raises", "ModificationAborted", ":", "Change", "must", "be", "made", "at", "the", "interface", "level", ":", "raises", "UpdateElementFailed", ":", "failure", "to", "update", "routing", "table", ":", "return", ":", "Status", "of", "whether", "the", "entry", "was", "removed", "(", "i", ".", "e", ".", "or", "not", "found", ")", ":", "rtype", ":", "bool" ]
python
train
43.536585
AltSchool/dynamic-rest
dynamic_rest/serializers.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/serializers.py#L520-L541
def _link_fields(self):
    """Construct dict of name:field for linkable fields."""
    query_params = self.get_request_attribute('query_params', {})
    if 'exclude_links' in query_params:
        return {}
    else:
        all_fields = self.get_all_fields()
        return {
            name: field
            for name, field in six.iteritems(all_fields)
            if isinstance(field, DynamicRelationField)
            and getattr(field, 'link', True)
            and not (
                # Skip sideloaded fields
                name in self.fields
                and self.is_field_sideloaded(name)
            ) and not (
                # Skip included single relations
                # TODO: Use links, when we can generate canonical URLs
                name in self.fields
                and not getattr(field, 'many', False)
            )
        }
[ "def", "_link_fields", "(", "self", ")", ":", "query_params", "=", "self", ".", "get_request_attribute", "(", "'query_params'", ",", "{", "}", ")", "if", "'exclude_links'", "in", "query_params", ":", "return", "{", "}", "else", ":", "all_fields", "=", "self", ".", "get_all_fields", "(", ")", "return", "{", "name", ":", "field", "for", "name", ",", "field", "in", "six", ".", "iteritems", "(", "all_fields", ")", "if", "isinstance", "(", "field", ",", "DynamicRelationField", ")", "and", "getattr", "(", "field", ",", "'link'", ",", "True", ")", "and", "not", "(", "# Skip sideloaded fields", "name", "in", "self", ".", "fields", "and", "self", ".", "is_field_sideloaded", "(", "name", ")", ")", "and", "not", "(", "# Skip included single relations", "# TODO: Use links, when we can generate canonical URLs", "name", "in", "self", ".", "fields", "and", "not", "getattr", "(", "field", ",", "'many'", ",", "False", ")", ")", "}" ]
Construct dict of name:field for linkable fields.
[ "Construct", "dict", "of", "name", ":", "field", "for", "linkable", "fields", "." ]
python
train
41.681818
majuss/lupupy
lupupy/devices/__init__.py
https://github.com/majuss/lupupy/blob/71af6c397837ffc393c7b8122be175602638d3c6/lupupy/devices/__init__.py#L34-L55
def refresh(self):
    """Refresh a device"""
    # new_device = {}
    if self.type in CONST.BINARY_SENSOR_TYPES:
        response = self._lupusec.get_sensors()
        for device in response:
            if device['device_id'] == self._device_id:
                self.update(device)
                return device
    elif self.type == CONST.ALARM_TYPE:
        response = self._lupusec.get_panel()
        self.update(response)
        return response
    elif self.type == CONST.TYPE_POWER_SWITCH:
        response = self._lupusec.get_power_switches()
        for pss in response:
            if pss['device_id'] == self._device_id:
                self.update(pss)
                return pss
[ "def", "refresh", "(", "self", ")", ":", "# new_device = {}", "if", "self", ".", "type", "in", "CONST", ".", "BINARY_SENSOR_TYPES", ":", "response", "=", "self", ".", "_lupusec", ".", "get_sensors", "(", ")", "for", "device", "in", "response", ":", "if", "device", "[", "'device_id'", "]", "==", "self", ".", "_device_id", ":", "self", ".", "update", "(", "device", ")", "return", "device", "elif", "self", ".", "type", "==", "CONST", ".", "ALARM_TYPE", ":", "response", "=", "self", ".", "_lupusec", ".", "get_panel", "(", ")", "self", ".", "update", "(", "response", ")", "return", "response", "elif", "self", ".", "type", "==", "CONST", ".", "TYPE_POWER_SWITCH", ":", "response", "=", "self", ".", "_lupusec", ".", "get_power_switches", "(", ")", "for", "pss", "in", "response", ":", "if", "pss", "[", "'device_id'", "]", "==", "self", ".", "_device_id", ":", "self", ".", "update", "(", "pss", ")", "return", "pss" ]
Refresh a device
[ "Refresh", "a", "device" ]
python
train
33.681818
buriburisuri/sugartensor
sugartensor/sg_data.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_data.py#L10-L29
def _data_to_tensor(data_list, batch_size, name=None):
    r"""Returns batch queues from the whole data.

    Args:
        data_list: A list of ndarrays. Every array must have the same size
            in the first dimension.
        batch_size: An integer.
        name: A name for the operations (optional).

    Returns:
        A list of tensors of `batch_size`.
    """
    # convert to constant tensor
    const_list = [tf.constant(data) for data in data_list]

    # create queue from constant tensor
    queue_list = tf.train.slice_input_producer(const_list,
                                               capacity=batch_size * 128,
                                               name=name)

    # create batch queue
    return tf.train.shuffle_batch(queue_list, batch_size,
                                  capacity=batch_size * 128,
                                  min_after_dequeue=batch_size * 32,
                                  name=name)
[ "def", "_data_to_tensor", "(", "data_list", ",", "batch_size", ",", "name", "=", "None", ")", ":", "# convert to constant tensor", "const_list", "=", "[", "tf", ".", "constant", "(", "data", ")", "for", "data", "in", "data_list", "]", "# create queue from constant tensor", "queue_list", "=", "tf", ".", "train", ".", "slice_input_producer", "(", "const_list", ",", "capacity", "=", "batch_size", "*", "128", ",", "name", "=", "name", ")", "# create batch queue", "return", "tf", ".", "train", ".", "shuffle_batch", "(", "queue_list", ",", "batch_size", ",", "capacity", "=", "batch_size", "*", "128", ",", "min_after_dequeue", "=", "batch_size", "*", "32", ",", "name", "=", "name", ")" ]
r"""Returns batch queues from the whole data. Args: data_list: A list of ndarrays. Every array must have the same size in the first dimension. batch_size: An integer. name: A name for the operations (optional). Returns: A list of tensors of `batch_size`.
[ "r", "Returns", "batch", "queues", "from", "the", "whole", "data", ".", "Args", ":", "data_list", ":", "A", "list", "of", "ndarrays", ".", "Every", "array", "must", "have", "the", "same", "size", "in", "the", "first", "dimension", ".", "batch_size", ":", "An", "integer", ".", "name", ":", "A", "name", "for", "the", "operations", "(", "optional", ")", ".", "Returns", ":", "A", "list", "of", "tensors", "of", "batch_size", "." ]
python
train
38.1
exhuma/python-cluster
cluster/util.py
https://github.com/exhuma/python-cluster/blob/4c0ac14d9beafcd51f0d849151514083c296402f/cluster/util.py#L118-L124
def dotproduct(a, b):
    "Calculates the dotproduct between two vectors"
    assert(len(a) == len(b))
    out = 0
    for i in range(len(a)):
        out += a[i] * b[i]
    return out
[ "def", "dotproduct", "(", "a", ",", "b", ")", ":", "assert", "(", "len", "(", "a", ")", "==", "len", "(", "b", ")", ")", "out", "=", "0", "for", "i", "in", "range", "(", "len", "(", "a", ")", ")", ":", "out", "+=", "a", "[", "i", "]", "*", "b", "[", "i", "]", "return", "out" ]
Calculates the dotproduct between two vectors
[ "Calculates", "the", "dotproduct", "between", "two", "vecors" ]
python
train
25.285714
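A tiny check of dotproduct(), with the numpy one-liner it mirrors for comparison (numpy is not a dependency of the helper itself):

import numpy as np

a, b = [1, 2, 3], [4, 5, 6]
print(dotproduct(a, b))  # 32
print(np.dot(a, b))      # 32, the vectorized equivalent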
sixty-north/cosmic-ray
src/cosmic_ray/cloning.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/cloning.py#L139-L162
def _build_env(venv_dir):
    """Create a new virtual environment in `venv_dir`.

    This uses the base prefix of any virtual environment that you may be
    using when you call this.
    """
    # NB: We had to create this because the venv module wasn't doing what we
    # needed. In particular, if we used it to create a venv from an existing
    # venv, it *always* created symlinks back to the original venv's python
    # executables. Then, when you used those linked executables, you ended up
    # interacting with the original venv. I could find no way around this,
    # hence this function.
    prefix = getattr(sys, 'real_prefix', sys.prefix)
    python = Path(prefix) / 'bin' / 'python'
    command = '{} -m venv {}'.format(python, venv_dir)
    try:
        log.info('Creating virtual environment: %s', command)
        subprocess.run(command.split(),
                       stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT,
                       check=True)
    except subprocess.CalledProcessError as exc:
        log.error("Error creating virtual environment: %s", exc.output)
        raise
[ "def", "_build_env", "(", "venv_dir", ")", ":", "# NB: We had to create the because the venv modules wasn't doing what we", "# needed. In particular, if we used it create a venv from an existing venv,", "# it *always* created symlinks back to the original venv's python", "# executables. Then, when you used those linked executables, you ended up", "# interacting with the original venv. I could find no way around this, hence", "# this function.", "prefix", "=", "getattr", "(", "sys", ",", "'real_prefix'", ",", "sys", ".", "prefix", ")", "python", "=", "Path", "(", "prefix", ")", "/", "'bin'", "/", "'python'", "command", "=", "'{} -m venv {}'", ".", "format", "(", "python", ",", "venv_dir", ")", "try", ":", "log", ".", "info", "(", "'Creating virtual environment: %s'", ",", "command", ")", "subprocess", ".", "run", "(", "command", ".", "split", "(", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "check", "=", "True", ")", "except", "subprocess", ".", "CalledProcessError", "as", "exc", ":", "log", ".", "error", "(", "\"Error creating virtual environment: %s\"", ",", "exc", ".", "output", ")", "raise" ]
Create a new virtual environment in `venv_dir`. This uses the base prefix of any virtual environment that you may be using when you call this.
[ "Create", "a", "new", "virtual", "environment", "in", "venv_dir", "." ]
python
train
46.083333
mattloper/chumpy
chumpy/utils.py
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L36-L41
def dfs_do_func_on_graph(node, func, *args, **kwargs):
    '''
    invoke func on each node of the dr graph
    '''
    for _node in node.tree_iterator():
        func(_node, *args, **kwargs)
[ "def", "dfs_do_func_on_graph", "(", "node", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "_node", "in", "node", ".", "tree_iterator", "(", ")", ":", "func", "(", "_node", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
invoke func on each node of the dr graph
[ "invoke", "func", "on", "each", "node", "of", "the", "dr", "graph" ]
python
train
31
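A sketch with a stand-in node class, since the helper only assumes tree_iterator() yields every node in the graph; FakeNode is hypothetical, standing in for chumpy's real differentiable objects:

class FakeNode:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def tree_iterator(self):
        # depth-first over self and all descendants
        yield self
        for child in self.children:
            yield from child.tree_iterator()

root = FakeNode('root', [FakeNode('a'), FakeNode('b', [FakeNode('c')])])
dfs_do_func_on_graph(root, lambda node, prefix: print(prefix, node.name), '->')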
inveniosoftware-attic/invenio-utils
invenio_utils/url.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/url.py#L470-L515
def get_canonical_and_alternates_urls(url, drop_ln=True, washed_argd=None,
                                      quote_path=False):
    """
    Given an Invenio URL returns a tuple with two elements. The first is
    the canonical URL, that is the original URL with CFG_SITE_URL prefix,
    and where the ln= argument is stripped. The second element is a
    mapping, language code -> alternate URL.

    @param quote_path: if True, the path section of the given C{url}
        is quoted according to RFC 2396
    """
    dummy_scheme, dummy_netloc, path, dummy_params, query, fragment = urlparse(url)
    canonical_scheme, canonical_netloc = urlparse(cfg.get('CFG_SITE_URL'))[0:2]
    parsed_query = washed_argd or parse_qsl(query)
    no_ln_parsed_query = [(key, value) for (key, value) in parsed_query
                          if key != 'ln']
    if drop_ln:
        canonical_parsed_query = no_ln_parsed_query
    else:
        canonical_parsed_query = parsed_query
    if quote_path:
        path = urllib.quote(path)
    canonical_query = urlencode(canonical_parsed_query)
    canonical_url = urlunparse((canonical_scheme, canonical_netloc, path,
                                dummy_params, canonical_query, fragment))
    alternate_urls = {}
    for ln in cfg.get('CFG_SITE_LANGS'):
        alternate_query = urlencode(no_ln_parsed_query + [('ln', ln)])
        alternate_url = urlunparse((canonical_scheme, canonical_netloc, path,
                                    dummy_params, alternate_query, fragment))
        alternate_urls[ln] = alternate_url
    return canonical_url, alternate_urls
[ "def", "get_canonical_and_alternates_urls", "(", "url", ",", "drop_ln", "=", "True", ",", "washed_argd", "=", "None", ",", "quote_path", "=", "False", ")", ":", "dummy_scheme", ",", "dummy_netloc", ",", "path", ",", "dummy_params", ",", "query", ",", "fragment", "=", "urlparse", "(", "url", ")", "canonical_scheme", ",", "canonical_netloc", "=", "urlparse", "(", "cfg", ".", "get", "(", "'CFG_SITE_URL'", ")", ")", "[", "0", ":", "2", "]", "parsed_query", "=", "washed_argd", "or", "parse_qsl", "(", "query", ")", "no_ln_parsed_query", "=", "[", "(", "key", ",", "value", ")", "for", "(", "key", ",", "value", ")", "in", "parsed_query", "if", "key", "!=", "'ln'", "]", "if", "drop_ln", ":", "canonical_parsed_query", "=", "no_ln_parsed_query", "else", ":", "canonical_parsed_query", "=", "parsed_query", "if", "quote_path", ":", "path", "=", "urllib", ".", "quote", "(", "path", ")", "canonical_query", "=", "urlencode", "(", "canonical_parsed_query", ")", "canonical_url", "=", "urlunparse", "(", "(", "canonical_scheme", ",", "canonical_netloc", ",", "path", ",", "dummy_params", ",", "canonical_query", ",", "fragment", ")", ")", "alternate_urls", "=", "{", "}", "for", "ln", "in", "cfg", ".", "get", "(", "'CFG_SITE_LANGS'", ")", ":", "alternate_query", "=", "urlencode", "(", "no_ln_parsed_query", "+", "[", "(", "'ln'", ",", "ln", ")", "]", ")", "alternate_url", "=", "urlunparse", "(", "(", "canonical_scheme", ",", "canonical_netloc", ",", "path", ",", "dummy_params", ",", "alternate_query", ",", "fragment", ")", ")", "alternate_urls", "[", "ln", "]", "=", "alternate_url", "return", "canonical_url", ",", "alternate_urls" ]
Given an Invenio URL returns a tuple with two elements. The first is
the canonical URL, that is the original URL with CFG_SITE_URL prefix,
and where the ln= argument is stripped. The second element is a
mapping, language code -> alternate URL.

@param quote_path: if True, the path section of the given C{url}
    is quoted according to RFC 2396
[ "Given", "an", "Invenio", "URL", "returns", "a", "tuple", "with", "two", "elements", ".", "The", "first", "is", "the", "canonical", "URL", "that", "is", "the", "original", "URL", "with", "CFG_SITE_URL", "prefix", "and", "where", "the", "ln", "=", "argument", "stripped", ".", "The", "second", "element", "element", "is", "mapping", "language", "code", "-", ">", "alternate", "URL" ]
python
train
35.717391
facelessuser/pyspelling
pyspelling/filters/__init__.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L179-L193
def _run_first(self, source_file):
    """Run on as first in chain."""
    self.reset()
    self.current_encoding = self.default_encoding
    encoding = None
    try:
        encoding = self._detect_encoding(source_file)
        content = self.filter(source_file, encoding)
    except UnicodeDecodeError:
        if not encoding or encoding != self.default_encoding:
            content = self.filter(source_file, self.default_encoding)
        else:
            raise
    return content
[ "def", "_run_first", "(", "self", ",", "source_file", ")", ":", "self", ".", "reset", "(", ")", "self", ".", "current_encoding", "=", "self", ".", "default_encoding", "encoding", "=", "None", "try", ":", "encoding", "=", "self", ".", "_detect_encoding", "(", "source_file", ")", "content", "=", "self", ".", "filter", "(", "source_file", ",", "encoding", ")", "except", "UnicodeDecodeError", ":", "if", "not", "encoding", "or", "encoding", "!=", "self", ".", "default_encoding", ":", "content", "=", "self", ".", "filter", "(", "source_file", ",", "self", ".", "default_encoding", ")", "else", ":", "raise", "return", "content" ]
Run as the first filter in the chain.
[ "Run", "as", "the", "first", "filter", "in", "the", "chain", "." ]
python
train
35.066667
softlayer/softlayer-python
SoftLayer/managers/image.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/image.py#L48-L70
def list_private_images(self, guid=None, name=None, **kwargs): """List all private images. :param string guid: filter based on GUID :param string name: filter based on name :param dict \\*\\*kwargs: response-level options (mask, limit, etc.) """ if 'mask' not in kwargs: kwargs['mask'] = IMAGE_MASK _filter = utils.NestedDict(kwargs.get('filter') or {}) if name: _filter['privateBlockDeviceTemplateGroups']['name'] = ( utils.query_filter(name)) if guid: _filter['privateBlockDeviceTemplateGroups']['globalIdentifier'] = ( utils.query_filter(guid)) kwargs['filter'] = _filter.to_dict() account = self.client['Account'] return account.getPrivateBlockDeviceTemplateGroups(**kwargs)
[ "def", "list_private_images", "(", "self", ",", "guid", "=", "None", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "'mask'", "not", "in", "kwargs", ":", "kwargs", "[", "'mask'", "]", "=", "IMAGE_MASK", "_filter", "=", "utils", ".", "NestedDict", "(", "kwargs", ".", "get", "(", "'filter'", ")", "or", "{", "}", ")", "if", "name", ":", "_filter", "[", "'privateBlockDeviceTemplateGroups'", "]", "[", "'name'", "]", "=", "(", "utils", ".", "query_filter", "(", "name", ")", ")", "if", "guid", ":", "_filter", "[", "'privateBlockDeviceTemplateGroups'", "]", "[", "'globalIdentifier'", "]", "=", "(", "utils", ".", "query_filter", "(", "guid", ")", ")", "kwargs", "[", "'filter'", "]", "=", "_filter", ".", "to_dict", "(", ")", "account", "=", "self", ".", "client", "[", "'Account'", "]", "return", "account", ".", "getPrivateBlockDeviceTemplateGroups", "(", "*", "*", "kwargs", ")" ]
List all private images. :param string guid: filter based on GUID :param string name: filter based on name :param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
[ "List", "all", "private", "images", "." ]
python
train
35.869565
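A hedged usage sketch for list_private_images; it assumes valid SoftLayer credentials (the username and API key below are placeholders), and extra response-level options such as limit pass straight through to the API call:

import SoftLayer

client = SoftLayer.create_client_from_env(username='myuser', api_key='myapikey')
image_mgr = SoftLayer.ImageManager(client)
# Filter private images by name; GUID filtering works the same way.
for image in image_mgr.list_private_images(name='golden-image', limit=10):
    print(image['id'], image['name'])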
rmed/pyemtmad
pyemtmad/util.py
https://github.com/rmed/pyemtmad/blob/c21c42d0c7b50035dfed29540d7e64ab67833728/pyemtmad/util.py#L87-L107
def datetime_string(day, month, year, hour, minute): """Build a date string using the provided day, month, year numbers. Automatically adds a leading zero to ``day`` and ``month`` if they only have one digit. Args: day (int): Day number. month(int): Month number. year(int): Year number. hour (int): Hour of the day in 24h format. minute (int): Minute of the hour. Returns: str: Date in the format *YYYY-MM-DDThh:mm:ss*. """ # Overflow if hour < 0 or hour > 23: hour = 0 if minute < 0 or minute > 60: minute = 0 return '%d-%02d-%02dT%02d:%02d:00' % (year, month, day, hour, minute)
[ "def", "datetime_string", "(", "day", ",", "month", ",", "year", ",", "hour", ",", "minute", ")", ":", "# Overflow", "if", "hour", "<", "0", "or", "hour", ">", "23", ":", "hour", "=", "0", "if", "minute", "<", "0", "or", "minute", ">", "60", ":", "minute", "=", "0", "return", "'%d-%02d-%02dT%02d:%02d:00'", "%", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ")" ]
Build a date string using the provided day, month, year numbers. Automatically adds a leading zero to ``day`` and ``month`` if they only have one digit. Args: day (int): Day number. month(int): Month number. year(int): Year number. hour (int): Hour of the day in 24h format. minute (int): Minute of the hour. Returns: str: Date in the format *YYYY-MM-DDThh:mm:ss*.
[ "Build", "a", "date", "string", "using", "the", "provided", "day", "month", "year", "numbers", "." ]
python
train
31.095238
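A quick check of the formatting and the overflow guard (values arbitrary):

print(datetime_string(5, 3, 2024, 14, 7))   # -> '2024-03-05T14:07:00'
print(datetime_string(5, 3, 2024, 99, 7))   # hour out of range, reset to 0
                                            # -> '2024-03-05T00:07:00'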
lreis2415/PyGeoC
pygeoc/utils.py
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L240-L284
def rsquare(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]] simvalues # type: Union[numpy.ndarray, List[Union[float, int]]] ): # type: (...) -> Union[float, numpy.ScalarType] """Calculate Coefficient of determination. Same as the square of the Pearson correlation coefficient (r), and, the same as the built-in Excel function RSQ(). Programmed according to equation (1) in Legates, D.R. and G.J. McCabe, 1999. Evaluating the use of "goodness of fit" measures in hydrologic and hydroclimatic model variation. Water Resources Research 35:233-241. Args: obsvalues: observe values array simvalues: simulate values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rsquare(obs, sim) # doctest: +ELLIPSIS 0.7528851650345053... Returns: R-square value, or raise exception """ if len(obsvalues) != len(simvalues): raise ValueError("The size of observed and simulated values must be " "the same for R-square calculation!") if not isinstance(obsvalues, numpy.ndarray): obsvalues = numpy.array(obsvalues) if not isinstance(simvalues, numpy.ndarray): simvalues = numpy.array(simvalues) obs_avg = numpy.mean(obsvalues) pred_avg = numpy.mean(simvalues) obs_minus_avg_sq = numpy.sum((obsvalues - obs_avg) ** 2) pred_minus_avg_sq = numpy.sum((simvalues - pred_avg) ** 2) obs_pred_minus_avgs = numpy.sum((obsvalues - obs_avg) * (simvalues - pred_avg)) # Calculate R-square yy = obs_minus_avg_sq ** 0.5 * pred_minus_avg_sq ** 0.5 if MathClass.floatequal(yy, 0.): return 1. return (obs_pred_minus_avgs / yy) ** 2.
[ "def", "rsquare", "(", "obsvalues", ",", "# type: Union[numpy.ndarray, List[Union[float, int]]]", "simvalues", "# type: Union[numpy.ndarray, List[Union[float, int]]]", ")", ":", "# type: (...) -> Union[float, numpy.ScalarType]", "if", "len", "(", "obsvalues", ")", "!=", "len", "(", "simvalues", ")", ":", "raise", "ValueError", "(", "\"The size of observed and simulated values must be \"", "\"the same for R-square calculation!\"", ")", "if", "not", "isinstance", "(", "obsvalues", ",", "numpy", ".", "ndarray", ")", ":", "obsvalues", "=", "numpy", ".", "array", "(", "obsvalues", ")", "if", "not", "isinstance", "(", "simvalues", ",", "numpy", ".", "ndarray", ")", ":", "simvalues", "=", "numpy", ".", "array", "(", "simvalues", ")", "obs_avg", "=", "numpy", ".", "mean", "(", "obsvalues", ")", "pred_avg", "=", "numpy", ".", "mean", "(", "simvalues", ")", "obs_minus_avg_sq", "=", "numpy", ".", "sum", "(", "(", "obsvalues", "-", "obs_avg", ")", "**", "2", ")", "pred_minus_avg_sq", "=", "numpy", ".", "sum", "(", "(", "simvalues", "-", "pred_avg", ")", "**", "2", ")", "obs_pred_minus_avgs", "=", "numpy", ".", "sum", "(", "(", "obsvalues", "-", "obs_avg", ")", "*", "(", "simvalues", "-", "pred_avg", ")", ")", "# Calculate R-square", "yy", "=", "obs_minus_avg_sq", "**", "0.5", "*", "pred_minus_avg_sq", "**", "0.5", "if", "MathClass", ".", "floatequal", "(", "yy", ",", "0.", ")", ":", "return", "1.", "return", "(", "obs_pred_minus_avgs", "/", "yy", ")", "**", "2." ]
Calculate Coefficient of determination. Same as the square of the Pearson correlation coefficient (r), and, the same as the built-in Excel function RSQ(). Programmed according to equation (1) in Legates, D.R. and G.J. McCabe, 1999. Evaluating the use of "goodness of fit" measures in hydrologic and hydroclimatic model variation. Water Resources Research 35:233-241. Args: obsvalues: observe values array simvalues: simulate values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rsquare(obs, sim) # doctest: +ELLIPSIS 0.7528851650345053... Returns: R-square value, or raise exception
[ "Calculate", "Coefficient", "of", "determination", "." ]
python
train
46.022222
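An independent cross-check of the doctest value above: R-square is the squared Pearson r, which NumPy computes directly via corrcoef:

import numpy

obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96, 4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85, 2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
# Squared Pearson correlation coefficient, ~0.7528851650345053,
# matching MathClass.rsquare(obs, sim).
print(numpy.corrcoef(obs, sim)[0, 1] ** 2)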
swharden/SWHLab
doc/uses/EPSCs-and-IPSCs/smooth histogram method/01.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/uses/EPSCs-and-IPSCs/smooth histogram method/01.py#L105-L175
def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False): """ m1 and m2, if given, are in seconds. returns [# EPSCs, # IPSCs] """ abf.setsweep(sweep) if m1 is None: m1=0 else: m1=m1*abf.pointsPerSec if m2 is None: m2=-1 else: m2=m2*abf.pointsPerSec # obtain X and Y Yorig=abf.sweepY[int(m1):int(m2)] X=np.arange(len(Yorig))/abf.pointsPerSec # start by lowpass filtering (1 direction) # Klpf=kernel_gaussian(size=abf.pointsPerMs*10,forwardOnly=True) # Ylpf=np.convolve(Yorig,Klpf,mode='same') # Y=Ylpf # commit Kmb=kernel_gaussian(size=abf.pointsPerMs*10,forwardOnly=True) Ymb=np.convolve(Yorig,Kmb,mode='same') Y=Yorig-Ymb # commit #Y1=np.copy(Y) #Y[np.where(Y>0)[0]]=np.power(Y,2) #Y[np.where(Y<0)[0]]=-np.power(Y,2) # event detection thresh=5 # threshold for an event hitPos=np.where(Y>thresh)[0] # area above the threshold hitNeg=np.where(Y<-thresh)[0] # area below the threshold hitPos=np.concatenate((hitPos,[len(Y)-1])) # helps with the diff() coming up hitNeg=np.concatenate((hitNeg,[len(Y)-1])) # helps with the diff() coming up hitsPos=hitPos[np.where(np.abs(np.diff(hitPos))>10)[0]] # time point of EPSC hitsNeg=hitNeg[np.where(np.abs(np.diff(hitNeg))>10)[0]] # time point of IPSC hitsNeg=hitsNeg[1:] # often the first one is in error #print(hitsNeg[0]) if plotToo: plt.figure(figsize=(10,5)) ax1=plt.subplot(211) plt.title("sweep %d: detected %d IPSCs (red) and %d EPSCs (blue)"%(sweep,len(hitsPos),len(hitsNeg))) plt.ylabel("delta pA") plt.grid() plt.plot(X,Yorig,color='k',alpha=.5) for hit in hitsPos: plt.plot(X[hit],Yorig[hit]+20,'r.',ms=20,alpha=.5) for hit in hitsNeg: plt.plot(X[hit],Yorig[hit]-20,'b.',ms=20,alpha=.5) plt.margins(0,.1) plt.subplot(212,sharex=ax1) plt.title("moving gaussian baseline subtraction used for threshold detection") plt.ylabel("delta pA") plt.grid() plt.axhline(thresh,color='r',ls='--',alpha=.5,lw=3) plt.axhline(-thresh,color='r',ls='--',alpha=.5,lw=3) plt.plot(X,Y,color='b',alpha=.5) plt.axis([X[0],X[-1],-thresh*1.5,thresh*1.5]) plt.tight_layout() if type(plotToo) is str and os.path.isdir(plotToo): print('saving %s/%05d.jpg'%(plotToo,sweep)) plt.savefig(plotToo+"/%05d.jpg"%sweep) else: plt.show() plt.close('all') return [len(hitsPos),len(hitsNeg)]
[ "def", "analyzeSweep", "(", "abf", ",", "sweep", ",", "m1", "=", "None", ",", "m2", "=", "None", ",", "plotToo", "=", "False", ")", ":", "abf", ".", "setsweep", "(", "sweep", ")", "if", "m1", "is", "None", ":", "m1", "=", "0", "else", ":", "m1", "=", "m1", "*", "abf", ".", "pointsPerSec", "if", "m2", "is", "None", ":", "m2", "=", "-", "1", "else", ":", "m2", "=", "m2", "*", "abf", ".", "pointsPerSec", "# obtain X and Y", "Yorig", "=", "abf", ".", "sweepY", "[", "int", "(", "m1", ")", ":", "int", "(", "m2", ")", "]", "X", "=", "np", ".", "arange", "(", "len", "(", "Yorig", ")", ")", "/", "abf", ".", "pointsPerSec", "# start by lowpass filtering (1 direction)", "# Klpf=kernel_gaussian(size=abf.pointsPerMs*10,forwardOnly=True)", "# Ylpf=np.convolve(Yorig,Klpf,mode='same')", "# Y=Ylpf # commit", "Kmb", "=", "kernel_gaussian", "(", "size", "=", "abf", ".", "pointsPerMs", "*", "10", ",", "forwardOnly", "=", "True", ")", "Ymb", "=", "np", ".", "convolve", "(", "Yorig", ",", "Kmb", ",", "mode", "=", "'same'", ")", "Y", "=", "Yorig", "-", "Ymb", "# commit", "#Y1=np.copy(Y)", "#Y[np.where(Y>0)[0]]=np.power(Y,2)", "#Y[np.where(Y<0)[0]]=-np.power(Y,2)", "# event detection", "thresh", "=", "5", "# threshold for an event", "hitPos", "=", "np", ".", "where", "(", "Y", ">", "thresh", ")", "[", "0", "]", "# area above the threshold", "hitNeg", "=", "np", ".", "where", "(", "Y", "<", "-", "thresh", ")", "[", "0", "]", "# area below the threshold", "hitPos", "=", "np", ".", "concatenate", "(", "(", "hitPos", ",", "[", "len", "(", "Y", ")", "-", "1", "]", ")", ")", "# helps with the diff() coming up", "hitNeg", "=", "np", ".", "concatenate", "(", "(", "hitNeg", ",", "[", "len", "(", "Y", ")", "-", "1", "]", ")", ")", "# helps with the diff() coming up", "hitsPos", "=", "hitPos", "[", "np", ".", "where", "(", "np", ".", "abs", "(", "np", ".", "diff", "(", "hitPos", ")", ")", ">", "10", ")", "[", "0", "]", "]", "# time point of EPSC", "hitsNeg", "=", "hitNeg", "[", "np", ".", "where", "(", "np", ".", "abs", "(", "np", ".", "diff", "(", "hitNeg", ")", ")", ">", "10", ")", "[", "0", "]", "]", "# time point of IPSC", "hitsNeg", "=", "hitsNeg", "[", "1", ":", "]", "# often the first one is in error", "#print(hitsNeg[0])", "if", "plotToo", ":", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "5", ")", ")", "ax1", "=", "plt", ".", "subplot", "(", "211", ")", "plt", ".", "title", "(", "\"sweep %d: detected %d IPSCs (red) and %d EPSCs (blue)\"", "%", "(", "sweep", ",", "len", "(", "hitsPos", ")", ",", "len", "(", "hitsNeg", ")", ")", ")", "plt", ".", "ylabel", "(", "\"delta pA\"", ")", "plt", ".", "grid", "(", ")", "plt", ".", "plot", "(", "X", ",", "Yorig", ",", "color", "=", "'k'", ",", "alpha", "=", ".5", ")", "for", "hit", "in", "hitsPos", ":", "plt", ".", "plot", "(", "X", "[", "hit", "]", ",", "Yorig", "[", "hit", "]", "+", "20", ",", "'r.'", ",", "ms", "=", "20", ",", "alpha", "=", ".5", ")", "for", "hit", "in", "hitsNeg", ":", "plt", ".", "plot", "(", "X", "[", "hit", "]", ",", "Yorig", "[", "hit", "]", "-", "20", ",", "'b.'", ",", "ms", "=", "20", ",", "alpha", "=", ".5", ")", "plt", ".", "margins", "(", "0", ",", ".1", ")", "plt", ".", "subplot", "(", "212", ",", "sharex", "=", "ax1", ")", "plt", ".", "title", "(", "\"moving gaussian baseline subtraction used for threshold detection\"", ")", "plt", ".", "ylabel", "(", "\"delta pA\"", ")", "plt", ".", "grid", "(", ")", "plt", ".", "axhline", "(", "thresh", ",", "color", "=", "'r'", ",", "ls", "=", "'--'", ",", "alpha", "=", ".5", ",", "lw", "=", 
"3", ")", "plt", ".", "axhline", "(", "-", "thresh", ",", "color", "=", "'r'", ",", "ls", "=", "'--'", ",", "alpha", "=", ".5", ",", "lw", "=", "3", ")", "plt", ".", "plot", "(", "X", ",", "Y", ",", "color", "=", "'b'", ",", "alpha", "=", ".5", ")", "plt", ".", "axis", "(", "[", "X", "[", "0", "]", ",", "X", "[", "-", "1", "]", ",", "-", "thresh", "*", "1.5", ",", "thresh", "*", "1.5", "]", ")", "plt", ".", "tight_layout", "(", ")", "if", "type", "(", "plotToo", ")", "is", "str", "and", "os", ".", "path", ".", "isdir", "(", "plotToo", ")", ":", "print", "(", "'saving %s/%05d.jpg'", "%", "(", "plotToo", ",", "sweep", ")", ")", "plt", ".", "savefig", "(", "plotToo", "+", "\"/%05d.jpg\"", "%", "sweep", ")", "else", ":", "plt", ".", "show", "(", ")", "plt", ".", "close", "(", "'all'", ")", "return", "[", "len", "(", "hitsPos", ")", ",", "len", "(", "hitsNeg", ")", "]" ]
m1 and m2, if given, are in seconds. returns [# EPSCs, # IPSCs]
[ "m1", "and", "m2", "if", "given", "are", "in", "seconds", ".", "returns", "[", "#", "EPSCs", "#", "IPSCs", "]" ]
python
valid
35.28169
wcember/pypub
pypub/clean.py
https://github.com/wcember/pypub/blob/88a1adc2ccf6f02c33adea1d2d52c729128216fb/pypub/clean.py#L11-L33
def create_html_from_fragment(tag):
    """
    Creates full html tree from a fragment. Assumes that tag should be wrapped
    in a body and is currently not

    Args:
        tag: a bs4.element.Tag
    Returns:
        bs4.element.Tag: A bs4 tag representing a full html document
    """

    try:
        assert isinstance(tag, bs4.element.Tag)
    except AssertionError:
        raise TypeError

    try:
        assert tag.find_all('body') == []
    except AssertionError:
        raise ValueError

    soup = BeautifulSoup('<html><head></head><body></body></html>', 'html.parser')
    soup.body.append(tag)
    return soup
[ "def", "create_html_from_fragment", "(", "tag", ")", ":", "try", ":", "assert", "isinstance", "(", "tag", ",", "bs4", ".", "element", ".", "Tag", ")", "except", "AssertionError", ":", "raise", "TypeError", "try", ":", "assert", "tag", ".", "find_all", "(", "'body'", ")", "==", "[", "]", "except", "AssertionError", ":", "raise", "ValueError", "soup", "=", "BeautifulSoup", "(", "'<html><head></head><body></body></html>'", ",", "'html.parser'", ")", "soup", ".", "body", ".", "append", "(", "tag", ")", "return", "soup" ]
Creates full html tree from a fragment. Assumes that tag should be wrapped in a body and is currently not Args: tag: a bs4.element.Tag Returns: bs4.element.Tag: A bs4 tag representing a full html document
[ "Creates", "full", "html", "tree", "from", "a", "fragment", ".", "Assumes", "that", "tag", "should", "be", "wrapped", "in", "a", "body", "and", "is", "currently", "not" ]
python
train
26.217391
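A minimal round trip with BeautifulSoup (the fragment is arbitrary):

import bs4

fragment = bs4.BeautifulSoup('<p>Hello, epub</p>', 'html.parser').p
soup = create_html_from_fragment(fragment)
print(soup)
# -> <html><head></head><body><p>Hello, epub</p></body></html>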
ryanjdillon/pylleo
pylleo/lleocal.py
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L41-L85
def read_cal(cal_yaml_path): '''Load calibration file if exists, else create Args ---- cal_yaml_path: str Path to calibration YAML file Returns ------- cal_dict: dict Key value pairs of calibration meta data ''' from collections import OrderedDict import datetime import os import warnings import yamlord from . import utils def __create_cal(cal_yaml_path): cal_dict = OrderedDict() # Add experiment name for calibration reference base_path, _ = os.path.split(cal_yaml_path) _, exp_name = os.path.split(base_path) cal_dict['experiment'] = exp_name return cal_dict # Try reading cal file, else create if os.path.isfile(cal_yaml_path): cal_dict = yamlord.read_yaml(cal_yaml_path) else: cal_dict = __create_cal(cal_yaml_path) cal_dict['parameters'] = OrderedDict() for key, val in utils.parse_experiment_params(cal_dict['experiment']).items(): cal_dict[key] = val fmt = "%Y-%m-%d %H:%M:%S" cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt) return cal_dict
[ "def", "read_cal", "(", "cal_yaml_path", ")", ":", "from", "collections", "import", "OrderedDict", "import", "datetime", "import", "os", "import", "warnings", "import", "yamlord", "from", ".", "import", "utils", "def", "__create_cal", "(", "cal_yaml_path", ")", ":", "cal_dict", "=", "OrderedDict", "(", ")", "# Add experiment name for calibration reference", "base_path", ",", "_", "=", "os", ".", "path", ".", "split", "(", "cal_yaml_path", ")", "_", ",", "exp_name", "=", "os", ".", "path", ".", "split", "(", "base_path", ")", "cal_dict", "[", "'experiment'", "]", "=", "exp_name", "return", "cal_dict", "# Try reading cal file, else create", "if", "os", ".", "path", ".", "isfile", "(", "cal_yaml_path", ")", ":", "cal_dict", "=", "yamlord", ".", "read_yaml", "(", "cal_yaml_path", ")", "else", ":", "cal_dict", "=", "__create_cal", "(", "cal_yaml_path", ")", "cal_dict", "[", "'parameters'", "]", "=", "OrderedDict", "(", ")", "for", "key", ",", "val", "in", "utils", ".", "parse_experiment_params", "(", "cal_dict", "[", "'experiment'", "]", ")", ".", "items", "(", ")", ":", "cal_dict", "[", "key", "]", "=", "val", "fmt", "=", "\"%Y-%m-%d %H:%M:%S\"", "cal_dict", "[", "'date_modified'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "fmt", ")", "return", "cal_dict" ]
Load calibration file if exists, else create Args ---- cal_yaml_path: str Path to calibration YAML file Returns ------- cal_dict: dict Key value pairs of calibration meta data
[ "Load", "calibration", "file", "if", "exists", "else", "create" ]
python
train
24.844444
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L3202-L3228
def __set_last_page_screenshot(self): """ self.__last_page_screenshot is only for pytest html report logs self.__last_page_screenshot_png is for all screenshot log files """ if not self.__last_page_screenshot and ( not self.__last_page_screenshot_png): try: element = self.driver.find_element_by_tag_name('body') if self.is_pytest and self.report_on: self.__last_page_screenshot_png = ( self.driver.get_screenshot_as_png()) self.__last_page_screenshot = element.screenshot_as_base64 else: self.__last_page_screenshot_png = element.screenshot_as_png except Exception: if not self.__last_page_screenshot: if self.is_pytest and self.report_on: try: self.__last_page_screenshot = ( self.driver.get_screenshot_as_base64()) except Exception: pass if not self.__last_page_screenshot_png: try: self.__last_page_screenshot_png = ( self.driver.get_screenshot_as_png()) except Exception: pass
[ "def", "__set_last_page_screenshot", "(", "self", ")", ":", "if", "not", "self", ".", "__last_page_screenshot", "and", "(", "not", "self", ".", "__last_page_screenshot_png", ")", ":", "try", ":", "element", "=", "self", ".", "driver", ".", "find_element_by_tag_name", "(", "'body'", ")", "if", "self", ".", "is_pytest", "and", "self", ".", "report_on", ":", "self", ".", "__last_page_screenshot_png", "=", "(", "self", ".", "driver", ".", "get_screenshot_as_png", "(", ")", ")", "self", ".", "__last_page_screenshot", "=", "element", ".", "screenshot_as_base64", "else", ":", "self", ".", "__last_page_screenshot_png", "=", "element", ".", "screenshot_as_png", "except", "Exception", ":", "if", "not", "self", ".", "__last_page_screenshot", ":", "if", "self", ".", "is_pytest", "and", "self", ".", "report_on", ":", "try", ":", "self", ".", "__last_page_screenshot", "=", "(", "self", ".", "driver", ".", "get_screenshot_as_base64", "(", ")", ")", "except", "Exception", ":", "pass", "if", "not", "self", ".", "__last_page_screenshot_png", ":", "try", ":", "self", ".", "__last_page_screenshot_png", "=", "(", "self", ".", "driver", ".", "get_screenshot_as_png", "(", ")", ")", "except", "Exception", ":", "pass" ]
self.__last_page_screenshot is only for pytest html report logs self.__last_page_screenshot_png is for all screenshot log files
[ "self", ".", "__last_page_screenshot", "is", "only", "for", "pytest", "html", "report", "logs", "self", ".", "__last_page_screenshot_png", "is", "for", "all", "screenshot", "log", "files" ]
python
train
50.333333
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3497-L3516
def dvcrss(s1, s2): """ Compute the cross product of two 3-dimensional vectors and the derivative of this cross product. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvcrss_c.html :param s1: Left hand state for cross product and derivative. :type s1: 6-Element Array of floats :param s2: Right hand state for cross product and derivative. :type s2: 6-Element Array of floats :return: State associated with cross product of positions. :rtype: 6-Element Array of floats """ assert len(s1) is 6 and len(s2) is 6 s1 = stypes.toDoubleVector(s1) s2 = stypes.toDoubleVector(s2) sout = stypes.emptyDoubleVector(6) libspice.dvcrss_c(s1, s2, sout) return stypes.cVectorToPython(sout)
[ "def", "dvcrss", "(", "s1", ",", "s2", ")", ":", "assert", "len", "(", "s1", ")", "is", "6", "and", "len", "(", "s2", ")", "is", "6", "s1", "=", "stypes", ".", "toDoubleVector", "(", "s1", ")", "s2", "=", "stypes", ".", "toDoubleVector", "(", "s2", ")", "sout", "=", "stypes", ".", "emptyDoubleVector", "(", "6", ")", "libspice", ".", "dvcrss_c", "(", "s1", ",", "s2", ",", "sout", ")", "return", "stypes", ".", "cVectorToPython", "(", "sout", ")" ]
Compute the cross product of two 3-dimensional vectors and the derivative of this cross product. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvcrss_c.html :param s1: Left hand state for cross product and derivative. :type s1: 6-Element Array of floats :param s2: Right hand state for cross product and derivative. :type s2: 6-Element Array of floats :return: State associated with cross product of positions. :rtype: 6-Element Array of floats
[ "Compute", "the", "cross", "product", "of", "two", "3", "-", "dimensional", "vectors", "and", "the", "derivative", "of", "this", "cross", "product", "." ]
python
train
36.75
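A small numeric sketch for dvcrss using two states of the form [x, y, z, dx, dy, dz]; by the product rule d/dt(p1 x p2) = v1 x p2 + p1 x v2, so the result below can be checked by hand:

import spiceypy

s1 = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]  # position +X, velocity +Y
s2 = [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]  # position +Z, zero velocity
print(spiceypy.dvcrss(s1, s2))
# position part: (1,0,0) x (0,0,1) = (0,-1,0)
# velocity part: v1 x p2 + p1 x v2 = (0,1,0) x (0,0,1) + 0 = (1,0,0)
# -> [0.0, -1.0, 0.0, 1.0, 0.0, 0.0]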
dave-shawley/glinda
glinda/content.py
https://github.com/dave-shawley/glinda/blob/6dec43549d5b1767467174aa3d7fa2425bc25f66/glinda/content.py#L74-L99
def register_text_type(content_type, default_encoding, dumper, loader): """ Register handling for a text-based content type. :param str content_type: content type to register the hooks for :param str default_encoding: encoding to use if none is present in the request :param dumper: called to decode a string into a dictionary. Calling convention: ``dumper(obj_dict).encode(encoding) -> bytes`` :param loader: called to encode a dictionary to a string. Calling convention: ``loader(obj_bytes.decode(encoding)) -> dict`` The decoding of a text content body takes into account decoding the binary request body into a string before calling the underlying dump/load routines. """ content_type = headers.parse_content_type(content_type) content_type.parameters.clear() key = str(content_type) _content_types[key] = content_type handler = _content_handlers.setdefault(key, _ContentHandler(key)) handler.dict_to_string = dumper handler.string_to_dict = loader handler.default_encoding = default_encoding or handler.default_encoding
[ "def", "register_text_type", "(", "content_type", ",", "default_encoding", ",", "dumper", ",", "loader", ")", ":", "content_type", "=", "headers", ".", "parse_content_type", "(", "content_type", ")", "content_type", ".", "parameters", ".", "clear", "(", ")", "key", "=", "str", "(", "content_type", ")", "_content_types", "[", "key", "]", "=", "content_type", "handler", "=", "_content_handlers", ".", "setdefault", "(", "key", ",", "_ContentHandler", "(", "key", ")", ")", "handler", ".", "dict_to_string", "=", "dumper", "handler", ".", "string_to_dict", "=", "loader", "handler", ".", "default_encoding", "=", "default_encoding", "or", "handler", ".", "default_encoding" ]
Register handling for a text-based content type. :param str content_type: content type to register the hooks for :param str default_encoding: encoding to use if none is present in the request :param dumper: called to decode a string into a dictionary. Calling convention: ``dumper(obj_dict).encode(encoding) -> bytes`` :param loader: called to encode a dictionary to a string. Calling convention: ``loader(obj_bytes.decode(encoding)) -> dict`` The decoding of a text content body takes into account decoding the binary request body into a string before calling the underlying dump/load routines.
[ "Register", "handling", "for", "a", "text", "-", "based", "content", "type", "." ]
python
train
42.269231
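A hedged wiring sketch: json.dumps and json.loads satisfy the dumper/loader calling conventions documented above (str in, str out), so registering JSON support could look like this:

import json

register_text_type('application/json', default_encoding='utf-8',
                   dumper=json.dumps, loader=json.loads)
# A request body of b'{"name": "value"}' would now be decoded as UTF-8 by
# default and loaded through json.loads into {'name': 'value'}.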
stephen-bunn/file-config
src/file_config/_file_config.py
https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/_file_config.py#L429-L440
def from_dict(config_cls, dictionary, validate=False):
    """ Loads an instance of ``config_cls`` from a dictionary.

    :param type config_cls: The class to build an instance of
    :param dict dictionary: The dictionary to load from
    :param bool validate: Performs validation before building ``config_cls``,
        defaults to False, optional
    :return: An instance of ``config_cls``
    :rtype: object
    """

    return _build(config_cls, dictionary, validate=validate)
[ "def", "from_dict", "(", "config_cls", ",", "dictionary", ",", "validate", "=", "False", ")", ":", "return", "_build", "(", "config_cls", ",", "dictionary", ",", "validate", "=", "validate", ")" ]
Loads an instance of ``config_cls`` from a dictionary. :param type config_cls: The class to build an instance of :param dict dictionary: The dictionary to load from :param bool validate: Performs validation before building ``config_cls``, defaults to False, optional :return: An instance of ``config_cls`` :rtype: object
[ "Loads", "an", "instance", "of", "config_cls", "from", "a", "dictionary", "." ]
python
train
39.25
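A hedged round-trip sketch, assuming the library's config decorator and var() attribute helper; the AppConfig class is hypothetical:

import file_config

@file_config.config
class AppConfig(object):
    name = file_config.var(str)
    port = file_config.var(int, default=8080)

config = file_config.from_dict(AppConfig, {'name': 'demo', 'port': 9000},
                               validate=True)
print(config.name, config.port)  # -> demo 9000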
rytilahti/python-songpal
songpal/method.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/method.py#L107-L115
def asdict(self) -> Dict[str, Union[Dict, Union[str, Dict]]]: """Return a dictionary describing the method. This can be used to dump the information into a JSON file. """ return { "service": self.service.name, **self.signature.serialize(), }
[ "def", "asdict", "(", "self", ")", "->", "Dict", "[", "str", ",", "Union", "[", "Dict", ",", "Union", "[", "str", ",", "Dict", "]", "]", "]", ":", "return", "{", "\"service\"", ":", "self", ".", "service", ".", "name", ",", "*", "*", "self", ".", "signature", ".", "serialize", "(", ")", ",", "}" ]
Return a dictionary describing the method. This can be used to dump the information into a JSON file.
[ "Return", "a", "dictionary", "describing", "the", "method", "." ]
python
train
33.111111
user-cont/colin
colin/utils/cmd_tools.py
https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/utils/cmd_tools.py#L88-L97
def is_rpm_installed(): """Tests if the rpm command is present.""" try: version_result = subprocess.run(["rpm", "--usage"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) rpm_installed = not version_result.returncode except FileNotFoundError: rpm_installed = False return rpm_installed
[ "def", "is_rpm_installed", "(", ")", ":", "try", ":", "version_result", "=", "subprocess", ".", "run", "(", "[", "\"rpm\"", ",", "\"--usage\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "rpm_installed", "=", "not", "version_result", ".", "returncode", "except", "FileNotFoundError", ":", "rpm_installed", "=", "False", "return", "rpm_installed" ]
Tests if the rpm command is present.
[ "Tests", "if", "the", "rpm", "command", "is", "present", "." ]
python
train
39.7
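Usage is a plain boolean check; note that subprocess.run requires Python 3.5+:

if is_rpm_installed():
    print('rpm is available on this host')
else:
    print('rpm binary missing or not functional')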
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L80-L99
def multinomial_sample(x, vocab_size=None, sampling_method="random", temperature=1.0): """Multinomial sampling from a n-dimensional tensor. Args: x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial. vocab_size: Number of classes in multinomial distribution. sampling_method: String, "random" or otherwise deterministic. temperature: Positive float. Returns: Tensor of shape [...]. """ vocab_size = vocab_size or common_layers.shape_list(x)[-1] if sampling_method == "random" and temperature > 0.0: samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1) else: samples = tf.argmax(x, axis=-1) reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1]) return reshaped_samples
[ "def", "multinomial_sample", "(", "x", ",", "vocab_size", "=", "None", ",", "sampling_method", "=", "\"random\"", ",", "temperature", "=", "1.0", ")", ":", "vocab_size", "=", "vocab_size", "or", "common_layers", ".", "shape_list", "(", "x", ")", "[", "-", "1", "]", "if", "sampling_method", "==", "\"random\"", "and", "temperature", ">", "0.0", ":", "samples", "=", "tf", ".", "multinomial", "(", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "vocab_size", "]", ")", "/", "temperature", ",", "1", ")", "else", ":", "samples", "=", "tf", ".", "argmax", "(", "x", ",", "axis", "=", "-", "1", ")", "reshaped_samples", "=", "tf", ".", "reshape", "(", "samples", ",", "common_layers", ".", "shape_list", "(", "x", ")", "[", ":", "-", "1", "]", ")", "return", "reshaped_samples" ]
Multinomial sampling from a n-dimensional tensor. Args: x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial. vocab_size: Number of classes in multinomial distribution. sampling_method: String, "random" or otherwise deterministic. temperature: Positive float. Returns: Tensor of shape [...].
[ "Multinomial", "sampling", "from", "a", "n", "-", "dimensional", "tensor", "." ]
python
train
39.05
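A hedged TF1-style sketch (tf.multinomial predates tf.random.categorical, so a graph-mode session is assumed, along with multinomial_sample and its common_layers dependency being importable from tensor2tensor); the logits are arbitrary, and temperature=0.0 takes the deterministic argmax branch:

import tensorflow as tf

logits = tf.constant([[[1.0, 2.0, 5.0], [3.0, 0.5, 0.5]]])  # shape [1, 2, 3]
sampled = multinomial_sample(logits, sampling_method='random', temperature=0.7)
greedy = multinomial_sample(logits, temperature=0.0)  # argmax path
with tf.Session() as sess:
    print(sess.run([sampled, greedy]))  # both shaped [1, 2]; greedy -> [[2, 0]]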
HazyResearch/pdftotree
pdftotree/utils/img_utils.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/img_utils.py#L52-L57
def normalize_pts(pts, ymax, scaler=2):
    """
    scales all coordinates and flips the y axis due to different origin
    coordinates (top left vs. bottom left)
    """
    return [(x * scaler, ymax - (y * scaler)) for x, y in pts]
[ "def", "normalize_pts", "(", "pts", ",", "ymax", ",", "scaler", "=", "2", ")", ":", "return", "[", "(", "x", "*", "scaler", ",", "ymax", "-", "(", "y", "*", "scaler", ")", ")", "for", "x", ",", "y", "in", "pts", "]" ]
scales all coordinates and flips the y axis due to different origin coordinates (top left vs. bottom left)
[ "scales", "all", "coordinates", "and", "flips", "the", "y", "axis", "due", "to", "different", "origin", "coordinates", "(", "top", "left", "vs", ".", "bottom", "left", ")" ]
python
train
37.166667
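A quick numeric check: each point is scaled by the default scaler of 2 and its y value flipped against ymax:

print(normalize_pts([(1, 2), (3, 4)], ymax=100))
# -> [(2, 96), (6, 92)]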
pywbem/pywbem
pywbem/mof_compiler.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/mof_compiler.py#L1426-L1439
def p_scope(p): """scope : ',' SCOPE '(' metaElementList ')'""" slist = p[4] scopes = OrderedDict() for i in ('CLASS', 'ASSOCIATION', 'INDICATION', 'PROPERTY', 'REFERENCE', 'METHOD', 'PARAMETER', 'ANY'): scopes[i] = i in slist p[0] = scopes
[ "def", "p_scope", "(", "p", ")", ":", "slist", "=", "p", "[", "4", "]", "scopes", "=", "OrderedDict", "(", ")", "for", "i", "in", "(", "'CLASS'", ",", "'ASSOCIATION'", ",", "'INDICATION'", ",", "'PROPERTY'", ",", "'REFERENCE'", ",", "'METHOD'", ",", "'PARAMETER'", ",", "'ANY'", ")", ":", "scopes", "[", "i", "]", "=", "i", "in", "slist", "p", "[", "0", "]", "=", "scopes" ]
scope : ',' SCOPE '(' metaElementList ')'
[ "scope", ":", "SCOPE", "(", "metaElementList", ")" ]
python
train
25.214286
phareous/insteonlocal
insteonlocal/Switch.py
https://github.com/phareous/insteonlocal/blob/a4544a17d143fb285852cb873e862c270d55dd00/insteonlocal/Switch.py#L33-L37
def start_all_linking(self, link_type, group_id): """Start all linking""" self.logger.info("Start_all_linking for device %s type %s group %s", self.device_id, link_type, group_id) self.hub.direct_command(self.device_id, '02', '64' + link_type + group_id)
[ "def", "start_all_linking", "(", "self", ",", "link_type", ",", "group_id", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Start_all_linking for device %s type %s group %s\"", ",", "self", ".", "device_id", ",", "link_type", ",", "group_id", ")", "self", ".", "hub", ".", "direct_command", "(", "self", ".", "device_id", ",", "'02'", ",", "'64'", "+", "link_type", "+", "group_id", ")" ]
Start all linking
[ "Start", "all", "linking" ]
python
train
59.8
Jajcus/pyxmpp2
pyxmpp2/xmppstringprep.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/xmppstringprep.py#L101-L135
def prepare(self, data):
        """Complete string preparation procedure for 'stored' strings.
        (includes checks for unassigned codes)

        :Parameters:
            - `data`: Unicode string to prepare.

        :return: prepared string

        :raise StringprepError: if the preparation fails
        """
        ret = self.cache.get(data)
        if ret is not None:
            return ret
        result = self.map(data)
        if self.normalization:
            result = self.normalization(result)
        result = self.prohibit(result)
        result = self.check_unassigned(result)
        if self.bidi:
            result = self.check_bidi(result)
        if isinstance(result, list):
            result = u"".join(result)
        if len(self.cache_items) >= _stringprep_cache_size:
            remove = self.cache_items[: -_stringprep_cache_size // 2]
            for profile, key in remove:
                try:
                    del profile.cache[key]
                except KeyError:
                    pass
            self.cache_items[:] = self.cache_items[
                                            -_stringprep_cache_size // 2 :]
        self.cache_items.append((self, data))
        self.cache[data] = result
        return result
[ "def", "prepare", "(", "self", ",", "data", ")", ":", "ret", "=", "self", ".", "cache", ".", "get", "(", "data", ")", "if", "ret", "is", "not", "None", ":", "return", "ret", "result", "=", "self", ".", "map", "(", "data", ")", "if", "self", ".", "normalization", ":", "result", "=", "self", ".", "normalization", "(", "result", ")", "result", "=", "self", ".", "prohibit", "(", "result", ")", "result", "=", "self", ".", "check_unassigned", "(", "result", ")", "if", "self", ".", "bidi", ":", "result", "=", "self", ".", "check_bidi", "(", "result", ")", "if", "isinstance", "(", "result", ",", "list", ")", ":", "result", "=", "u\"\"", ".", "join", "(", ")", "if", "len", "(", "self", ".", "cache_items", ")", ">=", "_stringprep_cache_size", ":", "remove", "=", "self", ".", "cache_items", "[", ":", "-", "_stringprep_cache_size", "//", "2", "]", "for", "profile", ",", "key", "in", "remove", ":", "try", ":", "del", "profile", ".", "cache", "[", "key", "]", "except", "KeyError", ":", "pass", "self", ".", "cache_items", "[", ":", "]", "=", "self", ".", "cache_items", "[", "-", "_stringprep_cache_size", "//", "2", ":", "]", "self", ".", "cache_items", ".", "append", "(", "(", "self", ",", "data", ")", ")", "self", ".", "cache", "[", "data", "]", "=", "result", "return", "result" ]
Complete string preparation procedure for 'stored' strings. (includes checks for unassigned codes) :Parameters: - `data`: Unicode string to prepare. :return: prepared string :raise StringprepError: if the preparation fails
[ "Complete", "string", "preparation", "procedure", "for", "stored", "strings", ".", "(", "includes", "checks", "for", "unassigned", "codes", ")" ]
python
valid
35.085714
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L157-L170
def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command"
[ "def", "deregister_image", "(", "self", ",", "ami_id", ",", "region", "=", "'us-east-1'", ")", ":", "deregister_cmd", "=", "\"aws ec2 --profile {} --region {} deregister-image --image-id {}\"", ".", "format", "(", "self", ".", "aws_project", ",", "region", ",", "ami_id", ")", "print", "\"De-registering old image, now that the new one exists.\"", "print", "\"De-registering cmd: {}\"", ".", "format", "(", "deregister_cmd", ")", "res", "=", "subprocess", ".", "check_output", "(", "shlex", ".", "split", "(", "deregister_cmd", ")", ")", "print", "\"Response: {}\"", ".", "format", "(", "res", ")", "print", "\"Not monitoring de-register command\"" ]
Deregister an AMI by id :param ami_id: :param region: region to deregister from :return:
[ "Deregister", "an", "AMI", "by", "id", ":", "param", "ami_id", ":", ":", "param", "region", ":", "region", "to", "deregister", "from", ":", "return", ":" ]
python
train
44.642857
RudolfCardinal/pythonlib
cardinal_pythonlib/interval.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/interval.py#L797-L805
def total_duration(self) -> datetime.timedelta: """ Returns a ``datetime.timedelta`` object with the total sum of durations. If there is overlap, time will be double-counted, so beware! """ total = datetime.timedelta() for interval in self.intervals: total += interval.duration() return total
[ "def", "total_duration", "(", "self", ")", "->", "datetime", ".", "timedelta", ":", "total", "=", "datetime", ".", "timedelta", "(", ")", "for", "interval", "in", "self", ".", "intervals", ":", "total", "+=", "interval", ".", "duration", "(", ")", "return", "total" ]
Returns a ``datetime.timedelta`` object with the total sum of durations. If there is overlap, time will be double-counted, so beware!
[ "Returns", "a", "datetime", ".", "timedelta", "object", "with", "the", "total", "sum", "of", "durations", ".", "If", "there", "is", "overlap", "time", "will", "be", "double", "-", "counted", "so", "beware!" ]
python
train
39.111111
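A hedged sketch, assuming the module's Interval and IntervalList classes (total_duration is a method on the interval container) and an Interval(start, end) constructor; note the deliberate 30-minute overlap being counted twice:

import datetime
from cardinal_pythonlib.interval import Interval, IntervalList

a = Interval(datetime.datetime(2024, 1, 1, 9, 0), datetime.datetime(2024, 1, 1, 10, 0))
b = Interval(datetime.datetime(2024, 1, 1, 9, 30), datetime.datetime(2024, 1, 1, 10, 30))
print(IntervalList([a, b]).total_duration())
# -> 2:00:00, even though the union only spans 1.5 hours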
PaloAltoNetworks/pancloud
pancloud/credentials.py
https://github.com/PaloAltoNetworks/pancloud/blob/c51e4c8aca3c988c60f062291007534edcb55285/pancloud/credentials.py#L502-L533
def revoke_access_token(self, **kwargs): """Revoke access token.""" c = self.get_credentials() data = { 'client_id': c.client_id, 'client_secret': c.client_secret, 'token': c.access_token, 'token_type_hint': 'access_token' } r = self._httpclient.request( method='POST', url=self.token_url, json=data, path='/api/oauth2/RevokeToken', **kwargs ) if not r.ok: raise PanCloudError( '%s %s: %s' % (r.status_code, r.reason, r.text) ) try: r_json = r.json() except ValueError as e: raise PanCloudError("Invalid JSON: %s" % e) else: if r.json().get( 'error_description' ) or r.json().get( 'error' ): raise PanCloudError(r.text) return r_json
[ "def", "revoke_access_token", "(", "self", ",", "*", "*", "kwargs", ")", ":", "c", "=", "self", ".", "get_credentials", "(", ")", "data", "=", "{", "'client_id'", ":", "c", ".", "client_id", ",", "'client_secret'", ":", "c", ".", "client_secret", ",", "'token'", ":", "c", ".", "access_token", ",", "'token_type_hint'", ":", "'access_token'", "}", "r", "=", "self", ".", "_httpclient", ".", "request", "(", "method", "=", "'POST'", ",", "url", "=", "self", ".", "token_url", ",", "json", "=", "data", ",", "path", "=", "'/api/oauth2/RevokeToken'", ",", "*", "*", "kwargs", ")", "if", "not", "r", ".", "ok", ":", "raise", "PanCloudError", "(", "'%s %s: %s'", "%", "(", "r", ".", "status_code", ",", "r", ".", "reason", ",", "r", ".", "text", ")", ")", "try", ":", "r_json", "=", "r", ".", "json", "(", ")", "except", "ValueError", "as", "e", ":", "raise", "PanCloudError", "(", "\"Invalid JSON: %s\"", "%", "e", ")", "else", ":", "if", "r", ".", "json", "(", ")", ".", "get", "(", "'error_description'", ")", "or", "r", ".", "json", "(", ")", ".", "get", "(", "'error'", ")", ":", "raise", "PanCloudError", "(", "r", ".", "text", ")", "return", "r_json" ]
Revoke access token.
[ "Revoke", "access", "token", "." ]
python
train
29.6875
PythonCharmers/python-future
src/future/backports/email/_header_value_parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L1789-L1826
def get_local_part(value): """ local-part = dot-atom / quoted-string / obs-local-part """ local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError( "expected local-part but found '{}'".format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get_obs_local_part(str(local_part) + value) if obs_local_part.token_type == 'invalid-obs-local-part': local_part.defects.append(errors.InvalidHeaderDefect( "local-part is not dot-atom, quoted-string, or obs-local-part")) else: local_part.defects.append(errors.ObsoleteHeaderDefect( "local-part is not a dot-atom (contains CFWS)")) local_part[0] = obs_local_part try: local_part.value.encode('ascii') except UnicodeEncodeError: local_part.defects.append(errors.NonASCIILocalPartDefect( "local-part contains non-ASCII characters)")) return local_part, value
[ "def", "get_local_part", "(", "value", ")", ":", "local_part", "=", "LocalPart", "(", ")", "leader", "=", "None", "if", "value", "[", "0", "]", "in", "CFWS_LEADER", ":", "leader", ",", "value", "=", "get_cfws", "(", "value", ")", "if", "not", "value", ":", "raise", "errors", ".", "HeaderParseError", "(", "\"expected local-part but found '{}'\"", ".", "format", "(", "value", ")", ")", "try", ":", "token", ",", "value", "=", "get_dot_atom", "(", "value", ")", "except", "errors", ".", "HeaderParseError", ":", "try", ":", "token", ",", "value", "=", "get_word", "(", "value", ")", "except", "errors", ".", "HeaderParseError", ":", "if", "value", "[", "0", "]", "!=", "'\\\\'", "and", "value", "[", "0", "]", "in", "PHRASE_ENDS", ":", "raise", "token", "=", "TokenList", "(", ")", "if", "leader", "is", "not", "None", ":", "token", "[", ":", "0", "]", "=", "[", "leader", "]", "local_part", ".", "append", "(", "token", ")", "if", "value", "and", "(", "value", "[", "0", "]", "==", "'\\\\'", "or", "value", "[", "0", "]", "not", "in", "PHRASE_ENDS", ")", ":", "obs_local_part", ",", "value", "=", "get_obs_local_part", "(", "str", "(", "local_part", ")", "+", "value", ")", "if", "obs_local_part", ".", "token_type", "==", "'invalid-obs-local-part'", ":", "local_part", ".", "defects", ".", "append", "(", "errors", ".", "InvalidHeaderDefect", "(", "\"local-part is not dot-atom, quoted-string, or obs-local-part\"", ")", ")", "else", ":", "local_part", ".", "defects", ".", "append", "(", "errors", ".", "ObsoleteHeaderDefect", "(", "\"local-part is not a dot-atom (contains CFWS)\"", ")", ")", "local_part", "[", "0", "]", "=", "obs_local_part", "try", ":", "local_part", ".", "value", ".", "encode", "(", "'ascii'", ")", "except", "UnicodeEncodeError", ":", "local_part", ".", "defects", ".", "append", "(", "errors", ".", "NonASCIILocalPartDefect", "(", "\"local-part contains non-ASCII characters)\"", ")", ")", "return", "local_part", ",", "value" ]
local-part = dot-atom / quoted-string / obs-local-part
[ "local", "-", "part", "=", "dot", "-", "atom", "/", "quoted", "-", "string", "/", "obs", "-", "local", "-", "part" ]
python
train
38.5
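A small sketch against the backported parser; the address is illustrative. get_local_part consumes up to the '@' and returns the remainder:

from future.backports.email._header_value_parser import get_local_part

local_part, rest = get_local_part('john.doe@example.com')
print(str(local_part), '|', rest)
# -> john.doe | @example.com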
saltstack/salt
salt/utils/win_dacl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_dacl.py#L1864-L2029
def copy_security(source, target, obj_type='file', copy_owner=True, copy_group=True, copy_dacl=True, copy_sacl=True): r''' Copy the security descriptor of the Source to the Target. You can specify a specific portion of the security descriptor to copy using one of the `copy_*` parameters. .. note:: At least one `copy_*` parameter must be ``True`` .. note:: The user account running this command must have the following privileges: - SeTakeOwnershipPrivilege - SeRestorePrivilege - SeSecurityPrivilege Args: source (str): The full path to the source. This is where the security info will be copied from target (str): The full path to the target. This is where the security info will be applied obj_type (str): file The type of object to query. This value changes the format of the ``obj_name`` parameter as follows: - file: indicates a file or directory - a relative path, such as ``FileName.txt`` or ``..\FileName`` - an absolute path, such as ``C:\DirName\FileName.txt`` - A UNC name, such as ``\\ServerName\ShareName\FileName.txt`` - service: indicates the name of a Windows service - printer: indicates the name of a printer - registry: indicates a registry key - Uses the following literal strings to denote the hive: - HKEY_LOCAL_MACHINE - MACHINE - HKLM - HKEY_USERS - USERS - HKU - HKEY_CURRENT_USER - CURRENT_USER - HKCU - HKEY_CLASSES_ROOT - CLASSES_ROOT - HKCR - Should be in the format of ``HIVE\Path\To\Key``. For example, ``HKLM\SOFTWARE\Windows`` - registry32: indicates a registry key under WOW64. Formatting is the same as it is for ``registry`` - share: indicates a network share copy_owner (bool): True ``True`` copies owner information. Default is ``True`` copy_group (bool): True ``True`` copies group information. Default is ``True`` copy_dacl (bool): True ``True`` copies the DACL. Default is ``True`` copy_sacl (bool): True ``True`` copies the SACL. Default is ``True`` Returns: bool: ``True`` if successful Raises: SaltInvocationError: When parameters are invalid CommandExecutionError: On failure to set security Usage: .. 
code-block:: python salt.utils.win_dacl.copy_security( source='C:\\temp\\source_file.txt', target='C:\\temp\\target_file.txt', obj_type='file') salt.utils.win_dacl.copy_security( source='HKLM\\SOFTWARE\\salt\\test_source', target='HKLM\\SOFTWARE\\salt\\test_target', obj_type='registry', copy_owner=False) ''' obj_dacl = dacl(obj_type=obj_type) if 'registry' in obj_type.lower(): source = obj_dacl.get_reg_name(source) log.info('Source converted to: %s', source) target = obj_dacl.get_reg_name(target) log.info('Target converted to: %s', target) # Set flags try: obj_type_flag = flags().obj_type[obj_type.lower()] except KeyError: raise SaltInvocationError( 'Invalid "obj_type" passed: {0}'.format(obj_type)) security_flags = 0 if copy_owner: security_flags |= win32security.OWNER_SECURITY_INFORMATION if copy_group: security_flags |= win32security.GROUP_SECURITY_INFORMATION if copy_dacl: security_flags |= win32security.DACL_SECURITY_INFORMATION if copy_sacl: security_flags |= win32security.SACL_SECURITY_INFORMATION if not security_flags: raise SaltInvocationError( 'One of copy_owner, copy_group, copy_dacl, or copy_sacl must be ' 'True') # To set the owner to something other than the logged in user requires # SE_TAKE_OWNERSHIP_NAME and SE_RESTORE_NAME privileges # Enable them for the logged in user # Setup the privilege set new_privs = set() luid = win32security.LookupPrivilegeValue('', 'SeTakeOwnershipPrivilege') new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED)) luid = win32security.LookupPrivilegeValue('', 'SeRestorePrivilege') new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED)) luid = win32security.LookupPrivilegeValue('', 'SeSecurityPrivilege') new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED)) # Get the current token p_handle = win32api.GetCurrentProcess() t_handle = win32security.OpenProcessToken( p_handle, win32security.TOKEN_ALL_ACCESS | win32con.TOKEN_ADJUST_PRIVILEGES) # Enable the privileges win32security.AdjustTokenPrivileges(t_handle, 0, new_privs) # Load object Security Info from the Source sec = win32security.GetNamedSecurityInfo( source, obj_type_flag, security_flags) # The following return None if the corresponding flag is not set sd_sid = sec.GetSecurityDescriptorOwner() sd_gid = sec.GetSecurityDescriptorGroup() sd_dacl = sec.GetSecurityDescriptorDacl() sd_sacl = sec.GetSecurityDescriptorSacl() # Set Security info on the target try: win32security.SetNamedSecurityInfo( target, obj_type_flag, security_flags, sd_sid, sd_gid, sd_dacl, sd_sacl) except pywintypes.error as exc: raise CommandExecutionError( 'Failed to set security info: {0}'.format(exc.strerror)) return True
[ "def", "copy_security", "(", "source", ",", "target", ",", "obj_type", "=", "'file'", ",", "copy_owner", "=", "True", ",", "copy_group", "=", "True", ",", "copy_dacl", "=", "True", ",", "copy_sacl", "=", "True", ")", ":", "obj_dacl", "=", "dacl", "(", "obj_type", "=", "obj_type", ")", "if", "'registry'", "in", "obj_type", ".", "lower", "(", ")", ":", "source", "=", "obj_dacl", ".", "get_reg_name", "(", "source", ")", "log", ".", "info", "(", "'Source converted to: %s'", ",", "source", ")", "target", "=", "obj_dacl", ".", "get_reg_name", "(", "target", ")", "log", ".", "info", "(", "'Target converted to: %s'", ",", "target", ")", "# Set flags", "try", ":", "obj_type_flag", "=", "flags", "(", ")", ".", "obj_type", "[", "obj_type", ".", "lower", "(", ")", "]", "except", "KeyError", ":", "raise", "SaltInvocationError", "(", "'Invalid \"obj_type\" passed: {0}'", ".", "format", "(", "obj_type", ")", ")", "security_flags", "=", "0", "if", "copy_owner", ":", "security_flags", "|=", "win32security", ".", "OWNER_SECURITY_INFORMATION", "if", "copy_group", ":", "security_flags", "|=", "win32security", ".", "GROUP_SECURITY_INFORMATION", "if", "copy_dacl", ":", "security_flags", "|=", "win32security", ".", "DACL_SECURITY_INFORMATION", "if", "copy_sacl", ":", "security_flags", "|=", "win32security", ".", "SACL_SECURITY_INFORMATION", "if", "not", "security_flags", ":", "raise", "SaltInvocationError", "(", "'One of copy_owner, copy_group, copy_dacl, or copy_sacl must be '", "'True'", ")", "# To set the owner to something other than the logged in user requires", "# SE_TAKE_OWNERSHIP_NAME and SE_RESTORE_NAME privileges", "# Enable them for the logged in user", "# Setup the privilege set", "new_privs", "=", "set", "(", ")", "luid", "=", "win32security", ".", "LookupPrivilegeValue", "(", "''", ",", "'SeTakeOwnershipPrivilege'", ")", "new_privs", ".", "add", "(", "(", "luid", ",", "win32con", ".", "SE_PRIVILEGE_ENABLED", ")", ")", "luid", "=", "win32security", ".", "LookupPrivilegeValue", "(", "''", ",", "'SeRestorePrivilege'", ")", "new_privs", ".", "add", "(", "(", "luid", ",", "win32con", ".", "SE_PRIVILEGE_ENABLED", ")", ")", "luid", "=", "win32security", ".", "LookupPrivilegeValue", "(", "''", ",", "'SeSecurityPrivilege'", ")", "new_privs", ".", "add", "(", "(", "luid", ",", "win32con", ".", "SE_PRIVILEGE_ENABLED", ")", ")", "# Get the current token", "p_handle", "=", "win32api", ".", "GetCurrentProcess", "(", ")", "t_handle", "=", "win32security", ".", "OpenProcessToken", "(", "p_handle", ",", "win32security", ".", "TOKEN_ALL_ACCESS", "|", "win32con", ".", "TOKEN_ADJUST_PRIVILEGES", ")", "# Enable the privileges", "win32security", ".", "AdjustTokenPrivileges", "(", "t_handle", ",", "0", ",", "new_privs", ")", "# Load object Security Info from the Source", "sec", "=", "win32security", ".", "GetNamedSecurityInfo", "(", "source", ",", "obj_type_flag", ",", "security_flags", ")", "# The following return None if the corresponding flag is not set", "sd_sid", "=", "sec", ".", "GetSecurityDescriptorOwner", "(", ")", "sd_gid", "=", "sec", ".", "GetSecurityDescriptorGroup", "(", ")", "sd_dacl", "=", "sec", ".", "GetSecurityDescriptorDacl", "(", ")", "sd_sacl", "=", "sec", ".", "GetSecurityDescriptorSacl", "(", ")", "# Set Security info on the target", "try", ":", "win32security", ".", "SetNamedSecurityInfo", "(", "target", ",", "obj_type_flag", ",", "security_flags", ",", "sd_sid", ",", "sd_gid", ",", "sd_dacl", ",", "sd_sacl", ")", "except", "pywintypes", ".", "error", "as", "exc", ":", "raise", 
"CommandExecutionError", "(", "'Failed to set security info: {0}'", ".", "format", "(", "exc", ".", "strerror", ")", ")", "return", "True" ]
r''' Copy the security descriptor of the Source to the Target. You can specify a specific portion of the security descriptor to copy using one of the `copy_*` parameters. .. note:: At least one `copy_*` parameter must be ``True`` .. note:: The user account running this command must have the following privileges: - SeTakeOwnershipPrivilege - SeRestorePrivilege - SeSecurityPrivilege Args: source (str): The full path to the source. This is where the security info will be copied from target (str): The full path to the target. This is where the security info will be applied obj_type (str): file The type of object to query. This value changes the format of the ``obj_name`` parameter as follows: - file: indicates a file or directory - a relative path, such as ``FileName.txt`` or ``..\FileName`` - an absolute path, such as ``C:\DirName\FileName.txt`` - A UNC name, such as ``\\ServerName\ShareName\FileName.txt`` - service: indicates the name of a Windows service - printer: indicates the name of a printer - registry: indicates a registry key - Uses the following literal strings to denote the hive: - HKEY_LOCAL_MACHINE - MACHINE - HKLM - HKEY_USERS - USERS - HKU - HKEY_CURRENT_USER - CURRENT_USER - HKCU - HKEY_CLASSES_ROOT - CLASSES_ROOT - HKCR - Should be in the format of ``HIVE\Path\To\Key``. For example, ``HKLM\SOFTWARE\Windows`` - registry32: indicates a registry key under WOW64. Formatting is the same as it is for ``registry`` - share: indicates a network share copy_owner (bool): True ``True`` copies owner information. Default is ``True`` copy_group (bool): True ``True`` copies group information. Default is ``True`` copy_dacl (bool): True ``True`` copies the DACL. Default is ``True`` copy_sacl (bool): True ``True`` copies the SACL. Default is ``True`` Returns: bool: ``True`` if successful Raises: SaltInvocationError: When parameters are invalid CommandExecutionError: On failure to set security Usage: .. code-block:: python salt.utils.win_dacl.copy_security( source='C:\\temp\\source_file.txt', target='C:\\temp\\target_file.txt', obj_type='file') salt.utils.win_dacl.copy_security( source='HKLM\\SOFTWARE\\salt\\test_source', target='HKLM\\SOFTWARE\\salt\\test_target', obj_type='registry', copy_owner=False)
[ "r", "Copy", "the", "security", "descriptor", "of", "the", "Source", "to", "the", "Target", ".", "You", "can", "specify", "a", "specific", "portion", "of", "the", "security", "descriptor", "to", "copy", "using", "one", "of", "the", "copy_", "*", "parameters", "." ]
python
train
35.060241
chimera0/accel-brain-code
Reinforcement-Learning/demo/demo_autocompletion.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/demo/demo_autocompletion.py#L62-L78
def extract_possible_actions(self, state_key):
        '''
        Concrete method.

        Args:
            state_key       The key of state. This value is a point in the map.

        Returns:
            [(x, y)]

        '''
        if state_key in self.__state_action_list_dict:
            return self.__state_action_list_dict[state_key]
        else:
            action_list = []
            state_key_list = [action_list.extend(self.__state_action_list_dict[k]) for k in self.__state_action_list_dict.keys() if len([s for s in state_key if s in k]) > 0]
            return action_list
[ "def", "extract_possible_actions", "(", "self", ",", "state_key", ")", ":", "if", "state_key", "in", "self", ".", "__state_action_list_dict", ":", "return", "self", ".", "__state_action_list_dict", "[", "state_key", "]", "else", ":", "action_list", "=", "[", "]", "state_key_list", "=", "[", "action_list", ".", "extend", "(", "self", ".", "__state_action_list_dict", "[", "k", "]", ")", "for", "k", "in", "self", ".", "__state_action_list_dict", ".", "keys", "(", ")", "if", "len", "(", "[", "s", "for", "s", "in", "state_key", "if", "s", "in", "k", "]", ")", ">", "0", "]", "return", "action_list" ]
Concrete method. Args: state_key The key of state. This value is a point on the map. Returns: [(x, y)]
[ "Concreat", "method", "." ]
python
train
33.647059
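A minimal standalone sketch of the lookup-and-fallback logic this record implements; the state/action dictionary below is invented toy data, not taken from the repository.

# Toy stand-in for the private __state_action_list_dict attribute.
state_action = {
    (0, 0): [(0, 1), (1, 0)],
    (1, 1): [(1, 0), (0, 1), (2, 1)],
}

def possible_actions(state_key):
    if state_key in state_action:
        return state_action[state_key]
    actions = []
    for k in state_action:
        # Fall back to every stored state sharing a coordinate with state_key.
        if any(s in k for s in state_key):
            actions.extend(state_action[k])
    return actions

print(possible_actions((0, 0)))  # exact hit -> [(0, 1), (1, 0)]
print(possible_actions((0, 2)))  # fallback  -> actions of all overlapping states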
googleapis/google-cloud-python
datastore/google/cloud/datastore/_http.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/_http.py#L38-L78
def _request(http, project, method, data, base_url): """Make a request over the Http transport to the Cloud Datastore API. :type http: :class:`requests.Session` :param http: HTTP object to make requests. :type project: str :param project: The project to make the request for. :type method: str :param method: The API call method name (ie, ``runQuery``, ``lookup``, etc) :type data: str :param data: The data to send with the API call. Typically this is a serialized Protobuf string. :type base_url: str :param base_url: The base URL where the API lives. :rtype: str :returns: The string response content from the API call. :raises: :class:`google.cloud.exceptions.GoogleCloudError` if the response code is not 200 OK. """ headers = { "Content-Type": "application/x-protobuf", "User-Agent": connection_module.DEFAULT_USER_AGENT, connection_module.CLIENT_INFO_HEADER: _CLIENT_INFO, } api_url = build_api_url(project, method, base_url) response = http.request(url=api_url, method="POST", headers=headers, data=data) if response.status_code != 200: error_status = status_pb2.Status.FromString(response.content) raise exceptions.from_http_status( response.status_code, error_status.message, errors=[error_status] ) return response.content
[ "def", "_request", "(", "http", ",", "project", ",", "method", ",", "data", ",", "base_url", ")", ":", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/x-protobuf\"", ",", "\"User-Agent\"", ":", "connection_module", ".", "DEFAULT_USER_AGENT", ",", "connection_module", ".", "CLIENT_INFO_HEADER", ":", "_CLIENT_INFO", ",", "}", "api_url", "=", "build_api_url", "(", "project", ",", "method", ",", "base_url", ")", "response", "=", "http", ".", "request", "(", "url", "=", "api_url", ",", "method", "=", "\"POST\"", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", "if", "response", ".", "status_code", "!=", "200", ":", "error_status", "=", "status_pb2", ".", "Status", ".", "FromString", "(", "response", ".", "content", ")", "raise", "exceptions", ".", "from_http_status", "(", "response", ".", "status_code", ",", "error_status", ".", "message", ",", "errors", "=", "[", "error_status", "]", ")", "return", "response", ".", "content" ]
Make a request over the Http transport to the Cloud Datastore API. :type http: :class:`requests.Session` :param http: HTTP object to make requests. :type project: str :param project: The project to make the request for. :type method: str :param method: The API call method name (ie, ``runQuery``, ``lookup``, etc) :type data: str :param data: The data to send with the API call. Typically this is a serialized Protobuf string. :type base_url: str :param base_url: The base URL where the API lives. :rtype: str :returns: The string response content from the API call. :raises: :class:`google.cloud.exceptions.GoogleCloudError` if the response code is not 200 OK.
[ "Make", "a", "request", "over", "the", "Http", "transport", "to", "the", "Cloud", "Datastore", "API", "." ]
python
train
34.02439
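A hedged sketch of how a caller might drive the _request helper above; the project id, base URL, and empty payload are placeholders rather than values from the source, and a real call additionally needs a serialized Datastore protobuf plus credentials.

import requests

session = requests.Session()
payload = b""  # normally a serialized LookupRequest/RunQueryRequest protobuf
content = _request(
    http=session,
    project="example-project",                    # hypothetical project id
    method="lookup",
    data=payload,
    base_url="https://datastore.googleapis.com",  # assumed endpoint
)
# On HTTP 200 `content` is the raw protobuf response; otherwise a
# GoogleCloudError is raised from the returned Status message.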
pepkit/peppy
peppy/utils.py
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L121-L128
def expandpath(path): """ Expand a filesystem path that may or may not contain user/env vars. :param str path: path to expand :return str: expanded version of input path """ return os.path.expandvars(os.path.expanduser(path)).replace("//", "/")
[ "def", "expandpath", "(", "path", ")", ":", "return", "os", ".", "path", ".", "expandvars", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", ".", "replace", "(", "\"//\"", ",", "\"/\"", ")" ]
Expand a filesystem path that may or may not contain user/env vars. :param str path: path to expand :return str: expanded version of input path
[ "Expand", "a", "filesystem", "path", "that", "may", "or", "may", "not", "contain", "user", "/", "env", "vars", "." ]
python
train
32.75
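A quick, self-contained check of the behavior described above (the environment variable and paths are made up):

import os

def expandpath(path):
    # Same one-liner as in the record above.
    return os.path.expandvars(os.path.expanduser(path)).replace("//", "/")

os.environ["DATA"] = "/srv/data"
print(expandpath("$DATA//raw"))      # -> /srv/data/raw
print(expandpath("~/notes//todo"))   # -> /home/<user>/notes/todo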
turicas/rows
rows/utils.py
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L983-L1079
def generate_schema(table, export_fields, output_format, output_fobj): """Generate table schema for a specific output format and write Current supported output formats: 'txt', 'sql' and 'django'. The table name and all fields names pass for a slugifying process (table name is taken from file name). """ if output_format in ("csv", "txt"): from rows import plugins data = [ { "field_name": fieldname, "field_type": fieldtype.__name__.replace("Field", "").lower(), } for fieldname, fieldtype in table.fields.items() if fieldname in export_fields ] table = plugins.dicts.import_from_dicts( data, import_fields=["field_name", "field_type"] ) if output_format == "txt": plugins.txt.export_to_txt(table, output_fobj) elif output_format == "csv": plugins.csv.export_to_csv(table, output_fobj) elif output_format == "sql": # TODO: may use dict from rows.plugins.sqlite or postgresql sql_fields = { rows.fields.BinaryField: "BLOB", rows.fields.BoolField: "BOOL", rows.fields.IntegerField: "INT", rows.fields.FloatField: "FLOAT", rows.fields.PercentField: "FLOAT", rows.fields.DateField: "DATE", rows.fields.DatetimeField: "DATETIME", rows.fields.TextField: "TEXT", rows.fields.DecimalField: "FLOAT", rows.fields.EmailField: "TEXT", rows.fields.JSONField: "TEXT", } fields = [ " {} {}".format(field_name, sql_fields[field_type]) for field_name, field_type in table.fields.items() if field_name in export_fields ] sql = ( dedent( """ CREATE TABLE IF NOT EXISTS {name} ( {fields} ); """ ) .strip() .format(name=table.name, fields=",\n".join(fields)) + "\n" ) output_fobj.write(sql) elif output_format == "django": django_fields = { rows.fields.BinaryField: "BinaryField", rows.fields.BoolField: "BooleanField", rows.fields.IntegerField: "IntegerField", rows.fields.FloatField: "FloatField", rows.fields.PercentField: "DecimalField", rows.fields.DateField: "DateField", rows.fields.DatetimeField: "DateTimeField", rows.fields.TextField: "TextField", rows.fields.DecimalField: "DecimalField", rows.fields.EmailField: "EmailField", rows.fields.JSONField: "JSONField", } table_name = "".join(word.capitalize() for word in table.name.split("_")) lines = ["from django.db import models"] if rows.fields.JSONField in [ table.fields[field_name] for field_name in export_fields ]: lines.append("from django.contrib.postgres.fields import JSONField") lines.append("") lines.append("class {}(models.Model):".format(table_name)) for field_name, field_type in table.fields.items(): if field_name not in export_fields: continue if field_type is not rows.fields.JSONField: django_type = "models.{}()".format(django_fields[field_type]) else: django_type = "JSONField()" lines.append(" {} = {}".format(field_name, django_type)) result = "\n".join(lines) + "\n" output_fobj.write(result)
[ "def", "generate_schema", "(", "table", ",", "export_fields", ",", "output_format", ",", "output_fobj", ")", ":", "if", "output_format", "in", "(", "\"csv\"", ",", "\"txt\"", ")", ":", "from", "rows", "import", "plugins", "data", "=", "[", "{", "\"field_name\"", ":", "fieldname", ",", "\"field_type\"", ":", "fieldtype", ".", "__name__", ".", "replace", "(", "\"Field\"", ",", "\"\"", ")", ".", "lower", "(", ")", ",", "}", "for", "fieldname", ",", "fieldtype", "in", "table", ".", "fields", ".", "items", "(", ")", "if", "fieldname", "in", "export_fields", "]", "table", "=", "plugins", ".", "dicts", ".", "import_from_dicts", "(", "data", ",", "import_fields", "=", "[", "\"field_name\"", ",", "\"field_type\"", "]", ")", "if", "output_format", "==", "\"txt\"", ":", "plugins", ".", "txt", ".", "export_to_txt", "(", "table", ",", "output_fobj", ")", "elif", "output_format", "==", "\"csv\"", ":", "plugins", ".", "csv", ".", "export_to_csv", "(", "table", ",", "output_fobj", ")", "elif", "output_format", "==", "\"sql\"", ":", "# TODO: may use dict from rows.plugins.sqlite or postgresql", "sql_fields", "=", "{", "rows", ".", "fields", ".", "BinaryField", ":", "\"BLOB\"", ",", "rows", ".", "fields", ".", "BoolField", ":", "\"BOOL\"", ",", "rows", ".", "fields", ".", "IntegerField", ":", "\"INT\"", ",", "rows", ".", "fields", ".", "FloatField", ":", "\"FLOAT\"", ",", "rows", ".", "fields", ".", "PercentField", ":", "\"FLOAT\"", ",", "rows", ".", "fields", ".", "DateField", ":", "\"DATE\"", ",", "rows", ".", "fields", ".", "DatetimeField", ":", "\"DATETIME\"", ",", "rows", ".", "fields", ".", "TextField", ":", "\"TEXT\"", ",", "rows", ".", "fields", ".", "DecimalField", ":", "\"FLOAT\"", ",", "rows", ".", "fields", ".", "EmailField", ":", "\"TEXT\"", ",", "rows", ".", "fields", ".", "JSONField", ":", "\"TEXT\"", ",", "}", "fields", "=", "[", "\" {} {}\"", ".", "format", "(", "field_name", ",", "sql_fields", "[", "field_type", "]", ")", "for", "field_name", ",", "field_type", "in", "table", ".", "fields", ".", "items", "(", ")", "if", "field_name", "in", "export_fields", "]", "sql", "=", "(", "dedent", "(", "\"\"\"\n CREATE TABLE IF NOT EXISTS {name} (\n {fields}\n );\n \"\"\"", ")", ".", "strip", "(", ")", ".", "format", "(", "name", "=", "table", ".", "name", ",", "fields", "=", "\",\\n\"", ".", "join", "(", "fields", ")", ")", "+", "\"\\n\"", ")", "output_fobj", ".", "write", "(", "sql", ")", "elif", "output_format", "==", "\"django\"", ":", "django_fields", "=", "{", "rows", ".", "fields", ".", "BinaryField", ":", "\"BinaryField\"", ",", "rows", ".", "fields", ".", "BoolField", ":", "\"BooleanField\"", ",", "rows", ".", "fields", ".", "IntegerField", ":", "\"IntegerField\"", ",", "rows", ".", "fields", ".", "FloatField", ":", "\"FloatField\"", ",", "rows", ".", "fields", ".", "PercentField", ":", "\"DecimalField\"", ",", "rows", ".", "fields", ".", "DateField", ":", "\"DateField\"", ",", "rows", ".", "fields", ".", "DatetimeField", ":", "\"DateTimeField\"", ",", "rows", ".", "fields", ".", "TextField", ":", "\"TextField\"", ",", "rows", ".", "fields", ".", "DecimalField", ":", "\"DecimalField\"", ",", "rows", ".", "fields", ".", "EmailField", ":", "\"EmailField\"", ",", "rows", ".", "fields", ".", "JSONField", ":", "\"JSONField\"", ",", "}", "table_name", "=", "\"\"", ".", "join", "(", "word", ".", "capitalize", "(", ")", "for", "word", "in", "table", ".", "name", ".", "split", "(", "\"_\"", ")", ")", "lines", "=", "[", "\"from django.db import models\"", "]", "if", "rows", ".", "fields", ".", 
"JSONField", "in", "[", "table", ".", "fields", "[", "field_name", "]", "for", "field_name", "in", "export_fields", "]", ":", "lines", ".", "append", "(", "\"from django.contrib.postgres.fields import JSONField\"", ")", "lines", ".", "append", "(", "\"\"", ")", "lines", ".", "append", "(", "\"class {}(models.Model):\"", ".", "format", "(", "table_name", ")", ")", "for", "field_name", ",", "field_type", "in", "table", ".", "fields", ".", "items", "(", ")", ":", "if", "field_name", "not", "in", "export_fields", ":", "continue", "if", "field_type", "is", "not", "rows", ".", "fields", ".", "JSONField", ":", "django_type", "=", "\"models.{}()\"", ".", "format", "(", "django_fields", "[", "field_type", "]", ")", "else", ":", "django_type", "=", "\"JSONField()\"", "lines", ".", "append", "(", "\" {} = {}\"", ".", "format", "(", "field_name", ",", "django_type", ")", ")", "result", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "+", "\"\\n\"", "output_fobj", ".", "write", "(", "result", ")" ]
Generate table schema for a specific output format and write it. Currently supported output formats: 'txt', 'csv', 'sql' and 'django'. The table name and all field names pass through a slugifying process (the table name is taken from the file name).
[ "Generate", "table", "schema", "for", "a", "specific", "output", "format", "and", "write" ]
python
train
37.041237
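A hedged usage sketch for the schema generator above; the CSV file name is a placeholder, and this assumes generate_schema and the rows package are importable.

import io
import rows

table = rows.import_from_csv("data.csv")  # hypothetical input file
buf = io.StringIO()
generate_schema(table, list(table.field_names), "sql", buf)
print(buf.getvalue())  # CREATE TABLE IF NOT EXISTS ... statement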
peterldowns/python-mustache
mustache/loading.py
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/loading.py#L26-L42
def get_abs_template_path(template_name, directory, extension): """ Given a template name, a directory, and an extension, return the absolute path to the template. """ # Get the relative path relative_path = join(directory, template_name) file_with_ext = template_name if extension: # If there is a default extension, but no file extension, then add it file_name, file_ext = splitext(file_with_ext) if not file_ext: file_with_ext = extsep.join( (file_name, extension.replace(extsep, ''))) # Rebuild the relative path relative_path = join(directory, file_with_ext) return abspath(relative_path)
[ "def", "get_abs_template_path", "(", "template_name", ",", "directory", ",", "extension", ")", ":", "# Get the relative path", "relative_path", "=", "join", "(", "directory", ",", "template_name", ")", "file_with_ext", "=", "template_name", "if", "extension", ":", "# If there is a default extension, but no file extension, then add it", "file_name", ",", "file_ext", "=", "splitext", "(", "file_with_ext", ")", "if", "not", "file_ext", ":", "file_with_ext", "=", "extsep", ".", "join", "(", "(", "file_name", ",", "extension", ".", "replace", "(", "extsep", ",", "''", ")", ")", ")", "# Rebuild the relative path", "relative_path", "=", "join", "(", "directory", ",", "file_with_ext", ")", "return", "abspath", "(", "relative_path", ")" ]
Given a template name, a directory, and an extension, return the absolute path to the template.
[ "Given", "a", "template", "name", "a", "directory", "and", "an", "extension", "return", "the", "absolute", "path", "to", "the", "template", "." ]
python
train
40.176471
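Illustrative calls, assuming the function above is in scope (the paths are invented and the absolute results depend on the current working directory):

print(get_abs_template_path("index", "templates", "mustache"))
# -> <cwd>/templates/index.mustache   (default extension appended)
print(get_abs_template_path("index.html", "templates", "mustache"))
# -> <cwd>/templates/index.html       (existing extension kept)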
blockstack/blockstack-files
blockstack_file/blockstack_file.py
https://github.com/blockstack/blockstack-files/blob/8d88cc48bdf8ed57f17d4bba860e972bde321921/blockstack_file/blockstack_file.py#L249-L273
def file_key_regenerate( blockchain_id, hostname, config_path=CONFIG_PATH, wallet_keys=None ): """ Generate a new encryption key. Retire the existing key, if it exists. Return {'status': True} on success Return {'error': ...} on error """ config_dir = os.path.dirname(config_path) current_key = file_key_lookup( blockchain_id, 0, hostname, config_path=config_path ) if 'status' in current_key and current_key['status']: # retire # NOTE: implicitly depends on this method failing only because the key doesn't exist res = file_key_retire( blockchain_id, current_key, config_path=config_path, wallet_keys=wallet_keys ) if 'error' in res: log.error("Failed to retire key %s: %s" % (current_key['key_id'], res['error'])) return {'error': 'Failed to retire key'} # make a new key res = blockstack_gpg.gpg_app_create_key( blockchain_id, "files", hostname, wallet_keys=wallet_keys, config_dir=config_dir ) if 'error' in res: log.error("Failed to generate new key: %s" % res['error']) return {'error': 'Failed to generate new key'} return {'status': True}
[ "def", "file_key_regenerate", "(", "blockchain_id", ",", "hostname", ",", "config_path", "=", "CONFIG_PATH", ",", "wallet_keys", "=", "None", ")", ":", "config_dir", "=", "os", ".", "path", ".", "dirname", "(", "config_path", ")", "current_key", "=", "file_key_lookup", "(", "blockchain_id", ",", "0", ",", "hostname", ",", "config_path", "=", "config_path", ")", "if", "'status'", "in", "current_key", "and", "current_key", "[", "'status'", "]", ":", "# retire", "# NOTE: implicitly depends on this method failing only because the key doesn't exist", "res", "=", "file_key_retire", "(", "blockchain_id", ",", "current_key", ",", "config_path", "=", "config_path", ",", "wallet_keys", "=", "wallet_keys", ")", "if", "'error'", "in", "res", ":", "log", ".", "error", "(", "\"Failed to retire key %s: %s\"", "%", "(", "current_key", "[", "'key_id'", "]", ",", "res", "[", "'error'", "]", ")", ")", "return", "{", "'error'", ":", "'Failed to retire key'", "}", "# make a new key ", "res", "=", "blockstack_gpg", ".", "gpg_app_create_key", "(", "blockchain_id", ",", "\"files\"", ",", "hostname", ",", "wallet_keys", "=", "wallet_keys", ",", "config_dir", "=", "config_dir", ")", "if", "'error'", "in", "res", ":", "log", ".", "error", "(", "\"Failed to generate new key: %s\"", "%", "res", "[", "'error'", "]", ")", "return", "{", "'error'", ":", "'Failed to generate new key'", "}", "return", "{", "'status'", ":", "True", "}" ]
Generate a new encryption key. Retire the existing key, if it exists. Return {'status': True} on success Return {'error': ...} on error
[ "Generate", "a", "new", "encryption", "key", ".", "Retire", "the", "existing", "key", "if", "it", "exists", ".", "Return", "{", "status", ":", "True", "}", "on", "success", "Return", "{", "error", ":", "...", "}", "on", "error" ]
python
train
46.16
user-cont/colin
colin/core/target.py
https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/target.py#L383-L387
def tmpdir(self): """ Temporary directory holding all the runtime data. """ if self._tmpdir is None: self._tmpdir = mkdtemp(prefix="colin-", dir="/var/tmp") return self._tmpdir
[ "def", "tmpdir", "(", "self", ")", ":", "if", "self", ".", "_tmpdir", "is", "None", ":", "self", ".", "_tmpdir", "=", "mkdtemp", "(", "prefix", "=", "\"colin-\"", ",", "dir", "=", "\"/var/tmp\"", ")", "return", "self", ".", "_tmpdir" ]
Temporary directory holding all the runtime data.
[ "Temporary", "directory", "holding", "all", "the", "runtime", "data", "." ]
python
train
41.6
Pajinek/vhm
server/apps/core/scripts/generate.py
https://github.com/Pajinek/vhm/blob/e323e99855fd5c40fd61fba87c2646a1165505ed/server/apps/core/scripts/generate.py#L45-L92
def creatauth(name, homedir): """ Create a user in Linux for the group and set the home directory. Returns the gid and uid.""" uid, gid = [None, None] # get information about user command = "id %s" % (name) data = commands.getstatusoutput(command) if data[0] > 0: # create new system user command = "useradd -g %s %s" % (sett.APACHEIIS_GROUP, name) #command = "useradd -g %s %s" % ("www-data",name) data = commands.getstatusoutput(command) if data[0] != 0: msg = "Error: Can't create user." sys.stderr.write(msg) # set homedir for user command = "usermod -d %s %s" % (homedir, name) data = commands.getstatusoutput(command) command = "chown %s:%s %s -R" % (name, sett.APACHEIIS_GROUP, homedir) data = commands.getstatusoutput(command) # get information about user command = "id %s" % (name) data = commands.getstatusoutput(command) # check new user and get uid, gid if data[0] > 0: msg = "Error: User not create." sys.stderr.write(msg) else: for it in data[1].split(" "): m = re.search('uid=([0-9]*)', it) try: uid = m.group(1) except: pass m = re.search('gid=([0-9]*)', it) try: gid = m.group(1) except: pass return {"uid": uid, "gid": gid} return {}
[ "def", "creatauth", "(", "name", ",", "homedir", ")", ":", "uid", ",", "gid", "=", "[", "None", ",", "None", "]", "# get information about user", "command", "=", "\"id %s\"", "%", "(", "name", ")", "data", "=", "commands", ".", "getstatusoutput", "(", "command", ")", "if", "data", "[", "0", "]", ">", "0", ":", "# create new system user", "command", "=", "\"useradd -g %s %s\"", "%", "(", "sett", ".", "APACHEIIS_GROUP", ",", "name", ")", "#command = \"useradd -g %s %s\" % (\"www-data\",name)", "data", "=", "commands", ".", "getstatusoutput", "(", "command", ")", "if", "data", "[", "0", "]", "!=", "0", ":", "msg", "=", "\"Error: Can't create user.\"", "sys", ".", "stderr", ".", "write", "(", "msg", ")", "# set homedir for user", "command", "=", "\"usermod -d %s %s\"", "%", "(", "homedir", ",", "name", ")", "data", "=", "commands", ".", "getstatusoutput", "(", "command", ")", "command", "=", "\"chown %s:%s %s -R\"", "%", "(", "name", ",", "sett", ".", "APACHEIIS_GROUP", ",", "homedir", ")", "data", "=", "commands", ".", "getstatusoutput", "(", "command", ")", "# get information about user", "command", "=", "\"id %s\"", "%", "(", "name", ")", "data", "=", "commands", ".", "getstatusoutput", "(", "command", ")", "# check new user and get uid, gid", "if", "data", "[", "0", "]", ">", "0", ":", "msg", "=", "\"Error: User not create.\"", "sys", ".", "stderr", ".", "write", "(", "msg", ")", "else", ":", "for", "it", "in", "data", "[", "1", "]", ".", "split", "(", "\" \"", ")", ":", "m", "=", "re", ".", "search", "(", "'uid=([0-9]*)'", ",", "it", ")", "try", ":", "uid", "=", "m", ".", "group", "(", "1", ")", "except", ":", "pass", "m", "=", "re", ".", "search", "(", "'gid=([0-9]*)'", ",", "it", ")", "try", ":", "gid", "=", "m", ".", "group", "(", "1", ")", "except", ":", "pass", "return", "{", "\"uid\"", ":", "uid", ",", "\"gid\"", ":", "gid", "}", "return", "{", "}" ]
Create a user in Linux for the group and set the home directory. Returns the gid and uid.
[ "Function", "create", "user", "in", "linux", "for", "group", "and", "set", "homedir", ".", "Function", "return", "gid", "and", "uid", "." ]
python
train
29.416667
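The fragile part of the function above is scraping uid/gid out of `id` output; here is that parsing step isolated as a pure function on an invented sample string, with no shelling out:

import re

def parse_id_output(id_output):
    uid = gid = None
    for part in id_output.split(" "):
        m = re.search(r"uid=([0-9]+)", part)
        if m:
            uid = m.group(1)
        m = re.search(r"gid=([0-9]+)", part)
        if m:
            gid = m.group(1)
    return {"uid": uid, "gid": gid}

print(parse_id_output("uid=1042(deploy) gid=33(www-data) groups=33(www-data)"))
# -> {'uid': '1042', 'gid': '33'}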
raphaelvallat/pingouin
pingouin/correlation.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L148-L193
def shepherd(x, y, n_boot=200): """ Shepherd's Pi correlation, equivalent to Spearman's rho after outliers removal. Parameters ---------- x, y : array_like First and second set of observations. x and y must be independent. n_boot : int Number of bootstrap samples to calculate. Returns ------- r : float Pi correlation coefficient pval : float Two-tailed adjusted p-value. outliers : array of bool Indicate if value is an outlier or not Notes ----- It first bootstraps the Mahalanobis distances, removes all observations with m >= 6 and finally calculates the correlation of the remaining data. Pi is Spearman's Rho after outlier removal. """ from scipy.stats import spearmanr X = np.column_stack((x, y)) # Bootstrapping on Mahalanobis distance m = bsmahal(X, X, n_boot) # Determine outliers outliers = (m >= 6) # Compute correlation r, pval = spearmanr(x[~outliers], y[~outliers]) # (optional) double the p-value to achieve a nominal false alarm rate # pval *= 2 # pval = 1 if pval > 1 else pval return r, pval, outliers
[ "def", "shepherd", "(", "x", ",", "y", ",", "n_boot", "=", "200", ")", ":", "from", "scipy", ".", "stats", "import", "spearmanr", "X", "=", "np", ".", "column_stack", "(", "(", "x", ",", "y", ")", ")", "# Bootstrapping on Mahalanobis distance", "m", "=", "bsmahal", "(", "X", ",", "X", ",", "n_boot", ")", "# Determine outliers", "outliers", "=", "(", "m", ">=", "6", ")", "# Compute correlation", "r", ",", "pval", "=", "spearmanr", "(", "x", "[", "~", "outliers", "]", ",", "y", "[", "~", "outliers", "]", ")", "# (optional) double the p-value to achieve a nominal false alarm rate", "# pval *= 2", "# pval = 1 if pval > 1 else pval", "return", "r", ",", "pval", ",", "outliers" ]
Shepherd's Pi correlation, equivalent to Spearman's rho after outliers removal. Parameters ---------- x, y : array_like First and second set of observations. x and y must be independent. n_boot : int Number of bootstrap samples to calculate. Returns ------- r : float Pi correlation coefficient pval : float Two-tailed adjusted p-value. outliers : array of bool Indicate if value is an outlier or not Notes ----- It first bootstraps the Mahalanobis distances, removes all observations with m >= 6 and finally calculates the correlation of the remaining data. Pi is Spearman's Rho after outlier removal.
[ "Shepherd", "s", "Pi", "correlation", "equivalent", "to", "Spearman", "s", "rho", "after", "outliers", "removal", "." ]
python
train
24.913043
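A tiny demonstration on synthetic data, assuming shepherd is importable (e.g. from pingouin's correlation module); n_boot is kept small purely for speed.

import numpy as np

rng = np.random.RandomState(42)
x = rng.normal(size=50)
y = x + rng.normal(scale=0.5, size=50)
x[0], y[0] = 8.0, -8.0  # plant one obvious bivariate outlier

r, pval, outliers = shepherd(x, y, n_boot=50)
print(round(r, 3), int(outliers.sum()))  # rho after the flagged points are dropped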
ranaroussi/pywallet
pywallet/utils/bip32.py
https://github.com/ranaroussi/pywallet/blob/206ff224389c490d8798f660c9e79fe97ebb64cf/pywallet/utils/bip32.py#L501-L578
def deserialize(cls, key, network="bitcoin_testnet"): """Load the ExtendedBip32Key from a hex key. The key consists of * 4 byte version bytes (network key) * 1 byte depth: - 0x00 for master nodes, - 0x01 for level-1 descendants, .... * 4 byte fingerprint of the parent's key (0x00000000 if master key) * 4 byte child number. This is the number i in x_i = x_{par}/i, with x_i the key being serialized. This is encoded in MSB order. (0x00000000 if master key) * 32 bytes: the chain code * 33 bytes: the public key or private key data (0x02 + X or 0x03 + X for public keys, 0x00 + k for private keys) (Note that this also supports 0x04 + X + Y uncompressed points, but this is totally non-standard and this library won't even generate such data.) """ network = Wallet.get_network(network) if len(key) in [78, (78 + 32)]: # we have a byte array, so pass pass else: key = ensure_bytes(key) if len(key) in [78 * 2, (78 + 32) * 2]: # we have a hexlified non-base58 key, continue! key = unhexlify(key) elif len(key) == 111: # We have a base58 encoded string key = base58.b58decode_check(key) # Now that we double checkd the values, convert back to bytes because # they're easier to slice version, depth, parent_fingerprint, child, chain_code, key_data = ( key[:4], key[4], key[5:9], key[9:13], key[13:45], key[45:]) version_long = long_or_int(hexlify(version), 16) exponent = None pubkey = None point_type = key_data[0] if not isinstance(point_type, six.integer_types): point_type = ord(point_type) if point_type == 0: # Private key if version_long != network.EXT_SECRET_KEY: raise incompatible_network_exception_factory( network.NAME, network.EXT_SECRET_KEY, version) exponent = key_data[1:] elif point_type in [2, 3, 4]: # Compressed public coordinates if version_long != network.EXT_PUBLIC_KEY: raise incompatible_network_exception_factory( network.NAME, network.EXT_PUBLIC_KEY, version) pubkey = PublicKey.from_hex_key(key_data, network=network) # Even though this was generated from a compressed pubkey, we # want to store it as an uncompressed pubkey pubkey.compressed = False else: raise ValueError("Invalid key_data prefix, got %s" % point_type) def l(byte_seq): if byte_seq is None: return byte_seq elif isinstance(byte_seq, six.integer_types): return byte_seq return long_or_int(hexlify(byte_seq), 16) return cls(depth=l(depth), parent_fingerprint=l(parent_fingerprint), child_number=l(child), chain_code=l(chain_code), private_exponent=l(exponent), public_key=pubkey, network=network)
[ "def", "deserialize", "(", "cls", ",", "key", ",", "network", "=", "\"bitcoin_testnet\"", ")", ":", "network", "=", "Wallet", ".", "get_network", "(", "network", ")", "if", "len", "(", "key", ")", "in", "[", "78", ",", "(", "78", "+", "32", ")", "]", ":", "# we have a byte array, so pass", "pass", "else", ":", "key", "=", "ensure_bytes", "(", "key", ")", "if", "len", "(", "key", ")", "in", "[", "78", "*", "2", ",", "(", "78", "+", "32", ")", "*", "2", "]", ":", "# we have a hexlified non-base58 key, continue!", "key", "=", "unhexlify", "(", "key", ")", "elif", "len", "(", "key", ")", "==", "111", ":", "# We have a base58 encoded string", "key", "=", "base58", ".", "b58decode_check", "(", "key", ")", "# Now that we double checkd the values, convert back to bytes because", "# they're easier to slice", "version", ",", "depth", ",", "parent_fingerprint", ",", "child", ",", "chain_code", ",", "key_data", "=", "(", "key", "[", ":", "4", "]", ",", "key", "[", "4", "]", ",", "key", "[", "5", ":", "9", "]", ",", "key", "[", "9", ":", "13", "]", ",", "key", "[", "13", ":", "45", "]", ",", "key", "[", "45", ":", "]", ")", "version_long", "=", "long_or_int", "(", "hexlify", "(", "version", ")", ",", "16", ")", "exponent", "=", "None", "pubkey", "=", "None", "point_type", "=", "key_data", "[", "0", "]", "if", "not", "isinstance", "(", "point_type", ",", "six", ".", "integer_types", ")", ":", "point_type", "=", "ord", "(", "point_type", ")", "if", "point_type", "==", "0", ":", "# Private key", "if", "version_long", "!=", "network", ".", "EXT_SECRET_KEY", ":", "raise", "incompatible_network_exception_factory", "(", "network", ".", "NAME", ",", "network", ".", "EXT_SECRET_KEY", ",", "version", ")", "exponent", "=", "key_data", "[", "1", ":", "]", "elif", "point_type", "in", "[", "2", ",", "3", ",", "4", "]", ":", "# Compressed public coordinates", "if", "version_long", "!=", "network", ".", "EXT_PUBLIC_KEY", ":", "raise", "incompatible_network_exception_factory", "(", "network", ".", "NAME", ",", "network", ".", "EXT_PUBLIC_KEY", ",", "version", ")", "pubkey", "=", "PublicKey", ".", "from_hex_key", "(", "key_data", ",", "network", "=", "network", ")", "# Even though this was generated from a compressed pubkey, we", "# want to store it as an uncompressed pubkey", "pubkey", ".", "compressed", "=", "False", "else", ":", "raise", "ValueError", "(", "\"Invalid key_data prefix, got %s\"", "%", "point_type", ")", "def", "l", "(", "byte_seq", ")", ":", "if", "byte_seq", "is", "None", ":", "return", "byte_seq", "elif", "isinstance", "(", "byte_seq", ",", "six", ".", "integer_types", ")", ":", "return", "byte_seq", "return", "long_or_int", "(", "hexlify", "(", "byte_seq", ")", ",", "16", ")", "return", "cls", "(", "depth", "=", "l", "(", "depth", ")", ",", "parent_fingerprint", "=", "l", "(", "parent_fingerprint", ")", ",", "child_number", "=", "l", "(", "child", ")", ",", "chain_code", "=", "l", "(", "chain_code", ")", ",", "private_exponent", "=", "l", "(", "exponent", ")", ",", "public_key", "=", "pubkey", ",", "network", "=", "network", ")" ]
Load the ExtendedBip32Key from a hex key. The key consists of * 4 byte version bytes (network key) * 1 byte depth: - 0x00 for master nodes, - 0x01 for level-1 descendants, .... * 4 byte fingerprint of the parent's key (0x00000000 if master key) * 4 byte child number. This is the number i in x_i = x_{par}/i, with x_i the key being serialized. This is encoded in MSB order. (0x00000000 if master key) * 32 bytes: the chain code * 33 bytes: the public key or private key data (0x02 + X or 0x03 + X for public keys, 0x00 + k for private keys) (Note that this also supports 0x04 + X + Y uncompressed points, but this is totally non-standard and this library won't even generate such data.)
[ "Load", "the", "ExtendedBip32Key", "from", "a", "hex", "key", "." ]
python
train
42.538462
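The slicing step above follows the fixed 78-byte BIP32 layout; a sketch on a dummy zero-filled buffer (not a real key) makes the offsets visible:

key = bytes(78)  # version | depth | parent fp | child | chain code | key data
version, depth, parent_fp, child, chain_code, key_data = (
    key[:4], key[4], key[5:9], key[9:13], key[13:45], key[45:])
print(len(version), len(parent_fp), len(child), len(chain_code), len(key_data))
# -> 4 4 4 32 33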
phareous/insteonlocal
insteonlocal/Hub.py
https://github.com/phareous/insteonlocal/blob/a4544a17d143fb285852cb873e862c270d55dd00/insteonlocal/Hub.py#L439-L469
def get_command_response_from_cache(self, device_id, command, command2): """Gets response""" key = self.create_key_from_command(command, command2) command_cache = self.get_cache_from_file(device_id) if device_id not in command_cache: command_cache[device_id] = {} return False elif key not in command_cache[device_id]: return False response = command_cache[device_id][key] expired = False if response['ttl'] < int(time()): self.logger.info("cache expired for device %s", device_id) expired = True if os.path.exists(LOCK_FILE): self.logger.info("cache locked - will wait to rebuild %s", device_id) else: self.logger.info("cache unlocked - will rebuild %s", device_id) newpid = os.fork() if newpid == 0: self.rebuild_cache(device_id, command, command2) if expired: self.logger.info("returning expired cached device status %s", device_id) else: self.logger.info("returning unexpired cached device status %s", device_id) return response['response']
[ "def", "get_command_response_from_cache", "(", "self", ",", "device_id", ",", "command", ",", "command2", ")", ":", "key", "=", "self", ".", "create_key_from_command", "(", "command", ",", "command2", ")", "command_cache", "=", "self", ".", "get_cache_from_file", "(", "device_id", ")", "if", "device_id", "not", "in", "command_cache", ":", "command_cache", "[", "device_id", "]", "=", "{", "}", "return", "False", "elif", "key", "not", "in", "command_cache", "[", "device_id", "]", ":", "return", "False", "response", "=", "command_cache", "[", "device_id", "]", "[", "key", "]", "expired", "=", "False", "if", "response", "[", "'ttl'", "]", "<", "int", "(", "time", "(", ")", ")", ":", "self", ".", "logger", ".", "info", "(", "\"cache expired for device %s\"", ",", "device_id", ")", "expired", "=", "True", "if", "os", ".", "path", ".", "exists", "(", "LOCK_FILE", ")", ":", "self", ".", "logger", ".", "info", "(", "\"cache locked - will wait to rebuild %s\"", ",", "device_id", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "\"cache unlocked - will rebuild %s\"", ",", "device_id", ")", "newpid", "=", "os", ".", "fork", "(", ")", "if", "newpid", "==", "0", ":", "self", ".", "rebuild_cache", "(", "device_id", ",", "command", ",", "command2", ")", "if", "expired", ":", "self", ".", "logger", ".", "info", "(", "\"returning expired cached device status %s\"", ",", "device_id", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "\"returning unexpired cached device status %s\"", ",", "device_id", ")", "return", "response", "[", "'response'", "]" ]
Gets response
[ "Gets", "response" ]
python
train
38.677419
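The interesting design choice above is serve-stale-while-revalidate: an expired entry is still returned to the caller while a forked child rebuilds the cache. The TTL decision in isolation, with invented cache contents:

from time import time

cached = {"response": {"cmd1": "19", "cmd2": "00"}, "ttl": int(time()) - 10}
expired = cached["ttl"] < int(time())
print("expired" if expired else "fresh", "->", cached["response"])
# An expired entry is returned anyway; only the rebuild happens out of band.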
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L863-L884
def actualize_source_type (self, sources, prop_set): """ Helper for 'actualize_sources'. For each passed source, actualizes it with the appropriate scanner. Returns the actualized virtual targets. """ assert is_iterable_typed(sources, VirtualTarget) assert isinstance(prop_set, property_set.PropertySet) result = [] for i in sources: scanner = None # FIXME: what's this? # if isinstance (i, str): # i = self.manager_.get_object (i) if i.type (): scanner = b2.build.type.get_scanner (i.type (), prop_set) r = i.actualize (scanner) result.append (r) return result
[ "def", "actualize_source_type", "(", "self", ",", "sources", ",", "prop_set", ")", ":", "assert", "is_iterable_typed", "(", "sources", ",", "VirtualTarget", ")", "assert", "isinstance", "(", "prop_set", ",", "property_set", ".", "PropertySet", ")", "result", "=", "[", "]", "for", "i", "in", "sources", ":", "scanner", "=", "None", "# FIXME: what's this?", "# if isinstance (i, str):", "# i = self.manager_.get_object (i)", "if", "i", ".", "type", "(", ")", ":", "scanner", "=", "b2", ".", "build", ".", "type", ".", "get_scanner", "(", "i", ".", "type", "(", ")", ",", "prop_set", ")", "r", "=", "i", ".", "actualize", "(", "scanner", ")", "result", ".", "append", "(", "r", ")", "return", "result" ]
Helper for 'actualize_sources'. For each passed source, actualizes it with the appropriate scanner. Returns the actualized virtual targets.
[ "Helper", "for", "actualize_sources", ".", "For", "each", "passed", "source", "actualizes", "it", "with", "the", "appropriate", "scanner", ".", "Returns", "the", "actualized", "virtual", "targets", "." ]
python
train
32.454545
Neurita/boyle
boyle/dicom/convert.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L193-L229
def remove_dcm2nii_underprocessed(filepaths): """ Return a subset of `filepaths`. Keep only the files that have a basename longer than the others with the same suffix. This works because dcm2nii prepends a prefix character for each processing step it performs automatically in the DICOM to NifTI conversion. Parameters ---------- filepaths: iterable of str Returns ------- cleaned_paths: iterable of str """ cln_flist = [] # sort them by size len_sorted = sorted(filepaths, key=len) for idx, fpath in enumerate(len_sorted): remove = False # get the basename and the rest of the files fname = op.basename(fpath) rest = len_sorted[idx+1:] # check if the basename is in the basename of the rest of the files for rest_fpath in rest: rest_file = op.basename(rest_fpath) if rest_file.endswith(fname): remove = True break if not remove: cln_flist.append(fpath) return cln_flist
[ "def", "remove_dcm2nii_underprocessed", "(", "filepaths", ")", ":", "cln_flist", "=", "[", "]", "# sort them by size", "len_sorted", "=", "sorted", "(", "filepaths", ",", "key", "=", "len", ")", "for", "idx", ",", "fpath", "in", "enumerate", "(", "len_sorted", ")", ":", "remove", "=", "False", "# get the basename and the rest of the files", "fname", "=", "op", ".", "basename", "(", "fpath", ")", "rest", "=", "len_sorted", "[", "idx", "+", "1", ":", "]", "# check if the basename is in the basename of the rest of the files", "for", "rest_fpath", "in", "rest", ":", "rest_file", "=", "op", ".", "basename", "(", "rest_fpath", ")", "if", "rest_file", ".", "endswith", "(", "fname", ")", ":", "remove", "=", "True", "break", "if", "not", "remove", ":", "cln_flist", ".", "append", "(", "fpath", ")", "return", "cln_flist" ]
Return a subset of `filepaths`. Keep only the files that have a basename longer than the others with the same suffix. This works because dcm2nii prepends a prefix character for each processing step it performs automatically in the DICOM to NifTI conversion. Parameters ---------- filepaths: iterable of str Returns ------- cleaned_paths: iterable of str
[ "Return", "a", "subset", "of", "filepaths", ".", "Keep", "only", "the", "files", "that", "have", "a", "basename", "longer", "than", "the", "others", "with", "same", "suffix", ".", "This", "works", "based", "on", "that", "dcm2nii", "appends", "a", "preffix", "character", "for", "each", "processing", "step", "it", "does", "automatically", "in", "the", "DICOM", "to", "NifTI", "conversion", "." ]
python
valid
27.810811
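A behavior check with invented dcm2nii-style names (prefix letters such as 'o' for reoriented and 'c' for cropped accumulate on the most-processed files), assuming the function above is in scope:

paths = [
    "/tmp/subj01.nii.gz",      # raw conversion: superseded below
    "/tmp/osubj01.nii.gz",     # reoriented: superseded below
    "/tmp/cosubj01.nii.gz",    # cropped + reoriented: kept
    "/tmp/subj02.nii.gz",      # no processed variants: kept
]
print(remove_dcm2nii_underprocessed(paths))
# -> ['/tmp/subj02.nii.gz', '/tmp/cosubj01.nii.gz']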
Murali-group/halp
halp/directed_hypergraph.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/directed_hypergraph.py#L833-L844
def get_forward_star(self, node): """Given a node, get a copy of that node's forward star. :param node: node to retrieve the forward-star of. :returns: set -- set of hyperedge_ids for the hyperedges in the node's forward star. :raises: ValueError -- No such node exists. """ if node not in self._node_attributes: raise ValueError("No such node exists.") return self._forward_star[node].copy()
[ "def", "get_forward_star", "(", "self", ",", "node", ")", ":", "if", "node", "not", "in", "self", ".", "_node_attributes", ":", "raise", "ValueError", "(", "\"No such node exists.\"", ")", "return", "self", ".", "_forward_star", "[", "node", "]", ".", "copy", "(", ")" ]
Given a node, get a copy of that node's forward star. :param node: node to retrieve the forward-star of. :returns: set -- set of hyperedge_ids for the hyperedges in the node's forward star. :raises: ValueError -- No such node exists.
[ "Given", "a", "node", "get", "a", "copy", "of", "that", "node", "s", "forward", "star", "." ]
python
train
39.583333
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3652-L3661
def isRef(self, doc, attr): """Determine whether an attribute is of type Ref. In case we have DTD(s) then this is simple, otherwise we use an heuristic: name Ref (upper or lowercase). """ if doc is None: doc__o = None else: doc__o = doc._o if attr is None: attr__o = None else: attr__o = attr._o ret = libxml2mod.xmlIsRef(doc__o, self._o, attr__o) return ret
[ "def", "isRef", "(", "self", ",", "doc", ",", "attr", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "if", "attr", "is", "None", ":", "attr__o", "=", "None", "else", ":", "attr__o", "=", "attr", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlIsRef", "(", "doc__o", ",", "self", ".", "_o", ",", "attr__o", ")", "return", "ret" ]
Determine whether an attribute is of type Ref. In case we have DTD(s) then this is simple, otherwise we use an heuristic: name Ref (upper or lowercase).
[ "Determine", "whether", "an", "attribute", "is", "of", "type", "Ref", ".", "In", "case", "we", "have", "DTD", "(", "s", ")", "then", "this", "is", "simple", "otherwise", "we", "use", "an", "heuristic", ":", "name", "Ref", "(", "upper", "or", "lowercase", ")", "." ]
python
train
42.6
google/grr
grr/core/grr_response_core/lib/parsers/wmi_parser.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/wmi_parser.py#L197-L216
def ParseMultiple(self, result_dicts): """Parse the WMI packages output.""" status = rdf_client.SoftwarePackage.InstallState.INSTALLED packages = [] for result_dict in result_dicts: result = result_dict.ToDict() # InstalledOn comes back in a godawful format such as '7/10/2013'. installed_on = self.AmericanDateToEpoch(result.get("InstalledOn", "")) packages.append( rdf_client.SoftwarePackage( name=result.get("HotFixID"), description=result.get("Caption"), installed_by=result.get("InstalledBy"), install_state=status, installed_on=installed_on)) if packages: yield rdf_client.SoftwarePackages(packages=packages)
[ "def", "ParseMultiple", "(", "self", ",", "result_dicts", ")", ":", "status", "=", "rdf_client", ".", "SoftwarePackage", ".", "InstallState", ".", "INSTALLED", "packages", "=", "[", "]", "for", "result_dict", "in", "result_dicts", ":", "result", "=", "result_dict", ".", "ToDict", "(", ")", "# InstalledOn comes back in a godawful format such as '7/10/2013'.", "installed_on", "=", "self", ".", "AmericanDateToEpoch", "(", "result", ".", "get", "(", "\"InstalledOn\"", ",", "\"\"", ")", ")", "packages", ".", "append", "(", "rdf_client", ".", "SoftwarePackage", "(", "name", "=", "result", ".", "get", "(", "\"HotFixID\"", ")", ",", "description", "=", "result", ".", "get", "(", "\"Caption\"", ")", ",", "installed_by", "=", "result", ".", "get", "(", "\"InstalledBy\"", ")", ",", "install_state", "=", "status", ",", "installed_on", "=", "installed_on", ")", ")", "if", "packages", ":", "yield", "rdf_client", ".", "SoftwarePackages", "(", "packages", "=", "packages", ")" ]
Parse the WMI packages output.
[ "Parse", "the", "WMI", "packages", "output", "." ]
python
train
36.4
mikedh/trimesh
trimesh/scene/transforms.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/transforms.py#L300-L306
def show(self): """ Plot the graph layout of the scene. """ import matplotlib.pyplot as plt nx.draw(self.transforms, with_labels=True) plt.show()
[ "def", "show", "(", "self", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "nx", ".", "draw", "(", "self", ".", "transforms", ",", "with_labels", "=", "True", ")", "plt", ".", "show", "(", ")" ]
Plot the graph layout of the scene.
[ "Plot", "the", "graph", "layout", "of", "the", "scene", "." ]
python
train
26.714286
rix0rrr/gcl
gcl/query.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L173-L186
def ldSet(self, what, key, value): """List/dictionary-aware set.""" if isListKey(key): # Make sure we keep the indexes consistent, insert missing_values # as necessary. We do remember the lists, so that we can remove # missing values after inserting all values from all selectors. self.lists[id(what)] = what ix = listKeyIndex(key) while len(what) <= ix: what.append(missing_value) what[ix] = value else: what[key] = value return value
[ "def", "ldSet", "(", "self", ",", "what", ",", "key", ",", "value", ")", ":", "if", "isListKey", "(", "key", ")", ":", "# Make sure we keep the indexes consistent, insert missing_values", "# as necessary. We do remember the lists, so that we can remove", "# missing values after inserting all values from all selectors.", "self", ".", "lists", "[", "id", "(", "what", ")", "]", "=", "what", "ix", "=", "listKeyIndex", "(", "key", ")", "while", "len", "(", "what", ")", "<=", "ix", ":", "what", ".", "append", "(", "missing_value", ")", "what", "[", "ix", "]", "=", "value", "else", ":", "what", "[", "key", "]", "=", "value", "return", "value" ]
List/dictionary-aware set.
[ "List", "/", "dictionary", "-", "aware", "set", "." ]
python
train
35.285714
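A standalone sketch of the list/dictionary-aware assignment described above; isListKey/listKeyIndex and the missing_value sentinel are simplified stand-ins for gcl's helpers.

missing_value = object()

def ld_set(what, key, value):
    if isinstance(key, int):  # stand-in for isListKey/listKeyIndex
        while len(what) <= key:
            what.append(missing_value)  # pad so indexes stay consistent
        what[key] = value
    else:
        what[key] = value
    return value

xs = []
ld_set(xs, 3, "d")
print([x if x is not missing_value else None for x in xs])
# -> [None, None, None, 'd']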
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py#L78-L81
def visit_Attribute(self, node): """Flatten one level of attribute access.""" new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx) return ast.copy_location(new_node, node)
[ "def", "visit_Attribute", "(", "self", ",", "node", ")", ":", "new_node", "=", "ast", ".", "Name", "(", "\"%s.%s\"", "%", "(", "node", ".", "value", ".", "id", ",", "node", ".", "attr", ")", ",", "node", ".", "ctx", ")", "return", "ast", ".", "copy_location", "(", "new_node", ",", "node", ")" ]
Flatten one level of attribute access.
[ "Flatten", "one", "level", "of", "attribute", "access", "." ]
python
test
51.75
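A minimal end-to-end demo of the flattening idea on a marker-style expression; only the standard ast module is needed.

import ast

class FlattenAttr(ast.NodeTransformer):
    def visit_Attribute(self, node):
        # One level of dotted access becomes a single Name node.
        new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(new_node, node)

tree = ast.parse("sys.platform", mode="eval")
flat = FlattenAttr().visit(tree)
print(ast.dump(flat.body))  # Name(id='sys.platform', ctx=Load())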
tensorflow/cleverhans
cleverhans/attack_bundling.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L490-L505
def save(criteria, report, report_path, adv_x_val): """ Saves the report and adversarial examples. :param criteria: dict, of the form returned by AttackGoal.get_criteria :param report: dict containing a confidence report :param report_path: string, filepath :param adv_x_val: numpy array containing dataset of adversarial examples """ print_stats(criteria['correctness'], criteria['confidence'], 'bundled') print("Saving to " + report_path) serial.save(report_path, report) assert report_path.endswith(".joblib") adv_x_path = report_path[:-len(".joblib")] + "_adv.npy" np.save(adv_x_path, adv_x_val)
[ "def", "save", "(", "criteria", ",", "report", ",", "report_path", ",", "adv_x_val", ")", ":", "print_stats", "(", "criteria", "[", "'correctness'", "]", ",", "criteria", "[", "'confidence'", "]", ",", "'bundled'", ")", "print", "(", "\"Saving to \"", "+", "report_path", ")", "serial", ".", "save", "(", "report_path", ",", "report", ")", "assert", "report_path", ".", "endswith", "(", "\".joblib\"", ")", "adv_x_path", "=", "report_path", "[", ":", "-", "len", "(", "\".joblib\"", ")", "]", "+", "\"_adv.npy\"", "np", ".", "save", "(", "adv_x_path", ",", "adv_x_val", ")" ]
Saves the report and adversarial examples. :param criteria: dict, of the form returned by AttackGoal.get_criteria :param report: dict containing a confidence report :param report_path: string, filepath :param adv_x_val: numpy array containing dataset of adversarial examples
[ "Saves", "the", "report", "and", "adversarial", "examples", ".", ":", "param", "criteria", ":", "dict", "of", "the", "form", "returned", "by", "AttackGoal", ".", "get_criteria", ":", "param", "report", ":", "dict", "containing", "a", "confidence", "report", ":", "param", "report_path", ":", "string", "filepath", ":", "param", "adv_x_val", ":", "numpy", "array", "containing", "dataset", "of", "adversarial", "examples" ]
python
train
38.25
dshean/pygeotools
pygeotools/lib/geolib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L647-L654
def geom_transform(geom, t_srs): """Transform a geometry in place """ s_srs = geom.GetSpatialReference() if not s_srs.IsSame(t_srs): ct = osr.CoordinateTransformation(s_srs, t_srs) geom.Transform(ct) geom.AssignSpatialReference(t_srs)
[ "def", "geom_transform", "(", "geom", ",", "t_srs", ")", ":", "s_srs", "=", "geom", ".", "GetSpatialReference", "(", ")", "if", "not", "s_srs", ".", "IsSame", "(", "t_srs", ")", ":", "ct", "=", "osr", ".", "CoordinateTransformation", "(", "s_srs", ",", "t_srs", ")", "geom", ".", "Transform", "(", "ct", ")", "geom", ".", "AssignSpatialReference", "(", "t_srs", ")" ]
Transform a geometry in place
[ "Transform", "a", "geometry", "in", "place" ]
python
train
33.375
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L2968-L2973
def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls, type): class cls(cls, object): pass return cls.__mro__[1:] return cls.__mro__
[ "def", "_get_mro", "(", "cls", ")", ":", "if", "not", "isinstance", "(", "cls", ",", "type", ")", ":", "class", "cls", "(", "cls", ",", "object", ")", ":", "pass", "return", "cls", ".", "__mro__", "[", "1", ":", "]", "return", "cls", ".", "__mro__" ]
Get an mro for a type or classic class
[ "Get", "an", "mro", "for", "a", "type", "or", "classic", "class" ]
python
test
31.166667
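A quick illustration, assuming _get_mro is in scope; on Python 3 every class is new-style, so the old-style branch above is effectively Python-2-only.

class A(object):
    pass

class B(A):
    pass

print(_get_mro(B))  # (<class 'B'>, <class 'A'>, <class 'object'>)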
CalebBell/thermo
thermo/mixture.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/mixture.py#L2194-L2209
def mul(self): r'''Viscosity of the mixture in the liquid phase at its current temperature, pressure, and composition in units of [Pa*s]. For calculation of this property at other temperatures and pressures, or specifying manually the method used to calculate it, and more - see the object oriented interface :obj:`thermo.viscosity.ViscosityLiquidMixture`; each Mixture instance creates one to actually perform the calculations. Examples -------- >>> Mixture(['water'], ws=[1], T=320).mul 0.0005767262693751547 ''' return self.ViscosityLiquidMixture(self.T, self.P, self.zs, self.ws)
[ "def", "mul", "(", "self", ")", ":", "return", "self", ".", "ViscosityLiquidMixture", "(", "self", ".", "T", ",", "self", ".", "P", ",", "self", ".", "zs", ",", "self", ".", "ws", ")" ]
r'''Viscosity of the mixture in the liquid phase at its current temperature, pressure, and composition in units of [Pa*s]. For calculation of this property at other temperatures and pressures, or specifying manually the method used to calculate it, and more - see the object oriented interface :obj:`thermo.viscosity.ViscosityLiquidMixture`; each Mixture instance creates one to actually perform the calculations. Examples -------- >>> Mixture(['water'], ws=[1], T=320).mul 0.0005767262693751547
[ "r", "Viscosity", "of", "the", "mixture", "in", "the", "liquid", "phase", "at", "its", "current", "temperature", "pressure", "and", "composition", "in", "units", "of", "[", "Pa", "*", "s", "]", "." ]
python
valid
42.125
sernst/cauldron
cauldron/cli/threads.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/threads.py#L42-L49
def is_running(self) -> bool: """Specifies whether or not the thread is running""" return ( self._has_started and self.is_alive() or self.completed_at is None or (datetime.utcnow() - self.completed_at).total_seconds() < 0.5 )
[ "def", "is_running", "(", "self", ")", "->", "bool", ":", "return", "(", "self", ".", "_has_started", "and", "self", ".", "is_alive", "(", ")", "or", "self", ".", "completed_at", "is", "None", "or", "(", "datetime", ".", "utcnow", "(", ")", "-", "self", ".", "completed_at", ")", ".", "total_seconds", "(", ")", "<", "0.5", ")" ]
Specifies whether or not the thread is running
[ "Specifies", "whether", "or", "not", "the", "thread", "is", "running" ]
python
train
36.25
user-cont/colin
colin/utils/cont.py
https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/utils/cont.py#L63-L86
def name(self): """ Get the string representation of the image (registry, namespace, repository and digest together). :return: str """ name_parts = [] if self.registry: name_parts.append(self.registry) if self.namespace: name_parts.append(self.namespace) if self.repository: name_parts.append(self.repository) name = "/".join(name_parts) if self.digest: name += "@{}".format(self.digest) elif self.tag: name += ":{}".format(self.tag) return name
[ "def", "name", "(", "self", ")", ":", "name_parts", "=", "[", "]", "if", "self", ".", "registry", ":", "name_parts", ".", "append", "(", "self", ".", "registry", ")", "if", "self", ".", "namespace", ":", "name_parts", ".", "append", "(", "self", ".", "namespace", ")", "if", "self", ".", "repository", ":", "name_parts", ".", "append", "(", "self", ".", "repository", ")", "name", "=", "\"/\"", ".", "join", "(", "name_parts", ")", "if", "self", ".", "digest", ":", "name", "+=", "\"@{}\"", ".", "format", "(", "self", ".", "digest", ")", "elif", "self", ".", "tag", ":", "name", "+=", "\":{}\"", ".", "format", "(", "self", ".", "tag", ")", "return", "name" ]
Get the string representation of the image (registry, namespace, repository and digest together). :return: str
[ "Get", "the", "string", "representation", "of", "the", "image", "(", "registry", "namespace", "repository", "and", "digest", "together", ")", "." ]
python
train
24.625
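A standalone restatement of the assembly rules above with invented values; note that a digest, when present, wins over a tag.

def image_name(registry, namespace, repository, tag=None, digest=None):
    parts = [p for p in (registry, namespace, repository) if p]
    name = "/".join(parts)
    if digest:
        name += "@{}".format(digest)
    elif tag:
        name += ":{}".format(tag)
    return name

print(image_name("registry.example.com", "team", "web", tag="1.0"))
# -> registry.example.com/team/web:1.0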
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L1881-L1891
def get_deadline_metadata(self): """Gets the metadata for the assessment deadline. return: (osid.Metadata) - metadata for the end time *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['deadline']) metadata.update({'existing_date_time_values': self._my_map['deadline']}) return Metadata(**metadata)
[ "def", "get_deadline_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'deadline'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_date_time_values'", ":", "self", ".", "_my_map", "[", "'deadline'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
Gets the metadata for the assessment deadline. return: (osid.Metadata) - metadata for the end time *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "metadata", "for", "the", "assessment", "deadline", "." ]
python
train
43.909091
PyCQA/pylint
pylint/pyreverse/diagrams.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/pyreverse/diagrams.py#L217-L223
def module(self, name): """return a module by its name, raise KeyError if not found """ for mod in self.modules(): if mod.node.name == name: return mod raise KeyError(name)
[ "def", "module", "(", "self", ",", "name", ")", ":", "for", "mod", "in", "self", ".", "modules", "(", ")", ":", "if", "mod", ".", "node", ".", "name", "==", "name", ":", "return", "mod", "raise", "KeyError", "(", "name", ")" ]
return a module by its name, raise KeyError if not found
[ "return", "a", "module", "by", "its", "name", "raise", "KeyError", "if", "not", "found" ]
python
test
32.285714
Rackspace-DOT/flask_keystone
setup.py
https://github.com/Rackspace-DOT/flask_keystone/blob/6f6d630e9e66a3beca6607b0b786510ec2a79747/setup.py#L120-L144
def read(*filenames, **kwargs): """ Read file contents into string. Used by setup.py to concatenate long_description. :param string filenames: Files to be read and concatenated. :rtype: string """ encoding = kwargs.get('encoding', 'utf-8') sep = kwargs.get('sep', '\n') buf = [] for filename in filenames: if path.splitext(filename)[1] == ".md": try: import pypandoc buf.append(pypandoc.convert_file(filename, 'rst')) continue except Exception: # fall back to reading the Markdown source as-is pass with io.open(filename, encoding=encoding) as f: buf.append(f.read()) return sep.join(buf)
[ "def", "read", "(", "*", "filenames", ",", "*", "*", "kwargs", ")", ":", "encoding", "=", "kwargs", ".", "get", "(", "'encoding'", ",", "'utf-8'", ")", "sep", "=", "kwargs", ".", "get", "(", "'sep'", ",", "'\\n'", ")", "buf", "=", "[", "]", "for", "filename", "in", "filenames", ":", "if", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "==", "\".md\"", ":", "try", ":", "import", "pypandoc", "buf", ".", "append", "(", "pypandoc", ".", "convert_file", "(", "filename", ",", "'rst'", ")", ")", "continue", "except", ":", "with", "io", ".", "open", "(", "filename", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "buf", ".", "append", "(", "f", ".", "read", "(", ")", ")", "with", "io", ".", "open", "(", "filename", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "buf", ".", "append", "(", "f", ".", "read", "(", ")", ")", "return", "sep", ".", "join", "(", "buf", ")" ]
Read file contents into string. Used by setup.py to concatenate long_description. :param string filenames: Files to be read and concatenated. :rtype: string
[ "Read", "file", "contents", "into", "string", "." ]
python
train
30.08
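A minimal usage sketch for the read() helper above, as it might appear later in the same setup.py; the filenames and package metadata are illustrative assumptions, not taken from the flask_keystone repository:

from setuptools import setup  # read() and its imports (io, path) defined as above

# Hypothetical files; pypandoc conversion is attempted for the .md file first.
long_description = read('README.md', 'CHANGES.txt')

setup(
    name='example-package',     # placeholder name
    version='0.1.0',            # placeholder version
    long_description=long_description,
)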
sdispater/orator
orator/orm/relations/belongs_to_many.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/belongs_to_many.py#L734-L757
def detach(self, ids=None, touch=True): """ Detach models from the relationship. """ if isinstance(ids, orator.orm.model.Model): ids = ids.get_key() if ids is None: ids = [] query = self._new_pivot_query() if not isinstance(ids, list): ids = [ids] if len(ids) > 0: query.where_in(self._other_key, ids) if touch: self.touch_if_touching() results = query.delete() return results
[ "def", "detach", "(", "self", ",", "ids", "=", "None", ",", "touch", "=", "True", ")", ":", "if", "isinstance", "(", "ids", ",", "orator", ".", "orm", ".", "model", ".", "Model", ")", ":", "ids", "=", "ids", ".", "get_key", "(", ")", "if", "ids", "is", "None", ":", "ids", "=", "[", "]", "query", "=", "self", ".", "_new_pivot_query", "(", ")", "if", "not", "isinstance", "(", "ids", ",", "list", ")", ":", "ids", "=", "[", "ids", "]", "if", "len", "(", "ids", ")", ">", "0", ":", "query", ".", "where_in", "(", "self", ".", "_other_key", ",", "ids", ")", "if", "touch", ":", "self", ".", "touch_if_touching", "(", ")", "results", "=", "query", ".", "delete", "(", ")", "return", "results" ]
Detach models from the relationship.
[ "Detach", "models", "from", "the", "relationship", "." ]
python
train
21.166667
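A short usage sketch for detach(); the User model and its 'roles' belongs-to-many relationship are hypothetical, not taken from the orator source:

# Assumes a hypothetical User model with a belongs-to-many 'roles' relation.
user = User.find(1)

user.roles().detach(2)          # detach a single related model by id
user.roles().detach([2, 3, 5])  # detach several ids at once
user.roles().detach()           # ids=None detaches every related model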
google-research/batch-ppo
agents/algorithms/ppo/utility.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L127-L157
def gradient_summaries(grad_vars, groups=None, scope='gradients'):
  """Create histogram summaries of the gradient.

  Summaries can be grouped via regexes matching variable names.

  Args:
    grad_vars: List of (gradient, variable) tuples as returned by optimizers.
    groups: Mapping of name to regex for grouping summaries.
    scope: Name scope for this operation.

  Returns:
    Summary tensor.
  """
  groups = groups or {r'all': r'.*'}
  grouped = collections.defaultdict(list)
  for grad, var in grad_vars:
    if grad is None:
      continue
    for name, pattern in groups.items():
      if re.match(pattern, var.name):
        name = re.sub(pattern, name, var.name)
        grouped[name].append(grad)
  for name in groups:
    if name not in grouped:
      tf.logging.warn("No variables matching '{}' group.".format(name))
  summaries = []
  for name, grads in grouped.items():
    grads = [tf.reshape(grad, [-1]) for grad in grads]
    grads = tf.concat(grads, 0)
    summaries.append(tf.summary.histogram(scope + '/' + name, grads))
  return tf.summary.merge(summaries)
[ "def", "gradient_summaries", "(", "grad_vars", ",", "groups", "=", "None", ",", "scope", "=", "'gradients'", ")", ":", "groups", "=", "groups", "or", "{", "r'all'", ":", "r'.*'", "}", "grouped", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "grad", ",", "var", "in", "grad_vars", ":", "if", "grad", "is", "None", ":", "continue", "for", "name", ",", "pattern", "in", "groups", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "pattern", ",", "var", ".", "name", ")", ":", "name", "=", "re", ".", "sub", "(", "pattern", ",", "name", ",", "var", ".", "name", ")", "grouped", "[", "name", "]", ".", "append", "(", "grad", ")", "for", "name", "in", "groups", ":", "if", "name", "not", "in", "grouped", ":", "tf", ".", "logging", ".", "warn", "(", "\"No variables matching '{}' group.\"", ".", "format", "(", "name", ")", ")", "summaries", "=", "[", "]", "for", "name", ",", "grads", "in", "grouped", ".", "items", "(", ")", ":", "grads", "=", "[", "tf", ".", "reshape", "(", "grad", ",", "[", "-", "1", "]", ")", "for", "grad", "in", "grads", "]", "grads", "=", "tf", ".", "concat", "(", "grads", ",", "0", ")", "summaries", ".", "append", "(", "tf", ".", "summary", ".", "histogram", "(", "scope", "+", "'/'", "+", "name", ",", "grads", ")", ")", "return", "tf", ".", "summary", ".", "merge", "(", "summaries", ")" ]
Create histogram summaries of the gradient.

  Summaries can be grouped via regexes matching variable names.

  Args:
    grad_vars: List of (gradient, variable) tuples as returned by optimizers.
    groups: Mapping of name to regex for grouping summaries.
    scope: Name scope for this operation.

  Returns:
    Summary tensor.
[ "Create", "histogram", "summaries", "of", "the", "gradient", "." ]
python
train
34.064516
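A sketch of wiring gradient_summaries() into a TF1-style graph, matching this codebase's TensorFlow 1.x idiom; the loss tensor and the group regexes are assumptions:

import tensorflow as tf  # TensorFlow 1.x, as used by the agents codebase

optimizer = tf.train.AdamOptimizer(1e-4)
grad_vars = optimizer.compute_gradients(loss)  # `loss` defined elsewhere
summary = gradient_summaries(
    grad_vars, groups={r'policy': r'.*policy.*', r'value': r'.*value.*'})
train_op = optimizer.apply_gradients(grad_vars)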
Netflix-Skunkworks/cloudaux
cloudaux/gcp/decorators.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/gcp/decorators.py#L100-L141
def iter_project(projects, key_file=None):
    """
    Call decorated function for each item in project list.
    Note: the function 'decorated' is expected to return a value plus
    a dictionary of exceptions.

    If item in list is a dictionary, we look for a 'project' and 'key_file'
    entry, respectively.

    If item in list is of type string_types, we assume it is the project
    string.  Default credentials will be used by the underlying
    client library.

    :param projects: list of project strings or list of dictionaries
                     Example: {'project':..., 'key_file':...}. Required.
    :type projects: ``list`` of ``str`` or ``list`` of ``dict``

    :param key_file: path on disk to keyfile, for use with all projects
    :type key_file: ``str``

    :returns: tuple containing a list of function output and an exceptions map
    :rtype: ``tuple`` of ``list``, ``dict``
    """
    def decorator(func):
        @wraps(func)
        def decorated_function(*args, **kwargs):
            item_list = []
            exception_map = {}
            for project in projects:
                if isinstance(project, string_types):
                    kwargs['project'] = project
                    if key_file:
                        kwargs['key_file'] = key_file
                elif isinstance(project, dict):
                    kwargs['project'] = project['project']
                    kwargs['key_file'] = project['key_file']
                itm, exc = func(*args, **kwargs)
                item_list.extend(itm)
                exception_map.update(exc)
            return (item_list, exception_map)
        return decorated_function
    return decorator
[ "def", "iter_project", "(", "projects", ",", "key_file", "=", "None", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "decorated_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "item_list", "=", "[", "]", "exception_map", "=", "{", "}", "for", "project", "in", "projects", ":", "if", "isinstance", "(", "project", ",", "string_types", ")", ":", "kwargs", "[", "'project'", "]", "=", "project", "if", "key_file", ":", "kwargs", "[", "'key_file'", "]", "=", "key_file", "elif", "isinstance", "(", "project", ",", "dict", ")", ":", "kwargs", "[", "'project'", "]", "=", "project", "[", "'project'", "]", "kwargs", "[", "'key_file'", "]", "=", "project", "[", "'key_file'", "]", "itm", ",", "exc", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "item_list", ".", "extend", "(", "itm", ")", "exception_map", ".", "update", "(", "exc", ")", "return", "(", "item_list", ",", "exception_map", ")", "return", "decorated_function", "return", "decorator" ]
Call decorated function for each item in project list.
Note: the function 'decorated' is expected to return a value plus
a dictionary of exceptions.

If item in list is a dictionary, we look for a 'project' and 'key_file'
entry, respectively.

If item in list is of type string_types, we assume it is the project
string.  Default credentials will be used by the underlying
client library.

:param projects: list of project strings or list of dictionaries
                 Example: {'project':..., 'key_file':...}. Required.
:type projects: ``list`` of ``str`` or ``list`` of ``dict``

:param key_file: path on disk to keyfile, for use with all projects
:type key_file: ``str``

:returns: tuple containing a list of function output and an exceptions map
:rtype: ``tuple`` of ``list``, ``dict``
[ "Call", "decorated", "function", "for", "each", "item", "in", "project", "list", "." ]
python
valid
38.904762
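A usage sketch for the iter_project decorator above; the wrapped function body and project names are hypothetical:

@iter_project(projects=['example-project-a', 'example-project-b'],
              key_file='/path/to/key.json')  # illustrative values
def list_instances(project=None, key_file=None, **kwargs):
    items, exceptions = [], {}
    # ... query the GCP API for `project` here, filling items/exceptions ...
    return items, exceptions

all_items, all_exceptions = list_instances()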
awslabs/sockeye
sockeye/training.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/training.py#L506-L511
def save(self, fname: str): """ Saves this training state to fname. """ with open(fname, "wb") as fp: pickle.dump(self, fp)
[ "def", "save", "(", "self", ",", "fname", ":", "str", ")", ":", "with", "open", "(", "fname", ",", "\"wb\"", ")", "as", "fp", ":", "pickle", ".", "dump", "(", "self", ",", "fp", ")" ]
Saves this training state to fname.
[ "Saves", "this", "training", "state", "to", "fname", "." ]
python
train
27
adafruit/Adafruit_Python_GPIO
Adafruit_GPIO/GPIO.py
https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/GPIO.py#L93-L99
def setup_pins(self, pins): """Setup multiple pins as inputs or outputs at once. Pins should be a dict of pin name to pin type (IN or OUT). """ # General implementation that can be optimized by derived classes. for pin, value in iter(pins.items()): self.setup(pin, value)
[ "def", "setup_pins", "(", "self", ",", "pins", ")", ":", "# General implementation that can be optimized by derived classes.", "for", "pin", ",", "value", "in", "iter", "(", "pins", ".", "items", "(", ")", ")", ":", "self", ".", "setup", "(", "pin", ",", "value", ")" ]
Setup multiple pins as inputs or outputs at once. Pins should be a dict of pin name to pin type (IN or OUT).
[ "Setup", "multiple", "pins", "as", "inputs", "or", "outputs", "at", "once", ".", "Pins", "should", "be", "a", "dict", "of", "pin", "name", "to", "pin", "type", "(", "IN", "or", "OUT", ")", "." ]
python
valid
45.428571
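A small usage sketch for setup_pins(); the pin numbers are illustrative, and `gpio` stands for any concrete platform instance derived from this base class:

import Adafruit_GPIO as GPIO

# Configure two outputs and one input in a single call.
gpio.setup_pins({18: GPIO.OUT, 23: GPIO.OUT, 24: GPIO.IN})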
ttinies/sc2gameMapRepo
sc2maptool/functions.py
https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/functions.py#L12-L25
def selectMap(name=None, excludeName=False, closestMatch=True, **tags):
    """select a map by name and/or criteria"""
    matches = filterMapAttrs(**tags)
    if not matches:
        raise c.InvalidMapSelection("could not find any matching maps given criteria: %s"%tags)
    if name: # if name is specified, consider only the best-matching names only
        matches = filterMapNames(name, excludeRegex=excludeName, closestMatch=closestMatch, records=matches)
    try:
        if closestMatch:
            return random.choice(matches) # pick any map at random that matches all criteria
        elif matches:
            return matches
    except IndexError:
        pass # matches is empty still
    raise c.InvalidMapSelection("requested map '%s', but could not locate "\
        "it within %s or its subdirectories. Submit the map to https://"\
        "github.com/ttinies/sc2gameMapRepo/tree/master/sc2maptool/maps"%(
        name, c.PATH_MAP_INSTALL))
[ "def", "selectMap", "(", "name", "=", "None", ",", "excludeName", "=", "False", ",", "closestMatch", "=", "True", ",", "*", "*", "tags", ")", ":", "matches", "=", "filterMapAttrs", "(", "*", "*", "tags", ")", "if", "not", "matches", ":", "raise", "c", ".", "InvalidMapSelection", "(", "\"could not find any matching maps given criteria: %s\"", "%", "tags", ")", "if", "name", ":", "# if name is specified, consider only the best-matching names only", "matches", "=", "filterMapNames", "(", "name", ",", "excludeRegex", "=", "excludeName", ",", "closestMatch", "=", "closestMatch", ",", "records", "=", "matches", ")", "try", ":", "if", "closestMatch", ":", "return", "random", ".", "choice", "(", "matches", ")", "# pick any map at random that matches all criteria", "elif", "matches", ":", "return", "matches", "except", "IndexError", ":", "pass", "# matches is empty still", "raise", "c", ".", "InvalidMapSelection", "(", "\"requested map '%s', but could not locate \"", "\"it within %s or its subdirectories. Submit the map to https://\"", "\"github.com/ttinies/sc2gameMapRepo/tree/master/sc2maptool/maps\"", "%", "(", "name", ",", "c", ".", "PATH_MAP_INSTALL", ")", ")" ]
select a map by name and/or criteria
[ "select", "a", "map", "by", "name", "and", "/", "or", "critiera" ]
python
train
65.571429
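A hedged usage sketch for selectMap(); the tag keyword below is an assumption about attributes defined in the map repository, not a documented parameter:

# Pick one map whose name best matches 'Abyssal'; any extra keyword acts
# as an attribute filter (the 'ladder' tag here is illustrative).
chosen = selectMap(name='Abyssal', ladder=True)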
mromanello/hucitlib
knowledge_base/surfext/__init__.py
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/surfext/__init__.py#L121-L162
def add_abbreviation(self, new_abbreviation):
    """
    Adds a new name variant to an author.

    :param new_abbreviation: the abbreviation to be added
    :return: `True` if the abbreviation is added, `False` otherwise
        (the abbreviation is a duplicate)
    """
    try:
        assert new_abbreviation not in self.get_abbreviations()
    except Exception as e:
        # TODO: raise a custom exception
        logger.warning("Duplicate abbreviation detected while adding \"%s\""%new_abbreviation)
        return False

    try:
        type_abbreviation = self.session.get_resource(BASE_URI_TYPES % "abbreviation"
                                    , self.session.get_class(surf.ns.ECRM['E55_Type']))
        abbreviation = [abbreviation
                        for name in self.ecrm_P1_is_identified_by
                        for abbreviation in name.ecrm_P139_has_alternative_form
                        if name.uri == surf.ns.EFRBROO['F12_Name']
                        and abbreviation.ecrm_P2_has_type.first == type_abbreviation][0]
        abbreviation.rdfs_label.append(Literal(new_abbreviation))
        abbreviation.update()
        return True
    except IndexError as e:
        # means there is no abbreviation instance yet
        type_abbreviation = self.session.get_resource(BASE_URI_TYPES % "abbreviation"
                                    , self.session.get_class(surf.ns.ECRM['E55_Type']))
        Appellation = self.session.get_class(surf.ns.ECRM['E41_Appellation'])
        abbreviation_uri = "%s/abbr" % str(self.subject)
        abbreviation = Appellation(abbreviation_uri)
        abbreviation.ecrm_P2_has_type = type_abbreviation
        abbreviation.rdfs_label.append(Literal(new_abbreviation))
        abbreviation.save()
        name = next(name for name in self.ecrm_P1_is_identified_by
                    if name.uri == surf.ns.EFRBROO['F12_Name'])
        name.ecrm_P139_has_alternative_form = abbreviation
        name.update()
        return True
    except Exception as e:
        raise e
[ "def", "add_abbreviation", "(", "self", ",", "new_abbreviation", ")", ":", "try", ":", "assert", "new_abbreviation", "not", "in", "self", ".", "get_abbreviations", "(", ")", "except", "Exception", "as", "e", ":", "# TODO: raise a custom exception", "logger", ".", "warning", "(", "\"Duplicate abbreviation detected while adding \\\"%s\\\"\"", "%", "new_abbreviation", ")", "return", "False", "try", ":", "type_abbreviation", "=", "self", ".", "session", ".", "get_resource", "(", "BASE_URI_TYPES", "%", "\"abbreviation\"", ",", "self", ".", "session", ".", "get_class", "(", "surf", ".", "ns", ".", "ECRM", "[", "'E55_Type'", "]", ")", ")", "abbreviation", "=", "[", "abbreviation", "for", "name", "in", "self", ".", "ecrm_P1_is_identified_by", "for", "abbreviation", "in", "name", ".", "ecrm_P139_has_alternative_form", "if", "name", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'F12_Name'", "]", "and", "abbreviation", ".", "ecrm_P2_has_type", ".", "first", "==", "type_abbreviation", "]", "[", "0", "]", "abbreviation", ".", "rdfs_label", ".", "append", "(", "Literal", "(", "new_abbreviation", ")", ")", "abbreviation", ".", "update", "(", ")", "return", "True", "except", "IndexError", "as", "e", ":", "# means there is no abbreviation instance yet", "type_abbreviation", "=", "self", ".", "session", ".", "get_resource", "(", "BASE_URI_TYPES", "%", "\"abbreviation\"", ",", "self", ".", "session", ".", "get_class", "(", "surf", ".", "ns", ".", "ECRM", "[", "'E55_Type'", "]", ")", ")", "Appellation", "=", "self", ".", "session", ".", "get_class", "(", "surf", ".", "ns", ".", "ECRM", "[", "'E41_Appellation'", "]", ")", "abbreviation_uri", "=", "\"%s/abbr\"", "%", "str", "(", "self", ".", "subject", ")", "abbreviation", "=", "Appellation", "(", "abbreviation_uri", ")", "abbreviation", ".", "ecrm_P2_has_type", "=", "type_abbreviation", "abbreviation", ".", "rdfs_label", ".", "append", "(", "Literal", "(", "new_abbreviation", ")", ")", "abbreviation", ".", "save", "(", ")", "name", "=", "(", "name", "for", "name", "in", "self", ".", "ecrm_P1_is_identified_by", "if", "name", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'F12_Name'", "]", ")", ".", "next", "(", ")", "name", ".", "ecrm_P139_has_alternative_form", "=", "abbreviation", "name", ".", "update", "(", ")", "return", "True", "except", "Exception", "as", "e", ":", "raise", "e" ]
Adds a new name variant to an author. :param new_abbreviation: the abbreviation to be added :return: `True` if the abbreviation is added, `False` otherwise (the abbreviation is a duplicate)
[ "Adds", "a", "new", "name", "variant", "to", "an", "author", "." ]
python
train
52.333333
RedFantom/ttkwidgets
ttkwidgets/timeline.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/timeline.py#L780-L798
def get_time_string(time, unit): """ Create a properly formatted string given a time and unit :param time: Time to format :type time: float :param unit: Unit to apply format of. Only supports hours ('h') and minutes ('m'). :type unit: str :return: A string in format '{whole}:{part}' :rtype: str """ supported_units = ["h", "m"] if unit not in supported_units: return "{}".format(round(time, 2)) hours, minutes = str(time).split(".") hours = int(hours) minutes = int(round(float("0.{}".format(minutes)) * 60)) return "{:02d}:{:02d}".format(hours, minutes)
[ "def", "get_time_string", "(", "time", ",", "unit", ")", ":", "supported_units", "=", "[", "\"h\"", ",", "\"m\"", "]", "if", "unit", "not", "in", "supported_units", ":", "return", "\"{}\"", ".", "format", "(", "round", "(", "time", ",", "2", ")", ")", "hours", ",", "minutes", "=", "str", "(", "time", ")", ".", "split", "(", "\".\"", ")", "hours", "=", "int", "(", "hours", ")", "minutes", "=", "int", "(", "round", "(", "float", "(", "\"0.{}\"", ".", "format", "(", "minutes", ")", ")", "*", "60", ")", ")", "return", "\"{:02d}:{:02d}\"", ".", "format", "(", "hours", ",", "minutes", ")" ]
Create a properly formatted string given a time and unit :param time: Time to format :type time: float :param unit: Unit to apply format of. Only supports hours ('h') and minutes ('m'). :type unit: str :return: A string in format '{whole}:{part}' :rtype: str
[ "Create", "a", "properly", "formatted", "string", "given", "a", "time", "and", "unit" ]
python
train
35.894737
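Worked examples for get_time_string(); the values follow directly from the implementation above:

get_time_string(1.5, 'h')    # '01:30'  (0.5 hours -> 30 minutes)
get_time_string(2.25, 'm')   # '02:15'
get_time_string(1.5, 's')    # '1.5'    (unsupported unit -> rounded string)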
GNS3/gns3-server
gns3server/compute/dynamips/nodes/c7200.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/c7200.py#L209-L229
def set_power_supplies(self, power_supplies): """ Sets the 2 power supplies with 0 = off, 1 = on. :param power_supplies: list of 2 power supplies. Example: [1, 0] = first power supply is on, second is off. """ power_supply_id = 0 for power_supply in power_supplies: yield from self._hypervisor.send('c7200 set_power_supply "{name}" {power_supply_id} {powered_on}'.format(name=self._name, power_supply_id=power_supply_id, powered_on=power_supply)) log.info('Router "{name}" [{id}]: power supply {power_supply_id} state updated to {powered_on}'.format(name=self._name, id=self._id, power_supply_id=power_supply_id, powered_on=power_supply)) power_supply_id += 1 self._power_supplies = power_supplies
[ "def", "set_power_supplies", "(", "self", ",", "power_supplies", ")", ":", "power_supply_id", "=", "0", "for", "power_supply", "in", "power_supplies", ":", "yield", "from", "self", ".", "_hypervisor", ".", "send", "(", "'c7200 set_power_supply \"{name}\" {power_supply_id} {powered_on}'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "power_supply_id", "=", "power_supply_id", ",", "powered_on", "=", "power_supply", ")", ")", "log", ".", "info", "(", "'Router \"{name}\" [{id}]: power supply {power_supply_id} state updated to {powered_on}'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ",", "power_supply_id", "=", "power_supply_id", ",", "powered_on", "=", "power_supply", ")", ")", "power_supply_id", "+=", "1", "self", ".", "_power_supplies", "=", "power_supplies" ]
Sets the 2 power supplies with 0 = off, 1 = on. :param power_supplies: list of 2 power supplies. Example: [1, 0] = first power supply is on, second is off.
[ "Sets", "the", "2", "power", "supplies", "with", "0", "=", "off", "1", "=", "on", "." ]
python
train
64.761905
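set_power_supplies() is an asyncio coroutine (note the `yield from` call), so it must be driven from another coroutine; a hedged call sketch, with `router` standing for a started C7200 instance:

import asyncio

@asyncio.coroutine
def power_cycle(router):
    # Turn power supply 0 on and power supply 1 off (illustrative values).
    yield from router.set_power_supplies([1, 0])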
F5Networks/f5-common-python
f5-sdk-dist/build_pkgs.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5-sdk-dist/build_pkgs.py#L351-L366
def store_json(obj, destination): """store_json Takes in a json-portable object and a filesystem-based destination and stores the json-portable object as JSON into the filesystem-based destination. This is blind, dumb, and stupid; thus, it can fail if the object is more complex than simple dict, list, int, str, etc. type object structures. """ with open(destination, 'r+') as FH: fcntl.lockf(FH, fcntl.LOCK_EX) json_in = json.loads(FH.read()) json_in.update(obj) # obj overwrites items in json_in... FH.seek(0) FH.write(json.dumps(json_in, sort_keys=True, indent=4, separators=(',', ': ')))
[ "def", "store_json", "(", "obj", ",", "destination", ")", ":", "with", "open", "(", "destination", ",", "'r+'", ")", "as", "FH", ":", "fcntl", ".", "lockf", "(", "FH", ",", "fcntl", ".", "LOCK_EX", ")", "json_in", "=", "json", ".", "loads", "(", "FH", ".", "read", "(", ")", ")", "json_in", ".", "update", "(", "obj", ")", "# obj overwrites items in json_in...", "FH", ".", "seek", "(", "0", ")", "FH", ".", "write", "(", "json", ".", "dumps", "(", "json_in", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")" ]
store_json Takes in a json-portable object and a filesystem-based destination and stores the json-portable object as JSON into the filesystem-based destination. This is blind, dumb, and stupid; thus, it can fail if the object is more complex than simple dict, list, int, str, etc. type object structures.
[ "store_json" ]
python
train
41.25
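A usage sketch for store_json(); the destination must already exist and hold valid JSON, since the file is opened 'r+' and read before being updated (the path below is illustrative):

# Merge new build metadata into an existing status file under a file lock.
store_json({'build': 'ok', 'version': '1.0.3'}, '/tmp/build_status.json')

One caveat worth noting: the file is rewritten from offset 0 without truncation, so a payload shorter than the previous contents can leave stale trailing bytes.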
tomi77/python-t77-date
t77_date/datetime.py
https://github.com/tomi77/python-t77-date/blob/b4b12ce6a02884fb62460f6b9068e7fa28979fce/t77_date/datetime.py#L74-L87
def set_next_week_day(val, week_day, iso=False): """ Set week day. New date will be greater or equal than input date. :param val: datetime or date :type val: datetime.datetime | datetime.date :param week_day: Week day to set :type week_day: int :param iso: week_day in ISO format, or not :type iso: bool :return: datetime.datetime | datetime.date """ return _set_week_day(val, week_day, val.isoweekday() if iso else val.weekday(), sign=1)
[ "def", "set_next_week_day", "(", "val", ",", "week_day", ",", "iso", "=", "False", ")", ":", "return", "_set_week_day", "(", "val", ",", "week_day", ",", "val", ".", "isoweekday", "(", ")", "if", "iso", "else", "val", ".", "weekday", "(", ")", ",", "sign", "=", "1", ")" ]
Set week day. New date will be greater or equal than input date. :param val: datetime or date :type val: datetime.datetime | datetime.date :param week_day: Week day to set :type week_day: int :param iso: week_day in ISO format, or not :type iso: bool :return: datetime.datetime | datetime.date
[ "Set", "week", "day", ".", "New", "date", "will", "be", "greater", "or", "equal", "than", "input", "date", ".", ":", "param", "val", ":", "datetime", "or", "date", ":", "type", "val", ":", "datetime", ".", "datetime", "|", "datetime", ".", "date", ":", "param", "week_day", ":", "Week", "day", "to", "set", ":", "type", "week_day", ":", "int", ":", "param", "iso", ":", "week_day", "in", "ISO", "format", "or", "not", ":", "type", "iso", ":", "bool", ":", "return", ":", "datetime", ".", "datetime", "|", "datetime", ".", "date" ]
python
train
35.571429
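A worked example for set_next_week_day(); the dates are illustrative (2024-01-03 is a Wednesday), assuming _set_week_day advances to the next occurrence as the docstring states:

from datetime import date

set_next_week_day(date(2024, 1, 3), 4)            # -> date(2024, 1, 5), Friday
set_next_week_day(date(2024, 1, 3), 5, iso=True)  # same result; ISO Friday == 5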
edx/edx-enterprise
enterprise/api_client/lms.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api_client/lms.py#L377-L398
def _get_results(self, identity_provider, param_name, param_value, result_field_name): """ Calls the third party auth api endpoint to get the mapping between usernames and remote ids. """ try: kwargs = {param_name: param_value} returned = self.client.providers(identity_provider).users.get(**kwargs) results = returned.get('results', []) except HttpNotFoundError: LOGGER.error( 'username not found for third party provider={provider}, {querystring_param}={id}'.format( provider=identity_provider, querystring_param=param_name, id=param_value ) ) results = [] for row in results: if row.get(param_name) == param_value: return row.get(result_field_name) return None
[ "def", "_get_results", "(", "self", ",", "identity_provider", ",", "param_name", ",", "param_value", ",", "result_field_name", ")", ":", "try", ":", "kwargs", "=", "{", "param_name", ":", "param_value", "}", "returned", "=", "self", ".", "client", ".", "providers", "(", "identity_provider", ")", ".", "users", ".", "get", "(", "*", "*", "kwargs", ")", "results", "=", "returned", ".", "get", "(", "'results'", ",", "[", "]", ")", "except", "HttpNotFoundError", ":", "LOGGER", ".", "error", "(", "'username not found for third party provider={provider}, {querystring_param}={id}'", ".", "format", "(", "provider", "=", "identity_provider", ",", "querystring_param", "=", "param_name", ",", "id", "=", "param_value", ")", ")", "results", "=", "[", "]", "for", "row", "in", "results", ":", "if", "row", ".", "get", "(", "param_name", ")", "==", "param_value", ":", "return", "row", ".", "get", "(", "result_field_name", ")", "return", "None" ]
Calls the third party auth api endpoint to get the mapping between usernames and remote ids.
[ "Calls", "the", "third", "party", "auth", "api", "endpoint", "to", "get", "the", "mapping", "between", "usernames", "and", "remote", "ids", "." ]
python
valid
40.5
clalancette/pycdlib
pycdlib/dr.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/dr.py#L1077-L1093
def set_data_location(self, current_extent, tag_location): # pylint: disable=unused-argument # type: (int, int) -> None ''' A method to set the new extent location that the data for this Directory Record should live at. Parameters: current_extent - The new extent. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized') self.new_extent_loc = current_extent if self.ptr is not None: self.ptr.update_extent_location(current_extent)
[ "def", "set_data_location", "(", "self", ",", "current_extent", ",", "tag_location", ")", ":", "# pylint: disable=unused-argument", "# type: (int, int) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Directory Record not yet initialized'", ")", "self", ".", "new_extent_loc", "=", "current_extent", "if", "self", ".", "ptr", "is", "not", "None", ":", "self", ".", "ptr", ".", "update_extent_location", "(", "current_extent", ")" ]
A method to set the new extent location that the data for this Directory Record should live at. Parameters: current_extent - The new extent. Returns: Nothing.
[ "A", "method", "to", "set", "the", "new", "extent", "location", "that", "the", "data", "for", "this", "Directory", "Record", "should", "live", "at", "." ]
python
train
36.176471
utek/pyseaweed
pyseaweed/weed.py
https://github.com/utek/pyseaweed/blob/218049329885425a2b8370157fa44952e64516be/pyseaweed/weed.py#L107-L124
def get_file_size(self, fid):
    """
    Gets size of uploaded file, or None if file doesn't exist.

    Args:
        **fid**: File identifier <volume_id>,<file_name_hash>

    Returns:
        Int or None
    """
    url = self.get_file_url(fid)
    res = self.conn.head(url)
    if res is not None:
        size = res.headers.get("content-length", None)
        if size is not None:
            return int(size)
    return None
[ "def", "get_file_size", "(", "self", ",", "fid", ")", ":", "url", "=", "self", ".", "get_file_url", "(", "fid", ")", "res", "=", "self", ".", "conn", ".", "head", "(", "url", ")", "if", "res", "is", "not", "None", ":", "size", "=", "res", ".", "headers", ".", "get", "(", "\"content-length\"", ",", "None", ")", "if", "size", "is", "not", "None", ":", "return", "int", "(", "size", ")", "return", "None" ]
Gets size of uploaded file, or None if file doesn't exist.

Args:
    **fid**: File identifier <volume_id>,<file_name_hash>

Returns:
    Int or None
[ "Gets", "size", "of", "uploaded", "file", "Or", "None", "if", "file", "doesn", "t", "exist", "." ]
python
train
26.5
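A usage sketch for get_file_size(); `weed` stands for a client instance exposing this method, and the fid value is illustrative:

size = weed.get_file_size('3,01637037d6')  # typical <volume_id>,<hash> shape
if size is not None:
    print('stored size: %d bytes' % size)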
collectiveacuity/jsonModel
jsonmodel/validators.py
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1414-L1468
def _walk(self, path_to_root, record_dict):

    ''' a helper method for finding the record endpoint from a path to root

    :param path_to_root: string with dot path to root
    :param record_dict: dictionary of the record to traverse
    :return: list, dict, string, number, or boolean at path to root
    '''

    # split path to root into segments
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)

    # construct empty fields
    record_endpoints = []

    # determine starting position
    if not path_segments[0]:
        path_segments.pop(0)

    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]

    # conduct recursive walk
    _walk_int(path_segments, record_dict)

    return record_endpoints
[ "def", "_walk", "(", "self", ",", "path_to_root", ",", "record_dict", ")", ":", "# split path to root into segments", "item_pattern", "=", "re", ".", "compile", "(", "'\\d+\\\\]'", ")", "dot_pattern", "=", "re", ".", "compile", "(", "'\\\\.|\\\\['", ")", "path_segments", "=", "dot_pattern", ".", "split", "(", "path_to_root", ")", "# construct empty fields", "record_endpoints", "=", "[", "]", "# determine starting position", "if", "not", "path_segments", "[", "0", "]", ":", "path_segments", ".", "pop", "(", "0", ")", "# define internal recursive function", "def", "_walk_int", "(", "path_segments", ",", "record_dict", ")", ":", "record_endpoint", "=", "record_dict", "for", "i", "in", "range", "(", "0", ",", "len", "(", "path_segments", ")", ")", ":", "if", "item_pattern", ".", "match", "(", "path_segments", "[", "i", "]", ")", ":", "for", "j", "in", "range", "(", "0", ",", "len", "(", "record_endpoint", ")", ")", ":", "if", "len", "(", "path_segments", ")", "==", "2", ":", "record_endpoints", ".", "append", "(", "record_endpoint", "[", "j", "]", ")", "else", ":", "stop_chain", "=", "False", "for", "x", "in", "range", "(", "0", ",", "i", ")", ":", "if", "item_pattern", ".", "match", "(", "path_segments", "[", "x", "]", ")", ":", "stop_chain", "=", "True", "if", "not", "stop_chain", ":", "shortened_segments", "=", "[", "]", "for", "z", "in", "range", "(", "i", "+", "1", ",", "len", "(", "path_segments", ")", ")", ":", "shortened_segments", ".", "append", "(", "path_segments", "[", "z", "]", ")", "_walk_int", "(", "shortened_segments", ",", "record_endpoint", "[", "j", "]", ")", "else", ":", "stop_chain", "=", "False", "for", "y", "in", "range", "(", "0", ",", "i", ")", ":", "if", "item_pattern", ".", "match", "(", "path_segments", "[", "y", "]", ")", ":", "stop_chain", "=", "True", "if", "not", "stop_chain", ":", "if", "len", "(", "path_segments", ")", "==", "i", "+", "1", ":", "record_endpoints", ".", "append", "(", "record_endpoint", "[", "path_segments", "[", "i", "]", "]", ")", "else", ":", "record_endpoint", "=", "record_endpoint", "[", "path_segments", "[", "i", "]", "]", "# conduct recursive walk", "_walk_int", "(", "path_segments", ",", "record_dict", ")", "return", "record_endpoints" ]
a helper method for finding the record endpoint from a path to root

:param path_to_root: string with dot path to root
:param record_dict: dictionary of the record to traverse
:return: list, dict, string, number, or boolean at path to root
[ "a", "helper", "method", "for", "finding", "the", "record", "endpoint", "from", "a", "path", "to", "root" ]
python
train
40.872727
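A worked example of the dot-path convention _walk() expects; the record is hypothetical and `validator` stands for a jsonModel instance. Note that a numeric '[n]' segment is only matched structurally: the index value itself is ignored and the walk fans out over every list item:

record = {'address': {'city': 'Berlin'},
          'tags': [{'name': 'a'}, {'name': 'b'}]}

validator._walk('.address.city', record)   # -> ['Berlin']
validator._walk('.tags[0].name', record)   # -> ['a', 'b'] (all items visited)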
secynic/ipwhois
ipwhois/net.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/net.py#L672-L793
def get_http_json(self, url=None, retry_count=3, rate_limit_timeout=120,
                  headers=None):
    """
    The function for retrieving a json result via HTTP.

    Args:
        url (:obj:`str`): The URL to retrieve (required).
        retry_count (:obj:`int`): The number of times to retry in case socket
            errors, timeouts, connection resets, etc. are encountered.
            Defaults to 3.
        rate_limit_timeout (:obj:`int`): The number of seconds to wait before
            retrying when a rate limit notice is returned via rdap+json or
            HTTP error 429. Defaults to 120.
        headers (:obj:`dict`): The HTTP headers. The Accept header defaults
            to 'application/rdap+json'.

    Returns:
        dict: The data in json format.

    Raises:
        HTTPLookupError: The HTTP lookup failed.
        HTTPRateLimitError: The HTTP request rate limited and retries
            were exhausted.
    """

    if headers is None:
        headers = {'Accept': 'application/rdap+json'}

    try:

        # Create the connection for the whois query.
        log.debug('HTTP query for {0} at {1}'.format(
            self.address_str, url))
        conn = Request(url, headers=headers)
        data = self.opener.open(conn, timeout=self.timeout)
        try:
            d = json.loads(data.readall().decode('utf-8', 'ignore'))
        except AttributeError:  # pragma: no cover
            d = json.loads(data.read().decode('utf-8', 'ignore'))

        try:
            # Tests written but commented out. I do not want to send a
            # flood of requests on every test.
            for tmp in d['notices']:  # pragma: no cover
                if tmp['title'] == 'Rate Limit Notice':
                    log.debug('RDAP query rate limit exceeded.')

                    if retry_count > 0:
                        log.debug('Waiting {0} seconds...'.format(
                            str(rate_limit_timeout)))
                        sleep(rate_limit_timeout)
                        return self.get_http_json(
                            url=url, retry_count=retry_count-1,
                            rate_limit_timeout=rate_limit_timeout,
                            headers=headers
                        )
                    else:
                        raise HTTPRateLimitError(
                            'HTTP lookup failed for {0}. Rate limit '
                            'exceeded, wait and try again (possibly a '
                            'temporary block).'.format(url))

        except (KeyError, IndexError):  # pragma: no cover
            pass

        return d

    except HTTPError as e:  # pragma: no cover
        # RIPE is producing this HTTP error rather than a JSON error.
        if e.code == 429:
            log.debug('HTTP query rate limit exceeded.')

            if retry_count > 0:
                log.debug('Waiting {0} seconds...'.format(
                    str(rate_limit_timeout)))
                sleep(rate_limit_timeout)
                return self.get_http_json(
                    url=url, retry_count=retry_count - 1,
                    rate_limit_timeout=rate_limit_timeout,
                    headers=headers
                )
            else:
                raise HTTPRateLimitError(
                    'HTTP lookup failed for {0}. Rate limit '
                    'exceeded, wait and try again (possibly a '
                    'temporary block).'.format(url))
        else:
            raise HTTPLookupError('HTTP lookup failed for {0} with error '
                                  'code {1}.'.format(url, str(e.code)))

    except (URLError, socket.timeout, socket.error) as e:
        log.debug('HTTP query socket error: {0}'.format(e))
        if retry_count > 0:
            log.debug('HTTP query retrying (count: {0})'.format(
                str(retry_count)))
            return self.get_http_json(
                url=url, retry_count=retry_count-1,
                rate_limit_timeout=rate_limit_timeout,
                headers=headers
            )
        else:
            raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
                url))

    except (HTTPLookupError, HTTPRateLimitError) as e:  # pragma: no cover
        raise e

    except:  # pragma: no cover
        raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))
[ "def", "get_http_json", "(", "self", ",", "url", "=", "None", ",", "retry_count", "=", "3", ",", "rate_limit_timeout", "=", "120", ",", "headers", "=", "None", ")", ":", "if", "headers", "is", "None", ":", "headers", "=", "{", "'Accept'", ":", "'application/rdap+json'", "}", "try", ":", "# Create the connection for the whois query.", "log", ".", "debug", "(", "'HTTP query for {0} at {1}'", ".", "format", "(", "self", ".", "address_str", ",", "url", ")", ")", "conn", "=", "Request", "(", "url", ",", "headers", "=", "headers", ")", "data", "=", "self", ".", "opener", ".", "open", "(", "conn", ",", "timeout", "=", "self", ".", "timeout", ")", "try", ":", "d", "=", "json", ".", "loads", "(", "data", ".", "readall", "(", ")", ".", "decode", "(", "'utf-8'", ",", "'ignore'", ")", ")", "except", "AttributeError", ":", "# pragma: no cover", "d", "=", "json", ".", "loads", "(", "data", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ",", "'ignore'", ")", ")", "try", ":", "# Tests written but commented out. I do not want to send a", "# flood of requests on every test.", "for", "tmp", "in", "d", "[", "'notices'", "]", ":", "# pragma: no cover", "if", "tmp", "[", "'title'", "]", "==", "'Rate Limit Notice'", ":", "log", ".", "debug", "(", "'RDAP query rate limit exceeded.'", ")", "if", "retry_count", ">", "0", ":", "log", ".", "debug", "(", "'Waiting {0} seconds...'", ".", "format", "(", "str", "(", "rate_limit_timeout", ")", ")", ")", "sleep", "(", "rate_limit_timeout", ")", "return", "self", ".", "get_http_json", "(", "url", "=", "url", ",", "retry_count", "=", "retry_count", "-", "1", ",", "rate_limit_timeout", "=", "rate_limit_timeout", ",", "headers", "=", "headers", ")", "else", ":", "raise", "HTTPRateLimitError", "(", "'HTTP lookup failed for {0}. Rate limit '", "'exceeded, wait and try again (possibly a '", "'temporary block).'", ".", "format", "(", "url", ")", ")", "except", "(", "KeyError", ",", "IndexError", ")", ":", "# pragma: no cover", "pass", "return", "d", "except", "HTTPError", "as", "e", ":", "# pragma: no cover", "# RIPE is producing this HTTP error rather than a JSON error.", "if", "e", ".", "code", "==", "429", ":", "log", ".", "debug", "(", "'HTTP query rate limit exceeded.'", ")", "if", "retry_count", ">", "0", ":", "log", ".", "debug", "(", "'Waiting {0} seconds...'", ".", "format", "(", "str", "(", "rate_limit_timeout", ")", ")", ")", "sleep", "(", "rate_limit_timeout", ")", "return", "self", ".", "get_http_json", "(", "url", "=", "url", ",", "retry_count", "=", "retry_count", "-", "1", ",", "rate_limit_timeout", "=", "rate_limit_timeout", ",", "headers", "=", "headers", ")", "else", ":", "raise", "HTTPRateLimitError", "(", "'HTTP lookup failed for {0}. 
Rate limit '", "'exceeded, wait and try again (possibly a '", "'temporary block).'", ".", "format", "(", "url", ")", ")", "else", ":", "raise", "HTTPLookupError", "(", "'HTTP lookup failed for {0} with error '", "'code {1}.'", ".", "format", "(", "url", ",", "str", "(", "e", ".", "code", ")", ")", ")", "except", "(", "URLError", ",", "socket", ".", "timeout", ",", "socket", ".", "error", ")", "as", "e", ":", "log", ".", "debug", "(", "'HTTP query socket error: {0}'", ".", "format", "(", "e", ")", ")", "if", "retry_count", ">", "0", ":", "log", ".", "debug", "(", "'HTTP query retrying (count: {0})'", ".", "format", "(", "str", "(", "retry_count", ")", ")", ")", "return", "self", ".", "get_http_json", "(", "url", "=", "url", ",", "retry_count", "=", "retry_count", "-", "1", ",", "rate_limit_timeout", "=", "rate_limit_timeout", ",", "headers", "=", "headers", ")", "else", ":", "raise", "HTTPLookupError", "(", "'HTTP lookup failed for {0}.'", ".", "format", "(", "url", ")", ")", "except", "(", "HTTPLookupError", ",", "HTTPRateLimitError", ")", "as", "e", ":", "# pragma: no cover", "raise", "e", "except", ":", "# pragma: no cover", "raise", "HTTPLookupError", "(", "'HTTP lookup failed for {0}.'", ".", "format", "(", "url", ")", ")" ]
The function for retrieving a json result via HTTP.

Args:
    url (:obj:`str`): The URL to retrieve (required).
    retry_count (:obj:`int`): The number of times to retry in case socket
        errors, timeouts, connection resets, etc. are encountered.
        Defaults to 3.
    rate_limit_timeout (:obj:`int`): The number of seconds to wait before
        retrying when a rate limit notice is returned via rdap+json or
        HTTP error 429. Defaults to 120.
    headers (:obj:`dict`): The HTTP headers. The Accept header defaults
        to 'application/rdap+json'.

Returns:
    dict: The data in json format.

Raises:
    HTTPLookupError: The HTTP lookup failed.
    HTTPRateLimitError: The HTTP request rate limited and retries
        were exhausted.
[ "The", "function", "for", "retrieving", "a", "json", "result", "via", "HTTP", "." ]
python
train
37.852459
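A hedged call sketch for get_http_json(); the RDAP URL is illustrative, and `net` is an ipwhois Net instance constructed elsewhere:

from ipwhois.net import Net

net = Net('74.125.225.229')
result = net.get_http_json(
    url='http://rdap.arin.net/registry/ip/74.125.225.229',
    retry_count=3)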