Dataset schema (string columns list min/max length; class columns list the number of distinct values):

- repo: string, length 7 to 55
- path: string, length 4 to 223
- url: string, length 87 to 315
- code: string, length 75 to 104k
- code_tokens: list
- docstring: string, length 1 to 46.9k
- docstring_tokens: list
- language: string, 1 value
- partition: string, 3 values
- avg_line_len: float64, 7.91 to 980
saltstack/salt
salt/modules/win_status.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_status.py#L506-L589
def master(master=None, connected=True):
    '''
    .. versionadded:: 2015.5.0

    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
    address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    def _win_remotes_on(port):
        '''
        Windows specific helper function.
        Returns set of ipv4 host addresses of remote established connections
        on local or remote tcp port.

        Parses output of shell 'netstat' to get connections

        PS C:> netstat -n -p TCP

        Active Connections

          Proto  Local Address          Foreign Address        State
          TCP    10.1.1.26:3389         10.1.1.1:4505          ESTABLISHED
          TCP    10.1.1.26:56862        10.1.1.10:49155        TIME_WAIT
          TCP    10.1.1.26:56868        169.254.169.254:80     CLOSE_WAIT
          TCP    127.0.0.1:49197        127.0.0.1:49198        ESTABLISHED
          TCP    127.0.0.1:49198        127.0.0.1:49197        ESTABLISHED
        '''
        remotes = set()
        try:
            data = subprocess.check_output(['netstat', '-n', '-p', 'TCP'])  # pylint: disable=minimum-python-version
        except subprocess.CalledProcessError:
            log.error('Failed netstat')
            raise

        lines = salt.utils.stringutils.to_unicode(data).split('\n')
        for line in lines:
            if 'ESTABLISHED' not in line:
                continue
            chunks = line.split()
            remote_host, remote_port = chunks[2].rsplit(':', 1)
            if int(remote_port) != port:
                continue
            remotes.add(remote_host)
        return remotes

    # the default publishing port
    port = 4505
    master_ips = None

    if master:
        master_ips = _host_to_ips(master)

    if not master_ips:
        return

    if __salt__['config.get']('publish_port') != '':
        port = int(__salt__['config.get']('publish_port'))

    master_connection_status = False
    connected_ips = _win_remotes_on(port)

    # Get connection status for master
    for master_ip in master_ips:
        if master_ip in connected_ips:
            master_connection_status = True
            break

    # Connection to master is not as expected
    if master_connection_status is not connected:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        if master_connection_status:
            event.fire_event({'master': master}, salt.minion.master_event(type='connected'))
        else:
            event.fire_event({'master': master}, salt.minion.master_event(type='disconnected'))

    return master_connection_status
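The helper takes the remote endpoint from the third whitespace-delimited column of each ESTABLISHED line and splits host from port on the last colon. A minimal standalone sketch of that parsing step (the sample line is made up):

# Minimal sketch of the netstat-line parsing used above; sample data is invented.
sample = 'TCP    10.1.1.26:3389    10.1.1.1:4505    ESTABLISHED'

chunks = sample.split()
remote_host, remote_port = chunks[2].rsplit(':', 1)  # split on the last colon only
print(remote_host, remote_port)  # -> 10.1.1.1 4505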
[ "def", "master", "(", "master", "=", "None", ",", "connected", "=", "True", ")", ":", "def", "_win_remotes_on", "(", "port", ")", ":", "'''\n Windows specific helper function.\n Returns set of ipv4 host addresses of remote established connections\n on local or remote tcp port.\n\n Parses output of shell 'netstat' to get connections\n\n PS C:> netstat -n -p TCP\n\n Active Connections\n\n Proto Local Address Foreign Address State\n TCP 10.1.1.26:3389 10.1.1.1:4505 ESTABLISHED\n TCP 10.1.1.26:56862 10.1.1.10:49155 TIME_WAIT\n TCP 10.1.1.26:56868 169.254.169.254:80 CLOSE_WAIT\n TCP 127.0.0.1:49197 127.0.0.1:49198 ESTABLISHED\n TCP 127.0.0.1:49198 127.0.0.1:49197 ESTABLISHED\n '''", "remotes", "=", "set", "(", ")", "try", ":", "data", "=", "subprocess", ".", "check_output", "(", "[", "'netstat'", ",", "'-n'", ",", "'-p'", ",", "'TCP'", "]", ")", "# pylint: disable=minimum-python-version", "except", "subprocess", ".", "CalledProcessError", ":", "log", ".", "error", "(", "'Failed netstat'", ")", "raise", "lines", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "data", ")", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "'ESTABLISHED'", "not", "in", "line", ":", "continue", "chunks", "=", "line", ".", "split", "(", ")", "remote_host", ",", "remote_port", "=", "chunks", "[", "2", "]", ".", "rsplit", "(", "':'", ",", "1", ")", "if", "int", "(", "remote_port", ")", "!=", "port", ":", "continue", "remotes", ".", "add", "(", "remote_host", ")", "return", "remotes", "# the default publishing port", "port", "=", "4505", "master_ips", "=", "None", "if", "master", ":", "master_ips", "=", "_host_to_ips", "(", "master", ")", "if", "not", "master_ips", ":", "return", "if", "__salt__", "[", "'config.get'", "]", "(", "'publish_port'", ")", "!=", "''", ":", "port", "=", "int", "(", "__salt__", "[", "'config.get'", "]", "(", "'publish_port'", ")", ")", "master_connection_status", "=", "False", "connected_ips", "=", "_win_remotes_on", "(", "port", ")", "# Get connection status for master", "for", "master_ip", "in", "master_ips", ":", "if", "master_ip", "in", "connected_ips", ":", "master_connection_status", "=", "True", "break", "# Connection to master is not as expected", "if", "master_connection_status", "is", "not", "connected", ":", "event", "=", "salt", ".", "utils", ".", "event", ".", "get_event", "(", "'minion'", ",", "opts", "=", "__opts__", ",", "listen", "=", "False", ")", "if", "master_connection_status", ":", "event", ".", "fire_event", "(", "{", "'master'", ":", "master", "}", ",", "salt", ".", "minion", ".", "master_event", "(", "type", "=", "'connected'", ")", ")", "else", ":", "event", ".", "fire_event", "(", "{", "'master'", ":", "master", "}", ",", "salt", ".", "minion", ".", "master_event", "(", "type", "=", "'disconnected'", ")", ")", "return", "master_connection_status" ]
.. versionadded:: 2015.5.0 Fire an event if the minion gets disconnected from its master. This function is meant to be run via a scheduled job from the minion. If master_ip is an FQDN/Hostname, is must be resolvable to a valid IPv4 address. CLI Example: .. code-block:: bash salt '*' status.master
[ "..", "versionadded", "::", "2015", ".", "5", ".", "0" ]
python
train
32.452381
ausaki/subfinder
subfinder/subsearcher/subsearcher.py
https://github.com/ausaki/subfinder/blob/b74b79214f618c603a551b9334ebb110ccf9684c/subfinder/subsearcher/subsearcher.py#L111-L173
def _parse_videoname(cls, videoname):
    """ parse videoname and return video info dict
    video info contains:
    - title, the name of video
    - sub_title, the sub_title of video
    - resolution,
    - source,
    -
    - season, defaults to 0
    - episode, defaults to 0
    """
    info = {
        'title': '',
        'season': 0,
        'episode': 0,
        'sub_title': '',
        'resolution': '',
        'source': '',
        'audio_encoding': '',
        'video_encoding': '',
    }
    last_index = 0
    m = cls.RE_SEASON_EPISODE.search(videoname)
    if m:
        info['season'] = int(m.group('season'))
        info['episode'] = int(m.group('episode'))
        s, e = m.span()
        info['title'] = videoname[0:s].strip('.')
        last_index = e
    else:
        m = cls.RE_SEASON.search(videoname)
        if m:
            info['season'] = int(m.group('season'))
            s, e = m.span()
            info['title'] = videoname[0:s].strip('.')
            last_index = e

    m = cls.RE_RESOLUTION.search(videoname)
    if m:
        info['resolution'] = m.group('resolution')
        s, e = m.span()
        if info['title'] == '':
            info['title'] = videoname[0:s].strip('.')
        if info['season'] > 0 and info['episode'] > 0:
            info['sub_title'] = videoname[last_index:s].strip('.')
        last_index = e

    if info['title'] == '':
        info['title'] = videoname

    m = cls.RE_SOURCE.search(videoname)
    if m:
        info['source'] = m.group('source')

    m = cls.RE_AUDIO_ENC.search(videoname)
    if m:
        info['audio_encoding'] = m.group('audio_encoding')

    m = cls.RE_VIDEO_ENC.search(videoname)
    if m:
        info['video_encoding'] = m.group('video_encoding')
    return info
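The class regex constants (RE_SEASON_EPISODE and friends) are not shown in this row; a hypothetical pattern in the same spirit illustrates how the named groups and match span feed the info dict:

import re

# Hypothetical stand-in for cls.RE_SEASON_EPISODE; the real pattern lives on the class.
RE_SEASON_EPISODE = re.compile(r'[Ss](?P<season>\d+)\.?[Ee](?P<episode>\d+)')

m = RE_SEASON_EPISODE.search('Some.Show.S02E05.720p.WEB-DL.mkv')
if m:
    print(int(m.group('season')), int(m.group('episode')))  # -> 2 5
    print(m.span())  # character span later used to slice out the title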
[ "def", "_parse_videoname", "(", "cls", ",", "videoname", ")", ":", "info", "=", "{", "'title'", ":", "''", ",", "'season'", ":", "0", ",", "'episode'", ":", "0", ",", "'sub_title'", ":", "''", ",", "'resolution'", ":", "''", ",", "'source'", ":", "''", ",", "'audio_encoding'", ":", "''", ",", "'video_encoding'", ":", "''", ",", "}", "last_index", "=", "0", "m", "=", "cls", ".", "RE_SEASON_EPISODE", ".", "search", "(", "videoname", ")", "if", "m", ":", "info", "[", "'season'", "]", "=", "int", "(", "m", ".", "group", "(", "'season'", ")", ")", "info", "[", "'episode'", "]", "=", "int", "(", "m", ".", "group", "(", "'episode'", ")", ")", "s", ",", "e", "=", "m", ".", "span", "(", ")", "info", "[", "'title'", "]", "=", "videoname", "[", "0", ":", "s", "]", ".", "strip", "(", "'.'", ")", "last_index", "=", "e", "else", ":", "m", "=", "cls", ".", "RE_SEASON", ".", "search", "(", "videoname", ")", "if", "m", ":", "info", "[", "'season'", "]", "=", "int", "(", "m", ".", "group", "(", "'season'", ")", ")", "s", ",", "e", "=", "m", ".", "span", "(", ")", "info", "[", "'title'", "]", "=", "videoname", "[", "0", ":", "s", "]", ".", "strip", "(", "'.'", ")", "last_index", "=", "e", "m", "=", "cls", ".", "RE_RESOLUTION", ".", "search", "(", "videoname", ")", "if", "m", ":", "info", "[", "'resolution'", "]", "=", "m", ".", "group", "(", "'resolution'", ")", "s", ",", "e", "=", "m", ".", "span", "(", ")", "if", "info", "[", "'title'", "]", "==", "''", ":", "info", "[", "'title'", "]", "=", "videoname", "[", "0", ":", "s", "]", ".", "strip", "(", "'.'", ")", "if", "info", "[", "'season'", "]", ">", "0", "and", "info", "[", "'episode'", "]", ">", "0", ":", "info", "[", "'sub_title'", "]", "=", "videoname", "[", "last_index", ":", "s", "]", ".", "strip", "(", "'.'", ")", "last_index", "=", "e", "if", "info", "[", "'title'", "]", "==", "''", ":", "info", "[", "'title'", "]", "=", "videoname", "m", "=", "cls", ".", "RE_SOURCE", ".", "search", "(", "videoname", ")", "if", "m", ":", "info", "[", "'source'", "]", "=", "m", ".", "group", "(", "'source'", ")", "m", "=", "cls", ".", "RE_AUDIO_ENC", ".", "search", "(", "videoname", ")", "if", "m", ":", "info", "[", "'audio_encoding'", "]", "=", "m", ".", "group", "(", "'audio_encoding'", ")", "m", "=", "cls", ".", "RE_VIDEO_ENC", ".", "search", "(", "videoname", ")", "if", "m", ":", "info", "[", "'video_encoding'", "]", "=", "m", ".", "group", "(", "'video_encoding'", ")", "return", "info" ]
parse videoname and return video info dict video info contains: - title, the name of video - sub_title, the sub_title of video - resolution, - source, - - season, defaults to 0 - episode, defaults to 0
[ "parse", "videoname", "and", "return", "video", "info", "dict", "video", "info", "contains", ":", "-", "title", "the", "name", "of", "video", "-", "sub_title", "the", "sub_title", "of", "video", "-", "resolution", "-", "source", "-", "-", "season", "defaults", "to", "0", "-", "episode", "defaults", "to", "0" ]
python
train
30.587302
gmr/helper
helper/setupext.py
https://github.com/gmr/helper/blob/fe8e45fc8eabf619429b2940c682c252ee33c082/helper/setupext.py#L50-L70
def run(self):
    """Import the controller and run it.

    This mimics the processing done by :func:`helper.start` when a
    controller is run in the foreground. A new instance of
    ``self.controller`` is created and run until a keyboard interrupt
    occurs or the controller stops on its own accord.

    """
    segments = self.controller.split('.')
    controller_class = reduce(getattr, segments[1:],
                              __import__('.'.join(segments[:-1])))
    cmd_line = ['-f']
    if self.configuration is not None:
        cmd_line.extend(['-c', self.configuration])
    args = parser.get().parse_args(cmd_line)
    controller_instance = controller_class(args, platform)
    try:
        controller_instance.start()
    except KeyboardInterrupt:
        controller_instance.stop()
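The reduce/getattr/__import__ combination resolves a dotted path such as 'pkg.module.Class' to the named object. A self-contained sketch of the same trick against the standard library:

from functools import reduce

def resolve(dotted):
    """Resolve a dotted path such as 'os.path.join' to the named object."""
    segments = dotted.split('.')
    # __import__('os.path') returns the top-level package 'os';
    # reduce then walks the remaining attributes down to the target.
    return reduce(getattr, segments[1:], __import__('.'.join(segments[:-1])))

print(resolve('os.path.join'))  # -> <function join ...>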
[ "def", "run", "(", "self", ")", ":", "segments", "=", "self", ".", "controller", ".", "split", "(", "'.'", ")", "controller_class", "=", "reduce", "(", "getattr", ",", "segments", "[", "1", ":", "]", ",", "__import__", "(", "'.'", ".", "join", "(", "segments", "[", ":", "-", "1", "]", ")", ")", ")", "cmd_line", "=", "[", "'-f'", "]", "if", "self", ".", "configuration", "is", "not", "None", ":", "cmd_line", ".", "extend", "(", "[", "'-c'", ",", "self", ".", "configuration", "]", ")", "args", "=", "parser", ".", "get", "(", ")", ".", "parse_args", "(", "cmd_line", ")", "controller_instance", "=", "controller_class", "(", "args", ",", "platform", ")", "try", ":", "controller_instance", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "controller_instance", ".", "stop", "(", ")" ]
Import the controller and run it. This mimics the processing done by :func:`helper.start` when a controller is run in the foreground. A new instance of ``self.controller`` is created and run until a keyboard interrupt occurs or the controller stops on its own accord.
[ "Import", "the", "controller", "and", "run", "it", "." ]
python
train
40.809524
raphaelvallat/pingouin
pingouin/pandas.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pandas.py#L29-L33
def _welch_anova(self, dv=None, between=None, export_filename=None):
    """Return one-way Welch ANOVA."""
    aov = welch_anova(data=self, dv=dv, between=between,
                      export_filename=export_filename)
    return aov
[ "def", "_welch_anova", "(", "self", ",", "dv", "=", "None", ",", "between", "=", "None", ",", "export_filename", "=", "None", ")", ":", "aov", "=", "welch_anova", "(", "data", "=", "self", ",", "dv", "=", "dv", ",", "between", "=", "between", ",", "export_filename", "=", "export_filename", ")", "return", "aov" ]
Return one-way Welch ANOVA.
[ "Return", "one", "-", "way", "Welch", "ANOVA", "." ]
python
train
45.8
cisco-sas/kitty
kitty/fuzzers/base.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/fuzzers/base.py#L398-L406
def handle_stage_changed(self, model):
    '''
    handle a stage change in the data model

    :param model: the data model that was changed
    '''
    stages = model.get_stages()
    if self.dataman:
        self.dataman.set('stages', stages)
[ "def", "handle_stage_changed", "(", "self", ",", "model", ")", ":", "stages", "=", "model", ".", "get_stages", "(", ")", "if", "self", ".", "dataman", ":", "self", ".", "dataman", ".", "set", "(", "'stages'", ",", "stages", ")" ]
handle a stage change in the data model :param model: the data model that was changed
[ "handle", "a", "stage", "change", "in", "the", "data", "model" ]
python
train
29.444444
Stewori/pytypes
pytypes/type_util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L173-L201
def get_Generic_itemtype(sq, simplify=True):
    """Retrieves the item type from a PEP 484 generic or subclass of such.
    sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container.
    Consequently this also works with typing.List, typing.Set and typing.Dict.
    Note that for typing.Dict and mapping types in general, the key type is
    regarded as item type. For typing.Tuple all contained types are returned
    as a typing.Union. If simplify == True some effort is taken to eliminate
    redundancies in such a union.
    """
    if is_Tuple(sq):
        if simplify:
            itm_tps = [x for x in get_Tuple_params(sq)]
            simplify_for_Union(itm_tps)
            return Union[tuple(itm_tps)]
        else:
            return Union[get_Tuple_params(sq)]
    else:
        try:
            res = _select_Generic_superclass_parameters(sq, typing.Container)
        except TypeError:
            res = None
        if res is None:
            try:
                res = _select_Generic_superclass_parameters(sq, typing.Iterable)
            except TypeError:
                pass
        if res is None:
            raise TypeError("Has no itemtype: " + type_str(sq))
        else:
            return res[0]
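A hedged usage sketch derived from the docstring above (assumes pytypes is installed; the expected results follow the documented behavior, which may differ on recent Python/typing versions):

import typing
from pytypes.type_util import get_Generic_itemtype  # module path from this record

print(get_Generic_itemtype(typing.List[int]))        # expected: <class 'int'>
print(get_Generic_itemtype(typing.Dict[str, int]))   # key type is the item type: str
print(get_Generic_itemtype(typing.Tuple[int, str]))  # expected: typing.Union[int, str]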
[ "def", "get_Generic_itemtype", "(", "sq", ",", "simplify", "=", "True", ")", ":", "if", "is_Tuple", "(", "sq", ")", ":", "if", "simplify", ":", "itm_tps", "=", "[", "x", "for", "x", "in", "get_Tuple_params", "(", "sq", ")", "]", "simplify_for_Union", "(", "itm_tps", ")", "return", "Union", "[", "tuple", "(", "itm_tps", ")", "]", "else", ":", "return", "Union", "[", "get_Tuple_params", "(", "sq", ")", "]", "else", ":", "try", ":", "res", "=", "_select_Generic_superclass_parameters", "(", "sq", ",", "typing", ".", "Container", ")", "except", "TypeError", ":", "res", "=", "None", "if", "res", "is", "None", ":", "try", ":", "res", "=", "_select_Generic_superclass_parameters", "(", "sq", ",", "typing", ".", "Iterable", ")", "except", "TypeError", ":", "pass", "if", "res", "is", "None", ":", "raise", "TypeError", "(", "\"Has no itemtype: \"", "+", "type_str", "(", "sq", ")", ")", "else", ":", "return", "res", "[", "0", "]" ]
Retrieves the item type from a PEP 484 generic or subclass of such. sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container. Consequently this also works with typing.List, typing.Set and typing.Dict. Note that for typing.Dict and mapping types in general, the key type is regarded as item type. For typing.Tuple all contained types are returned as a typing.Union. If simplify == True some effort is taken to eliminate redundancies in such a union.
[ "Retrieves", "the", "item", "type", "from", "a", "PEP", "484", "generic", "or", "subclass", "of", "such", ".", "sq", "must", "be", "a", "typing", ".", "Tuple", "or", "(", "subclass", "of", ")", "typing", ".", "Iterable", "or", "typing", ".", "Container", ".", "Consequently", "this", "also", "works", "with", "typing", ".", "List", "typing", ".", "Set", "and", "typing", ".", "Dict", ".", "Note", "that", "for", "typing", ".", "Dict", "and", "mapping", "types", "in", "general", "the", "key", "type", "is", "regarded", "as", "item", "type", ".", "For", "typing", ".", "Tuple", "all", "contained", "types", "are", "returned", "as", "a", "typing", ".", "Union", ".", "If", "simplify", "==", "True", "some", "effort", "is", "taken", "to", "eliminate", "redundancies", "in", "such", "a", "union", "." ]
python
train
41.758621
saltstack/salt
salt/modules/boto_elbv2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elbv2.py#L417-L482
def describe_target_groups(names=None,
                           target_group_arns=None,
                           load_balancer_arn=None,
                           region=None,
                           key=None,
                           keyid=None,
                           profile=None):
    '''
    Describes the specified target groups or all of your target groups. By
    default, all target groups are described. Alternatively, you can specify
    one of the following to filter the results: the ARN of the load balancer,
    the names of one or more target groups, or the ARNs of one or more target
    groups.

    Returns: list

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.describe_target_groups
        salt myminion boto_elbv2.describe_target_groups target_group_name
        salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]"
    '''
    if names and target_group_arns:
        raise SaltInvocationError('At most one of names or target_group_arns may '
                                  'be provided')
    if names:
        target_groups = names
    elif target_group_arns:
        target_groups = target_group_arns
    else:
        target_groups = None

    tg_list = []
    if target_groups:
        if isinstance(target_groups, str) or isinstance(target_groups, six.text_type):
            tg_list.append(target_groups)
        else:
            for group in target_groups:
                tg_list.append(group)

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    try:
        if names:
            ret = conn.describe_target_groups(Names=tg_list)['TargetGroups']
        elif target_group_arns:
            ret = conn.describe_target_groups(TargetGroupArns=tg_list)['TargetGroups']
        elif load_balancer_arn:
            ret = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn)['TargetGroups']
        else:
            ret = []
            next_marker = ''
            while True:
                r = conn.describe_target_groups(Marker=next_marker)
                for alb in r['TargetGroups']:
                    ret.append(alb)
                if 'NextMarker' in r:
                    next_marker = r['NextMarker']
                else:
                    break
        return ret if ret else []
    except ClientError as error:
        log.warning(error)
        return False
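The unfiltered branch walks AWS-style pagination: keep calling with the Marker from the previous response until no NextMarker comes back. A self-contained sketch of that loop against a fake paginated client (all data is made up):

def fake_describe(Marker=''):
    """Stand-in for a paginated AWS API call; pages are invented."""
    pages = {'': {'TargetGroups': ['tg-1', 'tg-2'], 'NextMarker': 'p2'},
             'p2': {'TargetGroups': ['tg-3']}}
    return pages[Marker]

results = []
next_marker = ''
while True:
    r = fake_describe(Marker=next_marker)
    results.extend(r['TargetGroups'])
    if 'NextMarker' in r:
        next_marker = r['NextMarker']
    else:
        break
print(results)  # -> ['tg-1', 'tg-2', 'tg-3']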
[ "def", "describe_target_groups", "(", "names", "=", "None", ",", "target_group_arns", "=", "None", ",", "load_balancer_arn", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "if", "names", "and", "target_group_arns", ":", "raise", "SaltInvocationError", "(", "'At most one of names or target_group_arns may '", "'be provided'", ")", "if", "names", ":", "target_groups", "=", "names", "elif", "target_group_arns", ":", "target_groups", "=", "target_group_arns", "else", ":", "target_groups", "=", "None", "tg_list", "=", "[", "]", "if", "target_groups", ":", "if", "isinstance", "(", "target_groups", ",", "str", ")", "or", "isinstance", "(", "target_groups", ",", "six", ".", "text_type", ")", ":", "tg_list", ".", "append", "(", "target_groups", ")", "else", ":", "for", "group", "in", "target_groups", ":", "tg_list", ".", "append", "(", "group", ")", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "if", "names", ":", "ret", "=", "conn", ".", "describe_target_groups", "(", "Names", "=", "tg_list", ")", "[", "'TargetGroups'", "]", "elif", "target_group_arns", ":", "ret", "=", "conn", ".", "describe_target_groups", "(", "TargetGroupArns", "=", "tg_list", ")", "[", "'TargetGroups'", "]", "elif", "load_balancer_arn", ":", "ret", "=", "conn", ".", "describe_target_groups", "(", "LoadBalancerArn", "=", "load_balancer_arn", ")", "[", "'TargetGroups'", "]", "else", ":", "ret", "=", "[", "]", "next_marker", "=", "''", "while", "True", ":", "r", "=", "conn", ".", "describe_target_groups", "(", "Marker", "=", "next_marker", ")", "for", "alb", "in", "r", "[", "'TargetGroups'", "]", ":", "ret", ".", "append", "(", "alb", ")", "if", "'NextMarker'", "in", "r", ":", "next_marker", "=", "r", "[", "'NextMarker'", "]", "else", ":", "break", "return", "ret", "if", "ret", "else", "[", "]", "except", "ClientError", "as", "error", ":", "log", ".", "warning", "(", "error", ")", "return", "False" ]
Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups. Returns: list CLI example: .. code-block:: bash salt myminion boto_elbv2.describe_target_groups salt myminion boto_elbv2.describe_target_groups target_group_name salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]"
[ "Describes", "the", "specified", "target", "groups", "or", "all", "of", "your", "target", "groups", ".", "By", "default", "all", "target", "groups", "are", "described", ".", "Alternatively", "you", "can", "specify", "one", "of", "the", "following", "to", "filter", "the", "results", ":", "the", "ARN", "of", "the", "load", "balancer", "the", "names", "of", "one", "or", "more", "target", "groups", "or", "the", "ARNs", "of", "one", "or", "more", "target", "groups", "." ]
python
train
35.393939
vlukes/dicom2fem
dicom2fem/mesh.py
https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/mesh.py#L153-L185
def get_min_vertex_distance(coor, guess):
    """Can miss the minimum, but is enough for our purposes."""
    # Sort by x.
    ix = nm.argsort(coor[:, 0])
    scoor = coor[ix]

    mvd = 1e16

    # Get mvd in chunks potentially smaller than guess.
    n_coor = coor.shape[0]
    print(n_coor)

    i0 = i1 = 0
    x0 = scoor[i0, 0]
    while 1:
        while ((scoor[i1, 0] - x0) < guess) and (i1 < (n_coor - 1)):
            i1 += 1

        # print i0, i1, x0, scoor[i1,0]
        aim, aa1, aa2, aux = get_min_vertex_distance_naive(scoor[i0:i1 + 1])
        if aux < mvd:
            im, a1, a2 = aim, aa1 + i0, aa2 + i0
        mvd = min(mvd, aux)

        i0 = i1 = int(0.5 * (i1 + i0)) + 1
        # i0 += 1
        x0 = scoor[i0, 0]

        # print '-', i0
        if i1 == n_coor - 1:
            break

    print(im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2])
    return mvd
[ "def", "get_min_vertex_distance", "(", "coor", ",", "guess", ")", ":", "# Sort by x.", "ix", "=", "nm", ".", "argsort", "(", "coor", "[", ":", ",", "0", "]", ")", "scoor", "=", "coor", "[", "ix", "]", "mvd", "=", "1e16", "# Get mvd in chunks potentially smaller than guess.", "n_coor", "=", "coor", ".", "shape", "[", "0", "]", "print", "n_coor", "i0", "=", "i1", "=", "0", "x0", "=", "scoor", "[", "i0", ",", "0", "]", "while", "1", ":", "while", "(", "(", "scoor", "[", "i1", ",", "0", "]", "-", "x0", ")", "<", "guess", ")", "and", "(", "i1", "<", "(", "n_coor", "-", "1", ")", ")", ":", "i1", "+=", "1", "# print i0, i1, x0, scoor[i1,0]", "aim", ",", "aa1", ",", "aa2", ",", "aux", "=", "get_min_vertex_distance_naive", "(", "scoor", "[", "i0", ":", "i1", "+", "1", "]", ")", "if", "aux", "<", "mvd", ":", "im", ",", "a1", ",", "a2", "=", "aim", ",", "aa1", "+", "i0", ",", "aa2", "+", "i0", "mvd", "=", "min", "(", "mvd", ",", "aux", ")", "i0", "=", "i1", "=", "int", "(", "0.5", "*", "(", "i1", "+", "i0", ")", ")", "+", "1", "# i0 += 1", "x0", "=", "scoor", "[", "i0", ",", "0", "]", "# print '-', i0", "if", "i1", "==", "n_coor", "-", "1", ":", "break", "print", "im", ",", "ix", "[", "a1", "]", ",", "ix", "[", "a2", "]", ",", "a1", ",", "a2", ",", "scoor", "[", "a1", "]", ",", "scoor", "[", "a2", "]", "return", "mvd" ]
Can miss the minimum, but is enough for our purposes.
[ "Can", "miss", "the", "minimum", "but", "is", "enough", "for", "our", "purposes", "." ]
python
train
25.787879
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiEquipment.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiEquipment.py#L46-L59
def search(self, **kwargs):
    """
    Method to search equipments based on extends search.

    :param search: Dict containing QuerySets to find equipments.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: Determine if result will be detailed ('detail') or basic ('basic').

    :return: Dict containing equipments
    """
    return super(ApiEquipment, self).get(self.prepare_url('api/v3/equipment/', kwargs))
[ "def", "search", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "ApiEquipment", ",", "self", ")", ".", "get", "(", "self", ".", "prepare_url", "(", "'api/v3/equipment/'", ",", "kwargs", ")", ")" ]
Method to search equipments based on extends search. :param search: Dict containing QuerySets to find equipments. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing equipments
[ "Method", "to", "search", "equipments", "based", "on", "extends", "search", "." ]
python
train
48.357143
sassoo/goldman
goldman/queryparams/fields.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/fields.py#L24-L50
def _parse_param(key, val):
    """ Parse the query param looking for sparse fields params

    Ensure the `val` or what will become the sparse fields
    is always an array. If the query param is not a sparse
    fields query param then return None.

    :param key:
        the query parameter key in the request (left of =)
    :param val:
        the query parameter val in the request (right of =)
    :return:
        tuple of resource type to implement the sparse
        fields on & an array of the fields.
    """
    regex = re.compile(r'fields\[([A-Za-z]+)\]')
    match = regex.match(key)

    if match:
        if not isinstance(val, list):
            val = val.split(',')

        fields = [field.lower() for field in val]
        rtype = match.groups()[0].lower()

        return rtype, fields
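A self-contained sketch of the same JSON:API sparse-fieldsets parsing, runnable without the goldman package:

import re

def parse_sparse_fields(key, val):
    """Parse a sparse-fieldsets query param such as fields[articles]=title,body."""
    match = re.match(r'fields\[([A-Za-z]+)\]', key)
    if match:
        fields = val if isinstance(val, list) else val.split(',')
        return match.group(1).lower(), [field.lower() for field in fields]
    return None

print(parse_sparse_fields('fields[Articles]', 'Title,Body'))  # -> ('articles', ['title', 'body'])
print(parse_sparse_fields('sort', 'name'))                    # -> None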
[ "def", "_parse_param", "(", "key", ",", "val", ")", ":", "regex", "=", "re", ".", "compile", "(", "r'fields\\[([A-Za-z]+)\\]'", ")", "match", "=", "regex", ".", "match", "(", "key", ")", "if", "match", ":", "if", "not", "isinstance", "(", "val", ",", "list", ")", ":", "val", "=", "val", ".", "split", "(", "','", ")", "fields", "=", "[", "field", ".", "lower", "(", ")", "for", "field", "in", "val", "]", "rtype", "=", "match", ".", "groups", "(", ")", "[", "0", "]", ".", "lower", "(", ")", "return", "rtype", ",", "fields" ]
Parse the query param looking for sparse fields params Ensure the `val` or what will become the sparse fields is always an array. If the query param is not a sparse fields query param then return None. :param key: the query parameter key in the request (left of =) :param val: the query parameter val in the request (right of =) :return: tuple of resource type to implement the sparse fields on & a array of the fields.
[ "Parse", "the", "query", "param", "looking", "for", "sparse", "fields", "params" ]
python
train
29
mapbox/rio-mucho
examples/simple_read.py
https://github.com/mapbox/rio-mucho/blob/b2267bda2a7ac8557c9328742aeaab6adc825315/examples/simple_read.py#L7-L10
def read_function(data, window, ij, g_args):
    """Takes an array, and sets any value above the mean to the max, the rest to 0"""
    output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
    return output
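The trick is that the boolean comparison yields 0/1 values which, cast back to the band's dtype and multiplied by the max, become a two-level image. A standalone numpy demonstration on made-up data:

import numpy as np

band = np.array([[1, 5], [9, 3]], dtype=np.uint8)
# Values above the mean (4.5) become band.max() (9); the rest become 0.
output = (band > band.mean()).astype(band.dtype) * band.max()
print(output)  # -> [[0 9]
               #     [9 0]]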
[ "def", "read_function", "(", "data", ",", "window", ",", "ij", ",", "g_args", ")", ":", "output", "=", "(", "data", "[", "0", "]", ">", "numpy", ".", "mean", "(", "data", "[", "0", "]", ")", ")", ".", "astype", "(", "data", "[", "0", "]", ".", "dtype", ")", "*", "data", "[", "0", "]", ".", "max", "(", ")", "return", "output" ]
Takes an array, and sets any value above the mean to the max, the rest to 0
[ "Takes", "an", "array", "and", "sets", "any", "value", "above", "the", "mean", "to", "the", "max", "the", "rest", "to", "0" ]
python
train
57
apache/airflow
airflow/www/utils.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/utils.py#L193-L201
def json_response(obj):
    """
    returns a json response from a json serializable python object
    """
    return Response(
        response=json.dumps(
            obj, indent=4, cls=AirflowJsonEncoder),
        status=200,
        mimetype="application/json")
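The cls= hook is how json.dumps is taught about non-JSON-native types such as datetimes. A minimal standalone sketch of that pattern (AirflowJsonEncoder itself is not shown in this row, so DemoEncoder is a hypothetical stand-in):

import json
from datetime import datetime

class DemoEncoder(json.JSONEncoder):
    """Hypothetical stand-in for AirflowJsonEncoder: serialize datetimes as ISO strings."""
    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()
        return super().default(o)

print(json.dumps({'run_at': datetime(2019, 1, 1)}, indent=4, cls=DemoEncoder))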
[ "def", "json_response", "(", "obj", ")", ":", "return", "Response", "(", "response", "=", "json", ".", "dumps", "(", "obj", ",", "indent", "=", "4", ",", "cls", "=", "AirflowJsonEncoder", ")", ",", "status", "=", "200", ",", "mimetype", "=", "\"application/json\"", ")" ]
returns a json response from a json serializable python object
[ "returns", "a", "json", "response", "from", "a", "json", "serializable", "python", "object" ]
python
test
28.555556
bcbio/bcbio-nextgen
bcbio/variation/multi.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L106-L133
def _group_batches_shared(xs, caller_batch_fn, prep_data_fn):
    """Shared functionality for grouping by batches for variant calling and joint calling.
    """
    singles = []
    batch_groups = collections.defaultdict(list)
    for args in xs:
        data = utils.to_single_data(args)
        caller, batch = caller_batch_fn(data)
        region = _list_to_tuple(data["region"]) if "region" in data else ()
        if batch is not None:
            batches = batch if isinstance(batch, (list, tuple)) else [batch]
            for b in batches:
                batch_groups[(b, region, caller)].append(utils.deepish_copy(data))
        else:
            data = prep_data_fn(data, [data])
            singles.append(data)
    batches = []
    for batch, items in batch_groups.items():
        batch_data = utils.deepish_copy(_pick_lead_item(items))
        # For nested primary batches, split permanently by batch
        if tz.get_in(["metadata", "batch"], batch_data):
            batch_name = batch[0]
            batch_data["metadata"]["batch"] = batch_name
        batch_data = prep_data_fn(batch_data, items)
        batch_data["group_orig"] = _collapse_subitems(batch_data, items)
        batch_data["group"] = batch
        batches.append(batch_data)
    return singles + batches
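The core pattern is grouping records under a composite key with collections.defaultdict(list). A standalone sketch with made-up records:

import collections

records = [{'batch': 'b1', 'caller': 'gatk', 'sample': 's1'},
           {'batch': 'b1', 'caller': 'gatk', 'sample': 's2'},
           {'batch': 'b2', 'caller': 'gatk', 'sample': 's3'}]

groups = collections.defaultdict(list)
for rec in records:
    # Tuples work as dict keys, so (batch, caller) forms the composite key.
    groups[(rec['batch'], rec['caller'])].append(rec['sample'])

print(dict(groups))  # -> {('b1', 'gatk'): ['s1', 's2'], ('b2', 'gatk'): ['s3']}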
[ "def", "_group_batches_shared", "(", "xs", ",", "caller_batch_fn", ",", "prep_data_fn", ")", ":", "singles", "=", "[", "]", "batch_groups", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "args", "in", "xs", ":", "data", "=", "utils", ".", "to_single_data", "(", "args", ")", "caller", ",", "batch", "=", "caller_batch_fn", "(", "data", ")", "region", "=", "_list_to_tuple", "(", "data", "[", "\"region\"", "]", ")", "if", "\"region\"", "in", "data", "else", "(", ")", "if", "batch", "is", "not", "None", ":", "batches", "=", "batch", "if", "isinstance", "(", "batch", ",", "(", "list", ",", "tuple", ")", ")", "else", "[", "batch", "]", "for", "b", "in", "batches", ":", "batch_groups", "[", "(", "b", ",", "region", ",", "caller", ")", "]", ".", "append", "(", "utils", ".", "deepish_copy", "(", "data", ")", ")", "else", ":", "data", "=", "prep_data_fn", "(", "data", ",", "[", "data", "]", ")", "singles", ".", "append", "(", "data", ")", "batches", "=", "[", "]", "for", "batch", ",", "items", "in", "batch_groups", ".", "items", "(", ")", ":", "batch_data", "=", "utils", ".", "deepish_copy", "(", "_pick_lead_item", "(", "items", ")", ")", "# For nested primary batches, split permanently by batch", "if", "tz", ".", "get_in", "(", "[", "\"metadata\"", ",", "\"batch\"", "]", ",", "batch_data", ")", ":", "batch_name", "=", "batch", "[", "0", "]", "batch_data", "[", "\"metadata\"", "]", "[", "\"batch\"", "]", "=", "batch_name", "batch_data", "=", "prep_data_fn", "(", "batch_data", ",", "items", ")", "batch_data", "[", "\"group_orig\"", "]", "=", "_collapse_subitems", "(", "batch_data", ",", "items", ")", "batch_data", "[", "\"group\"", "]", "=", "batch", "batches", ".", "append", "(", "batch_data", ")", "return", "singles", "+", "batches" ]
Shared functionality for grouping by batches for variant calling and joint calling.
[ "Shared", "functionality", "for", "grouping", "by", "batches", "for", "variant", "calling", "and", "joint", "calling", "." ]
python
train
45.071429
Shizmob/pydle
pydle/features/ircv3/cap.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/ircv3/cap.py#L65-L75
async def on_raw_cap(self, message):
    """ Handle CAP message. """
    target, subcommand = message.params[:2]
    params = message.params[2:]

    # Call handler.
    attr = 'on_raw_cap_' + pydle.protocol.identifierify(subcommand)
    if hasattr(self, attr):
        await getattr(self, attr)(params)
    else:
        self.logger.warning('Unknown CAP subcommand sent from server: %s', subcommand)
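Dispatching to an 'on_raw_cap_<subcommand>' method via getattr is a common dynamic-handler pattern. A synchronous standalone sketch of the same idea:

class Dispatcher:
    """Route a subcommand string to an on_<subcommand> method, if one exists."""
    def on_ls(self, params):
        print('LS handler got', params)

    def handle(self, subcommand, params):
        attr = 'on_' + subcommand.lower()
        if hasattr(self, attr):
            getattr(self, attr)(params)
        else:
            print('Unknown subcommand:', subcommand)

d = Dispatcher()
d.handle('LS', ['multi-prefix', 'sasl'])  # -> LS handler got ['multi-prefix', 'sasl']
d.handle('NEW', [])                       # -> Unknown subcommand: NEW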
[ "async", "def", "on_raw_cap", "(", "self", ",", "message", ")", ":", "target", ",", "subcommand", "=", "message", ".", "params", "[", ":", "2", "]", "params", "=", "message", ".", "params", "[", "2", ":", "]", "# Call handler.", "attr", "=", "'on_raw_cap_'", "+", "pydle", ".", "protocol", ".", "identifierify", "(", "subcommand", ")", "if", "hasattr", "(", "self", ",", "attr", ")", ":", "await", "getattr", "(", "self", ",", "attr", ")", "(", "params", ")", "else", ":", "self", ".", "logger", ".", "warning", "(", "'Unknown CAP subcommand sent from server: %s'", ",", "subcommand", ")" ]
Handle CAP message.
[ "Handle", "CAP", "message", "." ]
python
train
38.727273
jjkester/django-auditlog
src/auditlog/compat.py
https://github.com/jjkester/django-auditlog/blob/a22978e05b7ed43b87e4b6109550b86c738578fe/src/auditlog/compat.py#L3-L20
def is_authenticated(user):
    """Return whether or not a User is authenticated.

    Function provides compatibility following deprecation of method call to
    `is_authenticated()` in Django 2.0.

    This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier),
    as `is_authenticated` was introduced as a property in v1.10.
    """
    if not hasattr(user, 'is_authenticated'):
        return False
    if callable(user.is_authenticated):
        # Will be callable if django.version < 2.0, but is only necessary in
        # v1.9 and earlier due to change introduced in v1.10 making
        # `is_authenticated` a property instead of a callable.
        return user.is_authenticated()
    else:
        return user.is_authenticated
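A standalone sketch with stub user objects showing why the callable() probe handles both Django generations (the stubs are invented; no Django is required to run this):

def is_authenticated(user):
    if not hasattr(user, 'is_authenticated'):
        return False
    if callable(user.is_authenticated):
        return user.is_authenticated()  # Django < 1.10: method
    return user.is_authenticated        # Django >= 1.10: property/attribute

class OldStyleUser:            # stands in for a pre-1.10 Django user
    def is_authenticated(self):
        return True

class NewStyleUser:            # stands in for a 1.10+ Django user
    is_authenticated = True

print(is_authenticated(OldStyleUser()))  # -> True
print(is_authenticated(NewStyleUser()))  # -> True
print(is_authenticated(object()))        # -> False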
[ "def", "is_authenticated", "(", "user", ")", ":", "if", "not", "hasattr", "(", "user", ",", "'is_authenticated'", ")", ":", "return", "False", "if", "callable", "(", "user", ".", "is_authenticated", ")", ":", "# Will be callable if django.version < 2.0, but is only necessary in", "# v1.9 and earlier due to change introduced in v1.10 making", "# `is_authenticated` a property instead of a callable.", "return", "user", ".", "is_authenticated", "(", ")", "else", ":", "return", "user", ".", "is_authenticated" ]
Return whether or not a User is authenticated. Function provides compatibility following deprecation of method call to `is_authenticated()` in Django 2.0. This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier), as `is_authenticated` was introduced as a property in v1.10.s
[ "Return", "whether", "or", "not", "a", "User", "is", "authenticated", "." ]
python
train
40.833333
chriskiehl/Gooey
gooey/gui/containers/application.py
https://github.com/chriskiehl/Gooey/blob/e598573c6519b953e0ccfc1f3663f827f8cd7e22/gooey/gui/containers/application.py#L189-L199
def buildNavigation(self):
    """
    Chooses the appropriate layout navigation component
    based on user prefs
    """
    if self.buildSpec['navigation'] == constants.TABBED:
        navigation = Tabbar(self, self.buildSpec, self.configs)
    else:
        navigation = Sidebar(self, self.buildSpec, self.configs)
        if self.buildSpec['navigation'] == constants.HIDDEN:
            navigation.Hide()
    return navigation
[ "def", "buildNavigation", "(", "self", ")", ":", "if", "self", ".", "buildSpec", "[", "'navigation'", "]", "==", "constants", ".", "TABBED", ":", "navigation", "=", "Tabbar", "(", "self", ",", "self", ".", "buildSpec", ",", "self", ".", "configs", ")", "else", ":", "navigation", "=", "Sidebar", "(", "self", ",", "self", ".", "buildSpec", ",", "self", ".", "configs", ")", "if", "self", ".", "buildSpec", "[", "'navigation'", "]", "==", "constants", ".", "HIDDEN", ":", "navigation", ".", "Hide", "(", ")", "return", "navigation" ]
Chooses the appropriate layout navigation component based on user prefs
[ "Chooses", "the", "appropriate", "layout", "navigation", "component", "based", "on", "user", "prefs" ]
python
train
42.454545
apple/turicreate
src/unity/python/turicreate/util/_cloudpickle.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L1090-L1105
def _fill_function(func, globals, defaults, dict, module, closure_values):
    """ Fills in the rest of function data into the skeleton function object
        that were created via _make_skel_func().
    """
    func.__globals__.update(globals)
    func.__defaults__ = defaults
    func.__dict__ = dict
    func.__module__ = module

    cells = func.__closure__
    if cells is not None:
        for cell, value in zip(cells, closure_values):
            if value is not _empty_cell_value:
                cell_set(cell, value)

    return func
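Function dunders like __defaults__ and __dict__ are writable at runtime, which is what lets unpickling rehydrate a skeleton function. A standalone illustration (cell_set is a cloudpickle internal and is not reproduced here):

def greet(name, greeting='hello'):
    return '%s, %s' % (greeting, name)

# Rebind the default arguments and attach metadata after the fact.
greet.__defaults__ = ('howdy',)
greet.__dict__['restored_by'] = 'demo'

print(greet('world'))     # -> howdy, world
print(greet.restored_by)  # -> demo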
[ "def", "_fill_function", "(", "func", ",", "globals", ",", "defaults", ",", "dict", ",", "module", ",", "closure_values", ")", ":", "func", ".", "__globals__", ".", "update", "(", "globals", ")", "func", ".", "__defaults__", "=", "defaults", "func", ".", "__dict__", "=", "dict", "func", ".", "__module__", "=", "module", "cells", "=", "func", ".", "__closure__", "if", "cells", "is", "not", "None", ":", "for", "cell", ",", "value", "in", "zip", "(", "cells", ",", "closure_values", ")", ":", "if", "value", "is", "not", "_empty_cell_value", ":", "cell_set", "(", "cell", ",", "value", ")", "return", "func" ]
Fills in the rest of function data into the skeleton function object that were created via _make_skel_func().
[ "Fills", "in", "the", "rest", "of", "function", "data", "into", "the", "skeleton", "function", "object", "that", "were", "created", "via", "_make_skel_func", "()", "." ]
python
train
33.125
rbarrois/django-batchform
batchform/views.py
https://github.com/rbarrois/django-batchform/blob/f6b659a6790750285af248ccd1d4d178ecbad129/batchform/views.py#L119-L128
def form_lines_valid(self, form):
    """Handle a valid LineFormSet."""
    handled = 0
    for inner_form in form:
        if not inner_form.cleaned_data.get(formsets.DELETION_FIELD_NAME):
            handled += 1
            self.handle_inner_form(inner_form)
    self.log_and_notify_lines(handled)
    return http.HttpResponseRedirect(self.get_success_url())
[ "def", "form_lines_valid", "(", "self", ",", "form", ")", ":", "handled", "=", "0", "for", "inner_form", "in", "form", ":", "if", "not", "inner_form", ".", "cleaned_data", ".", "get", "(", "formsets", ".", "DELETION_FIELD_NAME", ")", ":", "handled", "+=", "1", "self", ".", "handle_inner_form", "(", "inner_form", ")", "self", ".", "log_and_notify_lines", "(", "handled", ")", "return", "http", ".", "HttpResponseRedirect", "(", "self", ".", "get_success_url", "(", ")", ")" ]
Handle a valid LineFormSet.
[ "Handle", "a", "valid", "LineFormSet", "." ]
python
train
38.5
mikedh/trimesh
trimesh/util.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L1292-L1398
def submesh(mesh,
            faces_sequence,
            only_watertight=False,
            append=False):
    """
    Return a subset of a mesh.

    Parameters
    ----------
    mesh : Trimesh
        Source mesh to take geometry from
    faces_sequence : sequence (p,) int
        Indexes of mesh.faces
    only_watertight : bool
        Only return submeshes which are watertight.
    append : bool
        Return a single mesh which has the faces appended,
        if this flag is set, only_watertight is ignored

    Returns
    ---------
    if append : Trimesh object
    else : list of Trimesh objects
    """
    # evaluate generators so we can escape early
    faces_sequence = list(faces_sequence)

    if len(faces_sequence) == 0:
        return []

    # check to make sure we're not doing a whole bunch of work
    # to deliver a subset which ends up as the whole mesh
    if len(faces_sequence[0]) == len(mesh.faces):
        all_faces = np.array_equal(np.sort(faces_sequence),
                                   np.arange(len(faces_sequence)))
        if all_faces:
            log.debug('entire mesh requested, returning copy')
            return mesh.copy()

    # avoid nuking the cache on the original mesh
    original_faces = mesh.faces.view(np.ndarray)
    original_vertices = mesh.vertices.view(np.ndarray)

    faces = []
    vertices = []
    normals = []
    visuals = []

    # for reindexing faces
    mask = np.arange(len(original_vertices))

    for faces_index in faces_sequence:
        # sanitize indices in case they are coming in as a set or tuple
        faces_index = np.asanyarray(faces_index, dtype=np.int64)
        if len(faces_index) == 0:
            continue
        faces_current = original_faces[faces_index]
        unique = np.unique(faces_current.reshape(-1))

        # redefine face indices from zero
        mask[unique] = np.arange(len(unique))
        normals.append(mesh.face_normals[faces_index])
        faces.append(mask[faces_current])
        vertices.append(original_vertices[unique])
        visuals.append(mesh.visual.face_subset(faces_index))

    # we use type(mesh) rather than importing Trimesh from base
    # to avoid a circular import
    trimesh_type = type_named(mesh, 'Trimesh')
    if append:
        if all(hasattr(i, 'concatenate') for i in visuals):
            visuals = np.array(visuals)
            visual = visuals[0].concatenate(visuals[1:])
        else:
            visual = None
        vertices, faces = append_faces(vertices, faces)
        appended = trimesh_type(
            vertices=vertices,
            faces=faces,
            face_normals=np.vstack(normals),
            visual=visual,
            process=False)
        return appended

    # generate a list of Trimesh objects
    result = [trimesh_type(
        vertices=v,
        faces=f,
        face_normals=n,
        visual=c,
        metadata=copy.deepcopy(mesh.metadata),
        process=False) for v, f, n, c in zip(vertices,
                                             faces,
                                             normals,
                                             visuals)]
    result = np.array(result)

    if len(result) > 0 and only_watertight:
        # fill_holes will attempt a repair and returns the
        # watertight status at the end of the repair attempt
        watertight = np.array([i.fill_holes() and len(i.faces) >= 4
                               for i in result])
        # remove unrepairable meshes
        result = result[watertight]

    return result
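A hedged usage sketch via the Trimesh.submesh method, which wraps this helper (assumes the trimesh package is installed; the mesh comes from its built-in box primitive):

import trimesh

mesh = trimesh.creation.box()            # unit cube, 12 triangular faces
# Take two faces as one submesh; append=True returns a single Trimesh.
sub = mesh.submesh([[0, 1]], append=True)
print(len(mesh.faces), len(sub.faces))   # -> 12 2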
[ "def", "submesh", "(", "mesh", ",", "faces_sequence", ",", "only_watertight", "=", "False", ",", "append", "=", "False", ")", ":", "# evaluate generators so we can escape early", "faces_sequence", "=", "list", "(", "faces_sequence", ")", "if", "len", "(", "faces_sequence", ")", "==", "0", ":", "return", "[", "]", "# check to make sure we're not doing a whole bunch of work", "# to deliver a subset which ends up as the whole mesh", "if", "len", "(", "faces_sequence", "[", "0", "]", ")", "==", "len", "(", "mesh", ".", "faces", ")", ":", "all_faces", "=", "np", ".", "array_equal", "(", "np", ".", "sort", "(", "faces_sequence", ")", ",", "np", ".", "arange", "(", "len", "(", "faces_sequence", ")", ")", ")", "if", "all_faces", ":", "log", ".", "debug", "(", "'entire mesh requested, returning copy'", ")", "return", "mesh", ".", "copy", "(", ")", "# avoid nuking the cache on the original mesh", "original_faces", "=", "mesh", ".", "faces", ".", "view", "(", "np", ".", "ndarray", ")", "original_vertices", "=", "mesh", ".", "vertices", ".", "view", "(", "np", ".", "ndarray", ")", "faces", "=", "[", "]", "vertices", "=", "[", "]", "normals", "=", "[", "]", "visuals", "=", "[", "]", "# for reindexing faces", "mask", "=", "np", ".", "arange", "(", "len", "(", "original_vertices", ")", ")", "for", "faces_index", "in", "faces_sequence", ":", "# sanitize indices in case they are coming in as a set or tuple", "faces_index", "=", "np", ".", "asanyarray", "(", "faces_index", ",", "dtype", "=", "np", ".", "int64", ")", "if", "len", "(", "faces_index", ")", "==", "0", ":", "continue", "faces_current", "=", "original_faces", "[", "faces_index", "]", "unique", "=", "np", ".", "unique", "(", "faces_current", ".", "reshape", "(", "-", "1", ")", ")", "# redefine face indices from zero", "mask", "[", "unique", "]", "=", "np", ".", "arange", "(", "len", "(", "unique", ")", ")", "normals", ".", "append", "(", "mesh", ".", "face_normals", "[", "faces_index", "]", ")", "faces", ".", "append", "(", "mask", "[", "faces_current", "]", ")", "vertices", ".", "append", "(", "original_vertices", "[", "unique", "]", ")", "visuals", ".", "append", "(", "mesh", ".", "visual", ".", "face_subset", "(", "faces_index", ")", ")", "# we use type(mesh) rather than importing Trimesh from base", "# to avoid a circular import", "trimesh_type", "=", "type_named", "(", "mesh", ",", "'Trimesh'", ")", "if", "append", ":", "if", "all", "(", "hasattr", "(", "i", ",", "'concatenate'", ")", "for", "i", "in", "visuals", ")", ":", "visuals", "=", "np", ".", "array", "(", "visuals", ")", "visual", "=", "visuals", "[", "0", "]", ".", "concatenate", "(", "visuals", "[", "1", ":", "]", ")", "else", ":", "visual", "=", "None", "vertices", ",", "faces", "=", "append_faces", "(", "vertices", ",", "faces", ")", "appended", "=", "trimesh_type", "(", "vertices", "=", "vertices", ",", "faces", "=", "faces", ",", "face_normals", "=", "np", ".", "vstack", "(", "normals", ")", ",", "visual", "=", "visual", ",", "process", "=", "False", ")", "return", "appended", "# generate a list of Trimesh objects", "result", "=", "[", "trimesh_type", "(", "vertices", "=", "v", ",", "faces", "=", "f", ",", "face_normals", "=", "n", ",", "visual", "=", "c", ",", "metadata", "=", "copy", ".", "deepcopy", "(", "mesh", ".", "metadata", ")", ",", "process", "=", "False", ")", "for", "v", ",", "f", ",", "n", ",", "c", "in", "zip", "(", "vertices", ",", "faces", ",", "normals", ",", "visuals", ")", "]", "result", "=", "np", ".", "array", "(", "result", ")", "if", "len", "(", 
"result", ")", ">", "0", "and", "only_watertight", ":", "# fill_holes will attempt a repair and returns the", "# watertight status at the end of the repair attempt", "watertight", "=", "np", ".", "array", "(", "[", "i", ".", "fill_holes", "(", ")", "and", "len", "(", "i", ".", "faces", ")", ">=", "4", "for", "i", "in", "result", "]", ")", "# remove unrepairable meshes", "result", "=", "result", "[", "watertight", "]", "return", "result" ]
Return a subset of a mesh. Parameters ---------- mesh : Trimesh Source mesh to take geometry from faces_sequence : sequence (p,) int Indexes of mesh.faces only_watertight : bool Only return submeshes which are watertight. append : bool Return a single mesh which has the faces appended, if this flag is set, only_watertight is ignored Returns --------- if append : Trimesh object else list of Trimesh objects
[ "Return", "a", "subset", "of", "a", "mesh", "." ]
python
train
32.373832
manns/pyspread
pyspread/src/model/model.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/model/model.py#L1078-L1088
def set_row_height(self, row, tab, height):
    """Sets row height"""

    try:
        old_height = self.row_heights.pop((row, tab))

    except KeyError:
        old_height = None

    if height is not None:
        self.row_heights[(row, tab)] = float(height)
[ "def", "set_row_height", "(", "self", ",", "row", ",", "tab", ",", "height", ")", ":", "try", ":", "old_height", "=", "self", ".", "row_heights", ".", "pop", "(", "(", "row", ",", "tab", ")", ")", "except", "KeyError", ":", "old_height", "=", "None", "if", "height", "is", "not", "None", ":", "self", ".", "row_heights", "[", "(", "row", ",", "tab", ")", "]", "=", "float", "(", "height", ")" ]
Sets row height
[ "Sets", "row", "height" ]
python
train
25.454545
qubole/qds-sdk-py
qds_sdk/cluster.py
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L907-L926
def set_stable_spot_instance_settings(self,
                                      maximum_bid_price_percentage=None,
                                      timeout_for_request=None,
                                      allow_fallback=True):
    """
    Purchase options for stable spot instances.

    `maximum_bid_price_percentage`: Maximum value to bid for stable node spot
        instances, expressed as a percentage of the base price
        (applies to both master and slave nodes).

    `timeout_for_request`: Timeout for a stable node spot instance request
        (Unit: minutes)

    `allow_fallback`: Whether to fallback to on-demand instances for
        stable nodes if spot instances are not available
    """
    self.hadoop_settings['stable_spot_instance_settings'] = {
        'maximum_bid_price_percentage': maximum_bid_price_percentage,
        'timeout_for_request': timeout_for_request,
        'allow_fallback': allow_fallback}
[ "def", "set_stable_spot_instance_settings", "(", "self", ",", "maximum_bid_price_percentage", "=", "None", ",", "timeout_for_request", "=", "None", ",", "allow_fallback", "=", "True", ")", ":", "self", ".", "hadoop_settings", "[", "'stable_spot_instance_settings'", "]", "=", "{", "'maximum_bid_price_percentage'", ":", "maximum_bid_price_percentage", ",", "'timeout_for_request'", ":", "timeout_for_request", ",", "'allow_fallback'", ":", "allow_fallback", "}" ]
Purchase options for stable spot instances. `maximum_bid_price_percentage`: Maximum value to bid for stable node spot instances, expressed as a percentage of the base price (applies to both master and slave nodes). `timeout_for_request`: Timeout for a stable node spot instance request (Unit: minutes) `allow_fallback`: Whether to fallback to on-demand instances for stable nodes if spot instances are not available
[ "Purchase", "options", "for", "stable", "spot", "instances", "." ]
python
train
48.25
acutesoftware/AIKIF
aikif/agents/agent_map_data.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent_map_data.py#L29-L37
def map_data(self):
    """
    provides a mapping from the CSV file to the aikif data structures.
    """
    with open(self.src_file, "r") as f:
        for line in f:
            cols = line.split(',')
            print(cols)
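Worth noting: a bare line.split(',') breaks on quoted fields that contain commas; the standard csv module handles those. A standalone sketch of the more robust variant (the file name is hypothetical):

import csv

# Assumes a file like:  name,notes
#                       "Smith, Jane",example row
with open('data.csv', newline='') as f:
    for cols in csv.reader(f):
        print(cols)  # quoted commas stay inside one field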
[ "def", "map_data", "(", "self", ")", ":", "with", "open", "(", "self", ".", "src_file", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "cols", "=", "line", ".", "split", "(", "','", ")", "print", "(", "cols", ")" ]
provides a mapping from the CSV file to the aikif data structures.
[ "provides", "a", "mapping", "from", "the", "CSV", "file", "to", "the", "aikif", "data", "structures", "." ]
python
train
28.555556
annoviko/pyclustering
pyclustering/nnet/syncsegm.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/syncsegm.py#L131-L153
def allocate_objects(self, eps=0.01, noise_size=1):
    """!
    @brief Allocates object segments.

    @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
    @param[in] noise_size (uint): Threshold that defines noise - segments whose size (in pixels) is less than the threshold are considered noise.

    @return (list) Object segments where each object segment consists of indexes of pixels that form the object segment.

    """

    if self.__object_segment_analysers is None:
        return []

    segments = []
    for object_segment_analyser in self.__object_segment_analysers:
        indexes = object_segment_analyser['color_segment']
        analyser = object_segment_analyser['analyser']

        segments += analyser.allocate_clusters(eps, indexes)

    real_segments = [segment for segment in segments if len(segment) > noise_size]
    return real_segments
[ "def", "allocate_objects", "(", "self", ",", "eps", "=", "0.01", ",", "noise_size", "=", "1", ")", ":", "if", "(", "self", ".", "__object_segment_analysers", "is", "None", ")", ":", "return", "[", "]", "segments", "=", "[", "]", "for", "object_segment_analyser", "in", "self", ".", "__object_segment_analysers", ":", "indexes", "=", "object_segment_analyser", "[", "'color_segment'", "]", "analyser", "=", "object_segment_analyser", "[", "'analyser'", "]", "segments", "+=", "analyser", ".", "allocate_clusters", "(", "eps", ",", "indexes", ")", "real_segments", "=", "[", "segment", "for", "segment", "in", "segments", "if", "len", "(", "segment", ")", ">", "noise_size", "]", "return", "real_segments" ]
! @brief Allocates object segments. @param[in] eps (double): Tolerance level that define maximal difference between phases of oscillators in one segment. @param[in] noise_size (uint): Threshold that defines noise - segments size (in pixels) that is less then the threshold is considered as a noise. @return (list) Object segments where each object segment consists of indexes of pixels that forms object segment.
[ "!" ]
python
valid
46.521739
lago-project/lago
lago/sdk.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/sdk.py#L38-L64
def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs):
    """
    Initialize the Lago environment

    Args:
        config(str): Path to LagoInitFile
        workdir(str): Path to initialize the workdir, defaults to "$PWD/.lago"
        **kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`
        logfile(str): A path to setup a log file.
        loglevel(int): :mod:`logging` log level.

    Returns:
        :class:`~lago.sdk.SDK`: Initialized Lago environment

    Raises:
        :exc:`~lago.utils.LagoException`: If initialization failed
    """
    setup_sdk_logging(logfile, loglevel)
    defaults = lago_config.get_section('init')
    if workdir is None:
        workdir = os.path.abspath('.lago')
    defaults['workdir'] = workdir
    defaults['virt_config'] = config
    defaults.update(kwargs)
    workdir, prefix = cmd.do_init(**defaults)
    return SDK(workdir, prefix)
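A hedged usage sketch derived directly from the docstring above (requires a Lago installation and a valid LagoInitFile; all paths are placeholders):

import logging
from lago import sdk  # assumes the lago package is installed

# Paths below are placeholders for illustration only.
env = sdk.init('LagoInitFile',
               workdir='/tmp/lago-workdir',
               logfile='/tmp/lago.log',
               loglevel=logging.DEBUG)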
[ "def", "init", "(", "config", ",", "workdir", "=", "None", ",", "logfile", "=", "None", ",", "loglevel", "=", "logging", ".", "INFO", ",", "*", "*", "kwargs", ")", ":", "setup_sdk_logging", "(", "logfile", ",", "loglevel", ")", "defaults", "=", "lago_config", ".", "get_section", "(", "'init'", ")", "if", "workdir", "is", "None", ":", "workdir", "=", "os", ".", "path", ".", "abspath", "(", "'.lago'", ")", "defaults", "[", "'workdir'", "]", "=", "workdir", "defaults", "[", "'virt_config'", "]", "=", "config", "defaults", ".", "update", "(", "kwargs", ")", "workdir", ",", "prefix", "=", "cmd", ".", "do_init", "(", "*", "*", "defaults", ")", "return", "SDK", "(", "workdir", ",", "prefix", ")" ]
Initialize the Lago environment Args: config(str): Path to LagoInitFile workdir(str): Path to initalize the workdir, defaults to "$PWD/.lago" **kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init` logfile(str): A path to setup a log file. loglevel(int): :mod:`logging` log level. Returns: :class:`~lago.sdk.SDK`: Initialized Lago enviornment Raises: :exc:`~lago.utils.LagoException`: If initialization failed
[ "Initialize", "the", "Lago", "environment" ]
python
train
32.925926
numenta/htmresearch
projects/sequence_prediction/continuous_sequence/plotPerformance.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_prediction/continuous_sequence/plotPerformance.py#L45-L55
def getDatetimeAxis():
    """
    use datetime as x-axis
    """
    dataSet = 'nyc_taxi'
    filePath = './data/' + dataSet + '.csv'
    data = pd.read_csv(filePath, header=0, skiprows=[1, 2],
                       names=['datetime', 'value', 'timeofday', 'dayofweek'])

    xaxisDate = pd.to_datetime(data['datetime'])
    return xaxisDate
[ "def", "getDatetimeAxis", "(", ")", ":", "dataSet", "=", "'nyc_taxi'", "filePath", "=", "'./data/'", "+", "dataSet", "+", "'.csv'", "data", "=", "pd", ".", "read_csv", "(", "filePath", ",", "header", "=", "0", ",", "skiprows", "=", "[", "1", ",", "2", "]", ",", "names", "=", "[", "'datetime'", ",", "'value'", ",", "'timeofday'", ",", "'dayofweek'", "]", ")", "xaxisDate", "=", "pd", ".", "to_datetime", "(", "data", "[", "'datetime'", "]", ")", "return", "xaxisDate" ]
use datetime as x-axis
[ "use", "datetime", "as", "x", "-", "axis" ]
python
train
28.636364
numenta/htmresearch
htmresearch/regions/ApicalTMSequenceRegion.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/regions/ApicalTMSequenceRegion.py#L39-L279
def getSpec(cls):
    """
    Return the Spec for ApicalTMSequenceRegion.
    """
    spec = {
        "description": ApicalTMSequenceRegion.__doc__,
        "singleNodeOnly": True,
        "inputs": {
            "activeColumns": {
                "description": ("An array of 0's and 1's representing the active "
                                "minicolumns, i.e. the input to the TemporalMemory"),
                "dataType": "Real32",
                "count": 0,
                "required": True,
                "regionLevel": True,
                "isDefaultInput": True,
                "requireSplitterMap": False
            },
            "resetIn": {
                "description": ("A boolean flag that indicates whether"
                                " or not the input vector received in this compute cycle"
                                " represents the first presentation in a"
                                " new temporal sequence."),
                "dataType": "Real32",
                "count": 1,
                "required": False,
                "regionLevel": True,
                "isDefaultInput": False,
                "requireSplitterMap": False
            },
            "apicalInput": {
                "description": "An array of 0's and 1's representing top down input."
                               " The input will be provided to apical dendrites.",
                "dataType": "Real32",
                "count": 0,
                "required": False,
                "regionLevel": True,
                "isDefaultInput": False,
                "requireSplitterMap": False
            },
            "apicalGrowthCandidates": {
                "description": ("An array of 0's and 1's representing apical input "
                                "that can be learned on new synapses on apical "
                                "segments. If this input is a length-0 array, the "
                                "whole apicalInput is used."),
                "dataType": "Real32",
                "count": 0,
                "required": False,
                "regionLevel": True,
                "isDefaultInput": False,
                "requireSplitterMap": False
            },
        },
        "outputs": {
            "nextPredictedCells": {
                "description": ("A binary output containing a 1 for every "
                                "cell that is predicted for the next timestep."),
                "dataType": "Real32",
                "count": 0,
                "regionLevel": True,
                "isDefaultOutput": False
            },
            "predictedActiveCells": {
                "description": ("A binary output containing a 1 for every "
                                "cell that transitioned from predicted to active."),
                "dataType": "Real32",
                "count": 0,
                "regionLevel": True,
                "isDefaultOutput": False
            },
            "activeCells": {
                "description": ("A binary output containing a 1 for every "
                                "cell that is currently active."),
                "dataType": "Real32",
                "count": 0,
                "regionLevel": True,
                "isDefaultOutput": True
            },
            "winnerCells": {
                "description": ("A binary output containing a 1 for every "
                                "'winner' cell in the TM."),
                "dataType": "Real32",
                "count": 0,
                "regionLevel": True,
                "isDefaultOutput": False
            },
        },
        "parameters": {
            # Input sizes (the network API doesn't provide these during initialize)
            "columnCount": {
                "description": ("The size of the 'activeColumns' input " +
                                "(i.e. the number of columns)"),
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1,
                "constraints": ""
            },
            "apicalInputWidth": {
                "description": "The size of the 'apicalInput' input",
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1,
                "constraints": ""
            },
            "learn": {
                "description": "True if the TM should learn.",
                "accessMode": "ReadWrite",
                "dataType": "Bool",
                "count": 1,
                "defaultValue": "true"
            },
            "cellsPerColumn": {
                "description": "Number of cells per column",
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1,
                "constraints": ""
            },
            "activationThreshold": {
                "description": ("If the number of active connected synapses on a "
                                "segment is at least this threshold, the segment "
                                "is said to be active."),
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1,
                "constraints": ""
            },
            "reducedBasalThreshold": {
                "description": ("Activation threshold of basal segments for cells "
                                "with active apical segments (with apicalTiebreak "
                                "implementation). "),
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1,
                "constraints": ""
            },
            "initialPermanence": {
                "description": "Initial permanence of a new synapse.",
                "accessMode": "Read",
                "dataType": "Real32",
                "count": 1,
                "constraints": ""
            },
            "connectedPermanence": {
                "description": ("If the permanence value for a synapse is greater "
                                "than this value, it is said to be connected."),
                "accessMode": "Read",
                "dataType": "Real32",
                "count": 1,
                "constraints": ""
            },
            "minThreshold": {
                "description": ("If the number of synapses active on a segment is at "
                                "least this threshold, it is selected as the best "
                                "matching cell in a bursting column."),
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1,
                "constraints": ""
            },
            "sampleSize": {
                "description": ("The desired number of active synapses for an " +
                                "active cell"),
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1
            },
            "learnOnOneCell": {
                "description": ("If True, the winner cell for each column will be"
                                " fixed between resets."),
                "accessMode": "Read",
                "dataType": "Bool",
                "count": 1,
                "defaultValue": "false"
            },
            "maxSynapsesPerSegment": {
                "description": "The maximum number of synapses per segment",
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1
            },
            "maxSegmentsPerCell": {
                "description": "The maximum number of segments per cell",
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1
            },
            "permanenceIncrement": {
                "description": ("Amount by which permanences of synapses are "
                                "incremented during learning."),
                "accessMode": "Read",
                "dataType": "Real32",
                "count": 1
            },
            "permanenceDecrement": {
                "description": ("Amount by which permanences of synapses are "
                                "decremented during learning."),
                "accessMode": "Read",
                "dataType": "Real32",
                "count": 1
            },
            "basalPredictedSegmentDecrement": {
                "description": ("Amount by which active permanences of synapses of "
                                "previously predicted but inactive segments are "
                                "decremented."),
                "accessMode": "Read",
                "dataType": "Real32",
                "count": 1
            },
            "apicalPredictedSegmentDecrement": {
                "description": ("Amount by which active permanences of synapses of "
                                "previously predicted but inactive segments are "
                                "decremented."),
                "accessMode": "Read",
                "dataType": "Real32",
                "count": 1
            },
            "seed": {
                "description": "Seed for the random number generator.",
                "accessMode": "Read",
                "dataType": "UInt32",
                "count": 1
            },
            "implementation": {
                "description": "Apical implementation",
                "accessMode": "Read",
                "dataType": "Byte",
                "count": 0,
                "constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, "
                                "ApicalDependent"),
                "defaultValue": "ApicalTiebreakCPP"
            },
        },
    }

    return spec
[ "def", "getSpec", "(", "cls", ")", ":", "spec", "=", "{", "\"description\"", ":", "ApicalTMSequenceRegion", ".", "__doc__", ",", "\"singleNodeOnly\"", ":", "True", ",", "\"inputs\"", ":", "{", "\"activeColumns\"", ":", "{", "\"description\"", ":", "(", "\"An array of 0's and 1's representing the active \"", "\"minicolumns, i.e. the input to the TemporalMemory\"", ")", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"required\"", ":", "True", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultInput\"", ":", "True", ",", "\"requireSplitterMap\"", ":", "False", "}", ",", "\"resetIn\"", ":", "{", "\"description\"", ":", "(", "\"A boolean flag that indicates whether\"", "\" or not the input vector received in this compute cycle\"", "\" represents the first presentation in a\"", "\" new temporal sequence.\"", ")", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", ",", "\"required\"", ":", "False", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultInput\"", ":", "False", ",", "\"requireSplitterMap\"", ":", "False", "}", ",", "\"apicalInput\"", ":", "{", "\"description\"", ":", "\"An array of 0's and 1's representing top down input.\"", "\" The input will be provided to apical dendrites.\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"required\"", ":", "False", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultInput\"", ":", "False", ",", "\"requireSplitterMap\"", ":", "False", "}", ",", "\"apicalGrowthCandidates\"", ":", "{", "\"description\"", ":", "(", "\"An array of 0's and 1's representing apical input \"", "\"that can be learned on new synapses on apical \"", "\"segments. If this input is a length-0 array, the \"", "\"whole apicalInput is used.\"", ")", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"required\"", ":", "False", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultInput\"", ":", "False", ",", "\"requireSplitterMap\"", ":", "False", "}", ",", "}", ",", "\"outputs\"", ":", "{", "\"nextPredictedCells\"", ":", "{", "\"description\"", ":", "(", "\"A binary output containing a 1 for every \"", "\"cell that is predicted for the next timestep.\"", ")", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultOutput\"", ":", "False", "}", ",", "\"predictedActiveCells\"", ":", "{", "\"description\"", ":", "(", "\"A binary output containing a 1 for every \"", "\"cell that transitioned from predicted to active.\"", ")", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultOutput\"", ":", "False", "}", ",", "\"activeCells\"", ":", "{", "\"description\"", ":", "(", "\"A binary output containing a 1 for every \"", "\"cell that is currently active.\"", ")", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultOutput\"", ":", "True", "}", ",", "\"winnerCells\"", ":", "{", "\"description\"", ":", "(", "\"A binary output containing a 1 for every \"", "\"'winner' cell in the TM.\"", ")", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "0", ",", "\"regionLevel\"", ":", "True", ",", "\"isDefaultOutput\"", ":", "False", "}", ",", "}", ",", "\"parameters\"", ":", "{", "# Input sizes (the network API doesn't provide these during initialize)", "\"columnCount\"", ":", "{", "\"description\"", ":", "(", "\"The size of the 'activeColumns' input \"", "+", "\"(i.e. 
the number of columns)\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"apicalInputWidth\"", ":", "{", "\"description\"", ":", "\"The size of the 'apicalInput' input\"", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"learn\"", ":", "{", "\"description\"", ":", "\"True if the TM should learn.\"", ",", "\"accessMode\"", ":", "\"ReadWrite\"", ",", "\"dataType\"", ":", "\"Bool\"", ",", "\"count\"", ":", "1", ",", "\"defaultValue\"", ":", "\"true\"", "}", ",", "\"cellsPerColumn\"", ":", "{", "\"description\"", ":", "\"Number of cells per column\"", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"activationThreshold\"", ":", "{", "\"description\"", ":", "(", "\"If the number of active connected synapses on a \"", "\"segment is at least this threshold, the segment \"", "\"is said to be active.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"reducedBasalThreshold\"", ":", "{", "\"description\"", ":", "(", "\"Activation threshold of basal segments for cells \"", "\"with active apical segments (with apicalTiebreak \"", "\"implementation). \"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"initialPermanence\"", ":", "{", "\"description\"", ":", "\"Initial permanence of a new synapse.\"", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"connectedPermanence\"", ":", "{", "\"description\"", ":", "(", "\"If the permanence value for a synapse is greater \"", "\"than this value, it is said to be connected.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"minThreshold\"", ":", "{", "\"description\"", ":", "(", "\"If the number of synapses active on a segment is at \"", "\"least this threshold, it is selected as the best \"", "\"matching cell in a bursting column.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", ",", "\"constraints\"", ":", "\"\"", "}", ",", "\"sampleSize\"", ":", "{", "\"description\"", ":", "(", "\"The desired number of active synapses for an \"", "+", "\"active cell\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", "}", ",", "\"learnOnOneCell\"", ":", "{", "\"description\"", ":", "(", "\"If True, the winner cell for each column will be\"", "\" fixed between resets.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Bool\"", ",", "\"count\"", ":", "1", ",", "\"defaultValue\"", ":", "\"false\"", "}", ",", "\"maxSynapsesPerSegment\"", ":", "{", "\"description\"", ":", "\"The maximum number of synapses per segment\"", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", "}", ",", "\"maxSegmentsPerCell\"", ":", "{", "\"description\"", ":", "\"The maximum number of segments per cell\"", ",", 
"\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", "}", ",", "\"permanenceIncrement\"", ":", "{", "\"description\"", ":", "(", "\"Amount by which permanences of synapses are \"", "\"incremented during learning.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", "}", ",", "\"permanenceDecrement\"", ":", "{", "\"description\"", ":", "(", "\"Amount by which permanences of synapses are \"", "\"decremented during learning.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", "}", ",", "\"basalPredictedSegmentDecrement\"", ":", "{", "\"description\"", ":", "(", "\"Amount by which active permanences of synapses of \"", "\"previously predicted but inactive segments are \"", "\"decremented.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", "}", ",", "\"apicalPredictedSegmentDecrement\"", ":", "{", "\"description\"", ":", "(", "\"Amount by which active permanences of synapses of \"", "\"previously predicted but inactive segments are \"", "\"decremented.\"", ")", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Real32\"", ",", "\"count\"", ":", "1", "}", ",", "\"seed\"", ":", "{", "\"description\"", ":", "\"Seed for the random number generator.\"", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"UInt32\"", ",", "\"count\"", ":", "1", "}", ",", "\"implementation\"", ":", "{", "\"description\"", ":", "\"Apical implementation\"", ",", "\"accessMode\"", ":", "\"Read\"", ",", "\"dataType\"", ":", "\"Byte\"", ",", "\"count\"", ":", "0", ",", "\"constraints\"", ":", "(", "\"enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent\"", ")", ",", "\"defaultValue\"", ":", "\"ApicalTiebreakCPP\"", "}", ",", "}", ",", "}", "return", "spec" ]
Return the Spec for ApicalTMSequenceRegion.
[ "Return", "the", "Spec", "for", "ApicalTMSequenceRegion", "." ]
python
train
34.186722
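A minimal usage sketch for the record above, assuming htmresearch is installed and that getSpec is exposed as a classmethod on the region (the import path is taken from the record's path field; nothing here is confirmed beyond the dict shown in the code):

from htmresearch.regions.ApicalTMSequenceRegion import ApicalTMSequenceRegion

spec = ApicalTMSequenceRegion.getSpec()
# count=0 marks variable-length array inputs/outputs; 'activeCells' is the default output
print(spec["outputs"]["activeCells"]["isDefaultOutput"])      # True
print(spec["parameters"]["implementation"]["defaultValue"])   # 'ApicalTiebreakCPP'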
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L989-L1004
def _map_input(self, input_stream):
    """
    Iterate over input and call the mapper for each item.
    If the job has a parser defined, the return values from the parser will
    be passed as arguments to the mapper.

    If the input is coded output from a previous run,
    the arguments will be split into key and value.
    """
    for record in self.reader(input_stream):
        for output in self.mapper(*record):
            yield output
    if self.final_mapper != NotImplemented:
        for output in self.final_mapper():
            yield output
    self._flush_batch_incr_counter()

[ "def", "_map_input", "(", "self", ",", "input_stream", ")", ":", "for", "record", "in", "self", ".", "reader", "(", "input_stream", ")", ":", "for", "output", "in", "self", ".", "mapper", "(", "*", "record", ")", ":", "yield", "output", "if", "self", ".", "final_mapper", "!=", "NotImplemented", ":", "for", "output", "in", "self", ".", "final_mapper", "(", ")", ":", "yield", "output", "self", ".", "_flush_batch_incr_counter", "(", ")" ]
Iterate over input and call the mapper for each item. If the job has a parser defined, the return values from the parser will be passed as arguments to the mapper. If the input is coded output from a previous run, the arguments will be split into key and value.
[ "Iterate", "over", "input", "and", "call", "the", "mapper", "for", "each", "item", ".", "If", "the", "job", "has", "a", "parser", "defined", "the", "return", "values", "from", "the", "parser", "will", "be", "passed", "as", "arguments", "to", "the", "mapper", "." ]
python
train
39.9375
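The reader -> mapper -> final_mapper chaining above can be reproduced with plain generators; this is a toy WordCount-style illustration (all names hypothetical), not luigi's actual JobTask API:

def reader(stream):
    for line in stream:
        yield (line.rstrip("\n"),)  # one-tuple, unpacked into the mapper

def mapper(line):
    for word in line.split():
        yield word, 1

def map_input(stream):
    # mirrors _map_input: parse each record, fan out the mapper's results
    for record in reader(stream):
        for output in mapper(*record):
            yield output

print(list(map_input(["a b", "b"])))  # [('a', 1), ('b', 1), ('b', 1)]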
HPENetworking/PYHPEIMC
archived/pyhpimc.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/archived/pyhpimc.py#L269-L305
def get_dev_details(ip_address):
    """Takes string input of IP address to issue RESTful call to HP IMC

    :param ip_address: string object of dotted decimal notation of IPv4 address

    :return: dictionary of device details

    >>> get_dev_details('10.101.0.1')
    {'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'}

    >>> get_dev_details('8.8.8.8')
    Device not found
    'Device not found'
    """
    # checks to see if the imc credentials are already available
    if auth is None or url is None:
        set_imc_creds()
    global r
    get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \
        str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false"
    f_url = url + get_dev_details_url
    payload = None
    # creates the URL using the payload variable as the contents
    r = requests.get(f_url, auth=auth, headers=headers)
    # r.status_code
    if r.status_code == 200:
        dev_details = (json.loads(r.text))
        if len(dev_details) == 0:
            print("Device not found")
            return "Device not found"
        elif type(dev_details['device']) == list:
            for i in dev_details['device']:
                if i['ip'] == ip_address:
                    dev_details = i
                    return dev_details
        elif type(dev_details['device']) == dict:
            return dev_details['device']
    else:
        print("dev_details: An Error has occured")
[ "def", "get_dev_details", "(", "ip_address", ")", ":", "# checks to see if the imc credentials are already available", "if", "auth", "is", "None", "or", "url", "is", "None", ":", "set_imc_creds", "(", ")", "global", "r", "get_dev_details_url", "=", "\"/imcrs/plat/res/device?resPrivilegeFilter=false&ip=\"", "+", "str", "(", "ip_address", ")", "+", "\"&start=0&size=1000&orderBy=id&desc=false&total=false\"", "f_url", "=", "url", "+", "get_dev_details_url", "payload", "=", "None", "# creates the URL using the payload variable as the contents", "r", "=", "requests", ".", "get", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "headers", ")", "# r.status_code", "if", "r", ".", "status_code", "==", "200", ":", "dev_details", "=", "(", "json", ".", "loads", "(", "r", ".", "text", ")", ")", "if", "len", "(", "dev_details", ")", "==", "0", ":", "print", "(", "\"Device not found\"", ")", "return", "\"Device not found\"", "elif", "type", "(", "dev_details", "[", "'device'", "]", ")", "==", "list", ":", "for", "i", "in", "dev_details", "[", "'device'", "]", ":", "if", "i", "[", "'ip'", "]", "==", "ip_address", ":", "dev_details", "=", "i", "return", "dev_details", "elif", "type", "(", "dev_details", "[", "'device'", "]", ")", "==", "dict", ":", "return", "dev_details", "[", "'device'", "]", "else", ":", "print", "(", "\"dev_details: An Error has occured\"", ")" ]
Takes string input of IP address to issue RESTful call to HP IMC

:param ip_address: string object of dotted decimal notation of IPv4 address

:return: dictionary of device details

>>> get_dev_details('10.101.0.1')
{'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'}

>>> get_dev_details('8.8.8.8')
Device not found
'Device not found'
[ "Takes", "string", "input", "of", "IP", "address", "to", "issue", "RESTUL", "call", "to", "HP", "IMC", ":", "param", "ip_address", ":", "string", "object", "of", "dotted", "decimal", "notation", "of", "IPv4", "address", ":", "return", ":", "dictionary", "of", "device", "details" ]
python
train
54.837838
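Usage follows the doctest embedded in the record: credentials must already be available (otherwise set_imc_creds() is invoked lazily), and a miss returns the string 'Device not found' rather than raising:

dev = get_dev_details('10.101.0.1')
if dev != "Device not found":
    print(dev['sysName'], dev['ip'])  # e.g. Cisco2811.haw.int 10.101.0.1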
gem/oq-engine
openquake/hazardlib/gsim/bindi_2017.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/bindi_2017.py#L79-L96
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extracting dictionary of coefficients specific to required
    # intensity measure type.
    C = self.COEFFS[imt]
    mean = (self._get_magnitude_scaling(C, rup.mag) +
            self._get_distance_scaling(C, dists, rup.mag) +
            self._get_site_term(C, sites.vs30))
    # Mean is returned in terms of m/s^2. Need to convert to g
    mean -= np.log(g)
    stddevs = self.get_stddevs(C, sites.vs30.shape, stddev_types)
    return mean + self.adjustment_factor, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# extracting dictionary of coefficients specific to required", "# intensity measure type.", "C", "=", "self", ".", "COEFFS", "[", "imt", "]", "mean", "=", "(", "self", ".", "_get_magnitude_scaling", "(", "C", ",", "rup", ".", "mag", ")", "+", "self", ".", "_get_distance_scaling", "(", "C", ",", "dists", ",", "rup", ".", "mag", ")", "+", "self", ".", "_get_site_term", "(", "C", ",", "sites", ".", "vs30", ")", ")", "# Mean is returned in terms of m/s^2. Need to convert to g", "mean", "-=", "np", ".", "log", "(", "g", ")", "stddevs", "=", "self", ".", "get_stddevs", "(", "C", ",", "sites", ".", "vs30", ".", "shape", ",", "stddev_types", ")", "return", "mean", "+", "self", ".", "adjustment_factor", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
41.611111
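The unit conversion step deserves a worked line: the model produces the mean in ln(m/s^2), and subtracting ln(g) converts it to ln(g) because ln(x/g) = ln(x) - ln(g). A small numeric sketch (the value is hypothetical; g is assumed to be scipy.constants.g):

import numpy as np
from scipy.constants import g  # 9.80665 m/s^2

ln_sa_ms2 = np.log(1.2)          # hypothetical mean in ln(m/s^2)
ln_sa_g = ln_sa_ms2 - np.log(g)  # same quantity expressed in ln(g)
print(np.exp(ln_sa_g))           # ~0.1224 g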
bodylabs/lace
lace/color.py
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/color.py#L5-L39
def colors_like(color, arr, colormap=DEFAULT_COLORMAP):
    '''
    Given an array of size NxM (usually Nx3), we accept color in the
    following ways:

    - A string color name. The accepted names are roughly what's in
      X11's rgb.txt
    - An explicit rgb triple, in (3, ), (3, 1), or (1, 3) shape
    - A list of values (N, ), (N, 1), or (1, N) that are put through a
      colormap to get per vertex color
    - An array of colors (N, 3) or (3, N)

    There is a potential for conflict here if N == 3. In that case we
    assume a value is an rgb triple, not a colormap index. This is a sort
    of degenerate case, as a mesh with three vertices is just a single
    triangle and not something we ever actually use in practice.
    '''
    import numpy as np
    from blmath.numerics import is_empty_arraylike
    if is_empty_arraylike(color):
        return None
    if isinstance(color, basestring):
        from lace.color_names import name_to_rgb
        color = name_to_rgb[color]
    elif isinstance(color, list):
        color = np.array(color)
    color = np.squeeze(color)
    num_verts = arr.shape[0]
    if color.ndim == 1:
        if color.shape[0] == 3:  # rgb triple
            return np.ones((num_verts, 3)) * np.array([color])
        else:
            from matplotlib import cm
            return np.ones((num_verts, 3)) * cm.get_cmap(colormap)(color.flatten())[:, :3]
    elif color.ndim == 2:
        if color.shape[1] == num_verts:
            color = color.T
        return np.ones((num_verts, 3)) * color
    else:
        raise ValueError("Colors must be specified as one or two dimensions")
[ "def", "colors_like", "(", "color", ",", "arr", ",", "colormap", "=", "DEFAULT_COLORMAP", ")", ":", "import", "numpy", "as", "np", "from", "blmath", ".", "numerics", "import", "is_empty_arraylike", "if", "is_empty_arraylike", "(", "color", ")", ":", "return", "None", "if", "isinstance", "(", "color", ",", "basestring", ")", ":", "from", "lace", ".", "color_names", "import", "name_to_rgb", "color", "=", "name_to_rgb", "[", "color", "]", "elif", "isinstance", "(", "color", ",", "list", ")", ":", "color", "=", "np", ".", "array", "(", "color", ")", "color", "=", "np", ".", "squeeze", "(", "color", ")", "num_verts", "=", "arr", ".", "shape", "[", "0", "]", "if", "color", ".", "ndim", "==", "1", ":", "if", "color", ".", "shape", "[", "0", "]", "==", "3", ":", "# rgb triple", "return", "np", ".", "ones", "(", "(", "num_verts", ",", "3", ")", ")", "*", "np", ".", "array", "(", "[", "color", "]", ")", "else", ":", "from", "matplotlib", "import", "cm", "return", "np", ".", "ones", "(", "(", "num_verts", ",", "3", ")", ")", "*", "cm", ".", "get_cmap", "(", "colormap", ")", "(", "color", ".", "flatten", "(", ")", ")", "[", ":", ",", ":", "3", "]", "elif", "color", ".", "ndim", "==", "2", ":", "if", "color", ".", "shape", "[", "1", "]", "==", "num_verts", ":", "color", "=", "color", ".", "T", "return", "np", ".", "ones", "(", "(", "num_verts", ",", "3", ")", ")", "*", "color", "else", ":", "raise", "ValueError", "(", "\"Colors must be specified as one or two dimensions\"", ")" ]
Given an array of size NxM (usually Nx3), we accept color in the following ways:

- A string color name. The accepted names are roughly what's in X11's rgb.txt
- An explicit rgb triple, in (3, ), (3, 1), or (1, 3) shape
- A list of values (N, ), (N, 1), or (1, N) that are put through a colormap to get per vertex color
- An array of colors (N, 3) or (3, N)

There is a potential for conflict here if N == 3. In that case we assume a value is an rgb triple, not a colormap index. This is a sort of degenerate case, as a mesh with three vertices is just a single triangle and not something we ever actually use in practice.
[ "Given", "an", "array", "of", "size", "NxM", "(", "usually", "Nx3", ")", "we", "accept", "color", "in", "the", "following", "ways", ":", "-", "A", "string", "color", "name", ".", "The", "accepted", "names", "are", "roughly", "what", "s", "in", "X11", "s", "rgb", ".", "txt", "-", "An", "explicit", "rgb", "triple", "in", "(", "3", ")", "(", "3", "1", ")", "or", "(", "1", "3", ")", "shape", "-", "A", "list", "of", "values", "(", "N", ")", "(", "N", "1", ")", "or", "(", "1", "N", ")", "that", "are", "put", "through", "a", "colormap", "to", "get", "per", "vertex", "color", "-", "An", "array", "of", "colors", "(", "N", "3", ")", "or", "(", "3", "N", ")" ]
python
train
44.885714
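A usage sketch covering two of the input styles described in the docstring, assuming lace and matplotlib are installed and that 'red' exists in lace's name_to_rgb table:

import numpy as np

verts = np.zeros((10, 3))
by_name = colors_like('red', verts)                     # one rgb triple tiled to (10, 3)
by_scalar = colors_like(np.linspace(0, 1, 10), verts)   # per-vertex colormap lookup
print(by_name.shape, by_scalar.shape)                   # (10, 3) (10, 3)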
streamlink/streamlink
src/streamlink/plugins/funimationnow.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugins/funimationnow.py#L127-L135
def sources(self):
    """
    Get the sources for a given experience_id, which is tied to a specific language

    :param experience_id: int; video content id
    :return: sources dict
    """
    api_url = self.sources_api_url.format(experience_id=self.experience_id)
    res = self.get(api_url, params={"pinst_id": self.pinst_id})
    return self.session.http.json(res)
[ "def", "sources", "(", "self", ")", ":", "api_url", "=", "self", ".", "sources_api_url", ".", "format", "(", "experience_id", "=", "self", ".", "experience_id", ")", "res", "=", "self", ".", "get", "(", "api_url", ",", "params", "=", "{", "\"pinst_id\"", ":", "self", ".", "pinst_id", "}", ")", "return", "self", ".", "session", ".", "http", ".", "json", "(", "res", ")" ]
Get the sources for a given experience_id, which is tied to a specific language :param experience_id: int; video content id :return: sources dict
[ "Get", "the", "sources", "for", "a", "given", "experience_id", "which", "is", "tied", "to", "a", "specific", "language", ":", "param", "experience_id", ":", "int", ";", "video", "content", "id", ":", "return", ":", "sources", "dict" ]
python
test
43.888889
pudo/dataset
dataset/table.py
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L369-L388
def drop_column(self, name):
    """
    Drop the column ``name``.
    ::

        table.drop_column('created_at')

    """
    if self.db.engine.dialect.name == 'sqlite':
        raise RuntimeError("SQLite does not support dropping columns.")
    name = normalize_column_name(name)
    with self.db.lock:
        if not self.exists or not self.has_column(name):
            log.debug("Column does not exist: %s", name)
            return

        self._threading_warn()
        self.db.op.drop_column(
            self.table.name,
            name,
            self.table.schema
        )
        self._reflect_table()
[ "def", "drop_column", "(", "self", ",", "name", ")", ":", "if", "self", ".", "db", ".", "engine", ".", "dialect", ".", "name", "==", "'sqlite'", ":", "raise", "RuntimeError", "(", "\"SQLite does not support dropping columns.\"", ")", "name", "=", "normalize_column_name", "(", "name", ")", "with", "self", ".", "db", ".", "lock", ":", "if", "not", "self", ".", "exists", "or", "not", "self", ".", "has_column", "(", "name", ")", ":", "log", ".", "debug", "(", "\"Column does not exist: %s\"", ",", "name", ")", "return", "self", ".", "_threading_warn", "(", ")", "self", ".", "db", ".", "op", ".", "drop_column", "(", "self", ".", "table", ".", "name", ",", "name", ",", "self", ".", "table", ".", "schema", ")", "self", ".", "_reflect_table", "(", ")" ]
Drop the column ``name``.
::

    table.drop_column('created_at')
[ "Drop", "the", "column", "name", ".", "::", "table", ".", "drop_column", "(", "created_at", ")" ]
python
train
33.25
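A usage sketch, assuming a dataset connection to a non-SQLite backend (the DSN below is hypothetical):

import dataset

db = dataset.connect('postgresql://localhost/demo')
table = db['users']
try:
    table.drop_column('created_at')
except RuntimeError:
    pass  # raised on SQLite, which cannot drop columns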
pybel/pybel-tools
src/pybel_tools/analysis/heat.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/analysis/heat.py#L369-L399
def get_random_edge(self):
    """This function should be run when there are no leaves, but there are still
    unscored nodes. It will introduce a probabilistic element to the algorithm,
    where some edges are disregarded randomly to eventually get a score for the
    network. This means that the score can be averaged over many runs for a
    given graph, and a better data structure will have to be later developed
    that doesn't destroy the graph (instead, annotates which edges have been
    disregarded, later)

    1. get all un-scored
    2. rank by in-degree
    3. weighted probability over all in-edges where lower in-degree means higher probability
    4. pick randomly which edge

    :return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)
    :rtype: tuple
    """
    nodes = [
        (n, self.in_out_ratio(n))
        for n in self.unscored_nodes_iter()
        if n != self.target_node
    ]

    node, deg = min(nodes, key=itemgetter(1))
    log.log(5, 'checking %s (in/out ratio: %.3f)', node, deg)

    possible_edges = list(self.graph.in_edges(node, keys=True))
    log.log(5, 'possible edges: %s', possible_edges)

    edge_to_remove = random.choice(possible_edges)
    log.log(5, 'chose: %s', edge_to_remove)

    return edge_to_remove
[ "def", "get_random_edge", "(", "self", ")", ":", "nodes", "=", "[", "(", "n", ",", "self", ".", "in_out_ratio", "(", "n", ")", ")", "for", "n", "in", "self", ".", "unscored_nodes_iter", "(", ")", "if", "n", "!=", "self", ".", "target_node", "]", "node", ",", "deg", "=", "min", "(", "nodes", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "log", ".", "log", "(", "5", ",", "'checking %s (in/out ratio: %.3f)'", ",", "node", ",", "deg", ")", "possible_edges", "=", "self", ".", "graph", ".", "in_edges", "(", "node", ",", "keys", "=", "True", ")", "log", ".", "log", "(", "5", ",", "'possible edges: %s'", ",", "possible_edges", ")", "edge_to_remove", "=", "random", ".", "choice", "(", "possible_edges", ")", "log", ".", "log", "(", "5", ",", "'chose: %s'", ",", "edge_to_remove", ")", "return", "edge_to_remove" ]
This function should be run when there are no leaves, but there are still unscored nodes. It will introduce a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score for the network. This means that the score can be averaged over many runs for a given graph, and a better data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges have been disregarded, later)

1. get all un-scored
2. rank by in-degree
3. weighted probability over all in-edges where lower in-degree means higher probability
4. pick randomly which edge

:return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)
:rtype: tuple
[ "This", "function", "should", "be", "run", "when", "there", "are", "no", "leaves", "but", "there", "are", "still", "unscored", "nodes", ".", "It", "will", "introduce", "a", "probabilistic", "element", "to", "the", "algorithm", "where", "some", "edges", "are", "disregarded", "randomly", "to", "eventually", "get", "a", "score", "for", "the", "network", ".", "This", "means", "that", "the", "score", "can", "be", "averaged", "over", "many", "runs", "for", "a", "given", "graph", "and", "a", "better", "data", "structure", "will", "have", "to", "be", "later", "developed", "that", "doesn", "t", "destroy", "the", "graph", "(", "instead", "annotates", "which", "edges", "have", "been", "disregarded", "later", ")" ]
python
valid
44.451613
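The selection pattern in isolation — the lowest in/out-degree ratio among nodes that actually have in-edges, then a uniformly random in-edge — on a toy NetworkX MultiDiGraph (NetworkX 2.x returns edge views, so the edges must be materialised with list() before random.choice):

import random
import networkx as nx

g = nx.MultiDiGraph()
g.add_edges_from([('a', 'c'), ('b', 'c'), ('c', 'd')])

candidates = [n for n in g if g.in_degree(n) > 0]
node = min(candidates, key=lambda n: g.in_degree(n) / max(g.out_degree(n), 1))
edge = random.choice(list(g.in_edges(node, keys=True)))
print(node, edge)  # d ('c', 'd', 0)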
berkeley-cocosci/Wallace
wallace/command_line.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L83-L219
def setup_experiment(debug=True, verbose=False, app=None): """Check the app and, if it's compatible with Wallace, freeze its state.""" print_header() # Verify that the package is usable. log("Verifying that directory is compatible with Wallace...") if not verify_package(verbose=verbose): raise AssertionError( "This is not a valid Wallace app. " + "Fix the errors and then try running 'wallace verify'.") # Verify that the Postgres server is running. try: psycopg2.connect(database="x", user="postgres", password="nada") except psycopg2.OperationalError, e: if "could not connect to server" in str(e): raise RuntimeError("The Postgres server isn't running.") # Load psiTurk configuration. config = PsiturkConfig() config.load_config() # Check that the demo-specific requirements are satisfied. try: with open("requirements.txt", "r") as f: dependencies = f.readlines() except: dependencies = [] pkg_resources.require(dependencies) # Generate a unique id for this experiment. id = "w" + str(uuid.uuid4())[0:28] # If the user provided an app name, use it everywhere that's user-facing. if app: id_long = id id = str(app) log("Running as experiment " + id + "...") # Copy this directory into a temporary folder, ignoring .git dst = os.path.join(tempfile.mkdtemp(), id) to_ignore = shutil.ignore_patterns( ".git/*", "*.db", "snapshots", "data", "server.log" ) shutil.copytree(os.getcwd(), dst, ignore=to_ignore) click.echo(dst) # Save the experiment id with open(os.path.join(dst, "experiment_id.txt"), "w") as file: if app: file.write(id_long) else: file.write(id) # Zip up the temporary directory and place it in the cwd. if not debug: log("Freezing the experiment package...") shutil.make_archive( os.path.join("snapshots", id + "-code"), "zip", dst) # Change directory to the temporary folder. cwd = os.getcwd() os.chdir(dst) # Check directories. if not os.path.exists("static/scripts"): os.makedirs("static/scripts") if not os.path.exists("templates"): os.makedirs("templates") if not os.path.exists("static/css"): os.makedirs("static/css") # Rename experiment.py to wallace_experiment.py to aviod psiTurk conflict. os.rename( os.path.join(dst, "experiment.py"), os.path.join(dst, "wallace_experiment.py")) # Copy files into this experiment package. src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "custom.py") shutil.copy(src, os.path.join(dst, "custom.py")) heroku_files = [ "Procfile", "requirements.txt", "psiturkapp.py", "worker.py", "clock.py", ] for filename in heroku_files: src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "heroku", filename) shutil.copy(src, os.path.join(dst, filename)) clock_on = config.getboolean('Server Parameters', 'clock_on') # If the clock process has been disabled, overwrite the Procfile. if not clock_on: src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "heroku", "Procfile_no_clock") shutil.copy(src, os.path.join(dst, "Procfile")) frontend_files = [ "static/css/wallace.css", "static/scripts/wallace.js", "static/scripts/reqwest.min.js", "templates/error_wallace.html", "templates/launch.html", "templates/complete.html", "static/robots.txt" ] for filename in frontend_files: src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "frontend", filename) shutil.copy(src, os.path.join(dst, filename)) time.sleep(0.25) os.chdir(cwd) return (id, dst)
[ "def", "setup_experiment", "(", "debug", "=", "True", ",", "verbose", "=", "False", ",", "app", "=", "None", ")", ":", "print_header", "(", ")", "# Verify that the package is usable.", "log", "(", "\"Verifying that directory is compatible with Wallace...\"", ")", "if", "not", "verify_package", "(", "verbose", "=", "verbose", ")", ":", "raise", "AssertionError", "(", "\"This is not a valid Wallace app. \"", "+", "\"Fix the errors and then try running 'wallace verify'.\"", ")", "# Verify that the Postgres server is running.", "try", ":", "psycopg2", ".", "connect", "(", "database", "=", "\"x\"", ",", "user", "=", "\"postgres\"", ",", "password", "=", "\"nada\"", ")", "except", "psycopg2", ".", "OperationalError", ",", "e", ":", "if", "\"could not connect to server\"", "in", "str", "(", "e", ")", ":", "raise", "RuntimeError", "(", "\"The Postgres server isn't running.\"", ")", "# Load psiTurk configuration.", "config", "=", "PsiturkConfig", "(", ")", "config", ".", "load_config", "(", ")", "# Check that the demo-specific requirements are satisfied.", "try", ":", "with", "open", "(", "\"requirements.txt\"", ",", "\"r\"", ")", "as", "f", ":", "dependencies", "=", "f", ".", "readlines", "(", ")", "except", ":", "dependencies", "=", "[", "]", "pkg_resources", ".", "require", "(", "dependencies", ")", "# Generate a unique id for this experiment.", "id", "=", "\"w\"", "+", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "[", "0", ":", "28", "]", "# If the user provided an app name, use it everywhere that's user-facing.", "if", "app", ":", "id_long", "=", "id", "id", "=", "str", "(", "app", ")", "log", "(", "\"Running as experiment \"", "+", "id", "+", "\"...\"", ")", "# Copy this directory into a temporary folder, ignoring .git", "dst", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "mkdtemp", "(", ")", ",", "id", ")", "to_ignore", "=", "shutil", ".", "ignore_patterns", "(", "\".git/*\"", ",", "\"*.db\"", ",", "\"snapshots\"", ",", "\"data\"", ",", "\"server.log\"", ")", "shutil", ".", "copytree", "(", "os", ".", "getcwd", "(", ")", ",", "dst", ",", "ignore", "=", "to_ignore", ")", "click", ".", "echo", "(", "dst", ")", "# Save the experiment id", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dst", ",", "\"experiment_id.txt\"", ")", ",", "\"w\"", ")", "as", "file", ":", "if", "app", ":", "file", ".", "write", "(", "id_long", ")", "else", ":", "file", ".", "write", "(", "id", ")", "# Zip up the temporary directory and place it in the cwd.", "if", "not", "debug", ":", "log", "(", "\"Freezing the experiment package...\"", ")", "shutil", ".", "make_archive", "(", "os", ".", "path", ".", "join", "(", "\"snapshots\"", ",", "id", "+", "\"-code\"", ")", ",", "\"zip\"", ",", "dst", ")", "# Change directory to the temporary folder.", "cwd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "dst", ")", "# Check directories.", "if", "not", "os", ".", "path", ".", "exists", "(", "\"static/scripts\"", ")", ":", "os", ".", "makedirs", "(", "\"static/scripts\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "\"templates\"", ")", ":", "os", ".", "makedirs", "(", "\"templates\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "\"static/css\"", ")", ":", "os", ".", "makedirs", "(", "\"static/css\"", ")", "# Rename experiment.py to wallace_experiment.py to aviod psiTurk conflict.", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "dst", ",", "\"experiment.py\"", ")", ",", "os", ".", "path", ".", "join", "(", "dst", ",", 
"\"wallace_experiment.py\"", ")", ")", "# Copy files into this experiment package.", "src", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"custom.py\"", ")", "shutil", ".", "copy", "(", "src", ",", "os", ".", "path", ".", "join", "(", "dst", ",", "\"custom.py\"", ")", ")", "heroku_files", "=", "[", "\"Procfile\"", ",", "\"requirements.txt\"", ",", "\"psiturkapp.py\"", ",", "\"worker.py\"", ",", "\"clock.py\"", ",", "]", "for", "filename", "in", "heroku_files", ":", "src", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"heroku\"", ",", "filename", ")", "shutil", ".", "copy", "(", "src", ",", "os", ".", "path", ".", "join", "(", "dst", ",", "filename", ")", ")", "clock_on", "=", "config", ".", "getboolean", "(", "'Server Parameters'", ",", "'clock_on'", ")", "# If the clock process has been disabled, overwrite the Procfile.", "if", "not", "clock_on", ":", "src", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"heroku\"", ",", "\"Procfile_no_clock\"", ")", "shutil", ".", "copy", "(", "src", ",", "os", ".", "path", ".", "join", "(", "dst", ",", "\"Procfile\"", ")", ")", "frontend_files", "=", "[", "\"static/css/wallace.css\"", ",", "\"static/scripts/wallace.js\"", ",", "\"static/scripts/reqwest.min.js\"", ",", "\"templates/error_wallace.html\"", ",", "\"templates/launch.html\"", ",", "\"templates/complete.html\"", ",", "\"static/robots.txt\"", "]", "for", "filename", "in", "frontend_files", ":", "src", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"frontend\"", ",", "filename", ")", "shutil", ".", "copy", "(", "src", ",", "os", ".", "path", ".", "join", "(", "dst", ",", "filename", ")", ")", "time", ".", "sleep", "(", "0.25", ")", "os", ".", "chdir", "(", "cwd", ")", "return", "(", "id", ",", "dst", ")" ]
Check the app and, if it's compatible with Wallace, freeze its state.
[ "Check", "the", "app", "and", "if", "it", "s", "compatible", "with", "Wallace", "freeze", "its", "state", "." ]
python
train
28.992701
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L3332-L3348
def copy(self): """ Return a new :class:`~pywbem.CIMClassName` object that is a copy of this CIM class path. Objects of this class have no mutable types in any attributes, so modifications of the original object will not affect the returned copy, and vice versa. Note that the Python functions :func:`py:copy.copy` and :func:`py:copy.deepcopy` can be used to create completely shallow or completely deep copies of objects of this class. """ return CIMClassName( self.classname, host=self.host, namespace=self.namespace)
[ "def", "copy", "(", "self", ")", ":", "return", "CIMClassName", "(", "self", ".", "classname", ",", "host", "=", "self", ".", "host", ",", "namespace", "=", "self", ".", "namespace", ")" ]
Return a new :class:`~pywbem.CIMClassName` object that is a copy of this CIM class path. Objects of this class have no mutable types in any attributes, so modifications of the original object will not affect the returned copy, and vice versa. Note that the Python functions :func:`py:copy.copy` and :func:`py:copy.deepcopy` can be used to create completely shallow or completely deep copies of objects of this class.
[ "Return", "a", "new", ":", "class", ":", "~pywbem", ".", "CIMClassName", "object", "that", "is", "a", "copy", "of", "this", "CIM", "class", "path", "." ]
python
train
37
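A usage sketch, assuming pywbem is installed (the constructor arguments mirror the ones used inside copy() itself):

from pywbem import CIMClassName

path = CIMClassName('CIM_Foo', host='srv1', namespace='root/cimv2')
clone = path.copy()
print(clone.classname, clone.host, clone.namespace)  # CIM_Foo srv1 root/cimv2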
uyar/pygenstub
pygenstub.py
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L102-L111
def extract_signature(docstring):
    """Extract the signature from a docstring.

    :sig: (str) -> Optional[str]
    :param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature.
    """
    root = publish_doctree(docstring, settings_overrides={"report_level": 5})
    fields = get_fields(root)
    return fields.get(SIG_FIELD)
[ "def", "extract_signature", "(", "docstring", ")", ":", "root", "=", "publish_doctree", "(", "docstring", ",", "settings_overrides", "=", "{", "\"report_level\"", ":", "5", "}", ")", "fields", "=", "get_fields", "(", "root", ")", "return", "fields", ".", "get", "(", "SIG_FIELD", ")" ]
Extract the signature from a docstring. :sig: (str) -> Optional[str] :param docstring: Docstring to extract the signature from. :return: Extracted signature, or ``None`` if there's no signature.
[ "Extract", "the", "signature", "from", "a", "docstring", "." ]
python
train
38.8
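A usage sketch built around the :sig: field convention the function looks for; the output shown is the expected value assuming SIG_FIELD names that field:

doc = (
    "Add two numbers.\n"
    "\n"
    ":sig: (int, int) -> int\n"
    ":param a: First operand.\n"
)
print(extract_signature(doc))  # expected: '(int, int) -> int'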
oasiswork/zimsoap
zimsoap/client.py
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L1195-L1245
def search_directory(self, **kwargs):
    """
    SearchAccount is deprecated, using SearchDirectory

    :param query: Query string - should be an LDAP-style filter
        string (RFC 2254)
    :param limit: The maximum number of accounts to return
        (0 is default and means all)
    :param offset: The starting offset (0, 25, etc)
    :param domain: The domain name to limit the search to
    :param applyCos: applyCos - Flag whether or not to apply the COS
        policy to account. Specify 0 (false) if only requesting attrs that
        aren't inherited from COS
    :param applyConfig: whether or not to apply the global config attrs to
        account. specify 0 (false) if only requesting attrs that aren't
        inherited from global config
    :param sortBy: Name of attribute to sort on. Default is the account name.
    :param types: Comma-separated list of types to return. Legal values are:
        accounts|distributionlists|aliases|resources|domains|coses
        (default is accounts)
    :param sortAscending: Whether to sort in ascending order. Default is
        1 (true)
    :param countOnly: Whether response should be count only. Default is
        0 (false)
    :param attrs: Comma-separated list of attrs to return ("displayName",
        "zimbraId", "zimbraAccountStatus")
    :return: dict of list of "account" "alias" "dl" "calresource" "domain"
        "cos"
    """
    search_response = self.request('SearchDirectory', kwargs)

    result = {}
    items = {
        "account": zobjects.Account.from_dict,
        "domain": zobjects.Domain.from_dict,
        "dl": zobjects.DistributionList.from_dict,
        "cos": zobjects.COS.from_dict,
        "calresource": zobjects.CalendarResource.from_dict
        # "alias": TODO,
    }

    for obj_type, func in items.items():
        if obj_type in search_response:
            if isinstance(search_response[obj_type], list):
                result[obj_type] = [
                    func(v) for v in search_response[obj_type]]
            else:
                result[obj_type] = func(search_response[obj_type])
    return result
[ "def", "search_directory", "(", "self", ",", "*", "*", "kwargs", ")", ":", "search_response", "=", "self", ".", "request", "(", "'SearchDirectory'", ",", "kwargs", ")", "result", "=", "{", "}", "items", "=", "{", "\"account\"", ":", "zobjects", ".", "Account", ".", "from_dict", ",", "\"domain\"", ":", "zobjects", ".", "Domain", ".", "from_dict", ",", "\"dl\"", ":", "zobjects", ".", "DistributionList", ".", "from_dict", ",", "\"cos\"", ":", "zobjects", ".", "COS", ".", "from_dict", ",", "\"calresource\"", ":", "zobjects", ".", "CalendarResource", ".", "from_dict", "# \"alias\": TODO,", "}", "for", "obj_type", ",", "func", "in", "items", ".", "items", "(", ")", ":", "if", "obj_type", "in", "search_response", ":", "if", "isinstance", "(", "search_response", "[", "obj_type", "]", ",", "list", ")", ":", "result", "[", "obj_type", "]", "=", "[", "func", "(", "v", ")", "for", "v", "in", "search_response", "[", "obj_type", "]", "]", "else", ":", "result", "[", "obj_type", "]", "=", "func", "(", "search_response", "[", "obj_type", "]", ")", "return", "result" ]
SearchAccount is deprecated, using SearchDirectory

:param query: Query string - should be an LDAP-style filter string (RFC 2254)
:param limit: The maximum number of accounts to return (0 is default and means all)
:param offset: The starting offset (0, 25, etc)
:param domain: The domain name to limit the search to
:param applyCos: applyCos - Flag whether or not to apply the COS policy to account. Specify 0 (false) if only requesting attrs that aren't inherited from COS
:param applyConfig: whether or not to apply the global config attrs to account. specify 0 (false) if only requesting attrs that aren't inherited from global config
:param sortBy: Name of attribute to sort on. Default is the account name.
:param types: Comma-separated list of types to return. Legal values are: accounts|distributionlists|aliases|resources|domains|coses (default is accounts)
:param sortAscending: Whether to sort in ascending order. Default is 1 (true)
:param countOnly: Whether response should be count only. Default is 0 (false)
:param attrs: Comma-separated list of attrs to return ("displayName", "zimbraId", "zimbraAccountStatus")
:return: dict of list of "account" "alias" "dl" "calresource" "domain" "cos"
[ "SearchAccount", "is", "deprecated", "using", "SearchDirectory" ]
python
train
43.27451
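A usage sketch, assuming client is an authenticated zimsoap admin client instance (hypothetical here). Note from the code above that a single match comes back as an unwrapped object rather than a one-element list:

result = client.search_directory(query='mail=*@example.com', types='accounts')
accounts = result.get('account', [])
if not isinstance(accounts, list):  # single hit: unwrapped object
    accounts = [accounts]
for account in accounts:
    print(account)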
Pelagicore/qface
qface/idl/domain.py
https://github.com/Pelagicore/qface/blob/7f60e91e3a91a7cb04cfacbc9ce80f43df444853/qface/idl/domain.py#L146-L149
def add_tag(self, tag):
    """ add a tag to the tag list """
    if tag not in self._tags:
        self._tags[tag] = dict()
[ "def", "add_tag", "(", "self", ",", "tag", ")", ":", "if", "tag", "not", "in", "self", ".", "_tags", ":", "self", ".", "_tags", "[", "tag", "]", "=", "dict", "(", ")" ]
add a tag to the tag list
[ "add", "a", "tag", "to", "the", "tag", "list" ]
python
train
33.25
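The tag store is just a dict of dicts; a standalone equivalent showing that repeated calls are no-ops:

class Tagged(object):
    def __init__(self):
        self._tags = {}

    def add_tag(self, tag):
        if tag not in self._tags:
            self._tags[tag] = dict()

t = Tagged()
t.add_tag('config')
t.add_tag('config')  # second call is a no-op; existing tag data is kept
print(t._tags)       # {'config': {}}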
merll/docker-map
dockermap/map/config/main.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/config/main.py#L332-L428
def dependency_items(self): """ Generates all containers' dependencies, i.e. an iterator on tuples in the format ``(container_name, used_containers)``, whereas the used containers are a set, and can be empty. :return: Container dependencies. :rtype: collections.Iterable """ def _get_used_items_np(u): volume_config_name, __, volume_instance = u.name.partition('.') attaching_config_name = attaching.get(volume_config_name) if attaching_config_name: used_c_name = attaching_config_name used_instances = instances.get(attaching_config_name) else: used_c_name = volume_config_name if volume_instance: used_instances = (volume_instance, ) else: used_instances = instances.get(volume_config_name) return [MapConfigId(ItemType.CONTAINER, self._name, used_c_name, ai) for ai in used_instances or (None, )] def _get_used_items_ap(u): volume_config_name, __, volume_instance = u.name.partition('.') attaching_config = ext_map.get_existing(volume_config_name) attaching_instances = instances.get(volume_config_name) config_volumes = {a.name for a in attaching_config.attaches} if not volume_instance or volume_instance in config_volumes: used_instances = attaching_instances else: used_instances = (volume_instance, ) return [MapConfigId(ItemType.CONTAINER, self._name, volume_config_name, ai) for ai in used_instances or (None, )] def _get_linked_items(lc): linked_config_name, __, linked_instance = lc.partition('.') if linked_instance: linked_instances = (linked_instance, ) else: linked_instances = instances.get(linked_config_name) return [MapConfigId(ItemType.CONTAINER, self._name, linked_config_name, li) for li in linked_instances or (None, )] def _get_network_mode_items(n): net_config_name, net_instance = n network_ref_config = ext_map.get_existing(net_config_name) if network_ref_config: if net_instance and net_instance in network_ref_config.instances: network_instances = (net_instance, ) else: network_instances = network_ref_config.instances or (None, ) return [MapConfigId(ItemType.CONTAINER, self._name, net_config_name, ni) for ni in network_instances] return [] def _get_network_items(n): if n.network_name in DEFAULT_PRESET_NETWORKS: return [] net_items = [MapConfigId(ItemType.NETWORK, self._name, n.network_name)] if n.links: net_items.extend(itertools.chain.from_iterable(_get_linked_items(l.container) for l in n.links)) return net_items if self._extended: ext_map = self else: ext_map = self.get_extended_map() instances = {c_name: c_config.instances for c_name, c_config in ext_map} if not self.use_attached_parent_name: attaching = {attaches.name: c_name for c_name, c_config in ext_map for attaches in c_config.attaches} used_func = _get_used_items_np else: used_func = _get_used_items_ap def _get_dep_list(name, config): image, tag = self.get_image(config.image or name) d = [] nw = config.network_mode if isinstance(nw, tuple): merge_list(d, _get_network_mode_items(nw)) merge_list(d, itertools.chain.from_iterable(map(_get_network_items, config.networks))) merge_list(d, itertools.chain.from_iterable(map(used_func, config.uses))) merge_list(d, itertools.chain.from_iterable(_get_linked_items(l.container) for l in config.links)) d.extend(MapConfigId(ItemType.VOLUME, self._name, name, a.name) for a in config.attaches) d.append(MapConfigId(ItemType.IMAGE, self._name, image, tag)) return d for c_name, c_config in ext_map: dep_list = _get_dep_list(c_name, c_config) for c_instance in c_config.instances or (None, ): yield MapConfigId(ItemType.CONTAINER, self._name, c_name, c_instance), dep_list
[ "def", "dependency_items", "(", "self", ")", ":", "def", "_get_used_items_np", "(", "u", ")", ":", "volume_config_name", ",", "__", ",", "volume_instance", "=", "u", ".", "name", ".", "partition", "(", "'.'", ")", "attaching_config_name", "=", "attaching", ".", "get", "(", "volume_config_name", ")", "if", "attaching_config_name", ":", "used_c_name", "=", "attaching_config_name", "used_instances", "=", "instances", ".", "get", "(", "attaching_config_name", ")", "else", ":", "used_c_name", "=", "volume_config_name", "if", "volume_instance", ":", "used_instances", "=", "(", "volume_instance", ",", ")", "else", ":", "used_instances", "=", "instances", ".", "get", "(", "volume_config_name", ")", "return", "[", "MapConfigId", "(", "ItemType", ".", "CONTAINER", ",", "self", ".", "_name", ",", "used_c_name", ",", "ai", ")", "for", "ai", "in", "used_instances", "or", "(", "None", ",", ")", "]", "def", "_get_used_items_ap", "(", "u", ")", ":", "volume_config_name", ",", "__", ",", "volume_instance", "=", "u", ".", "name", ".", "partition", "(", "'.'", ")", "attaching_config", "=", "ext_map", ".", "get_existing", "(", "volume_config_name", ")", "attaching_instances", "=", "instances", ".", "get", "(", "volume_config_name", ")", "config_volumes", "=", "{", "a", ".", "name", "for", "a", "in", "attaching_config", ".", "attaches", "}", "if", "not", "volume_instance", "or", "volume_instance", "in", "config_volumes", ":", "used_instances", "=", "attaching_instances", "else", ":", "used_instances", "=", "(", "volume_instance", ",", ")", "return", "[", "MapConfigId", "(", "ItemType", ".", "CONTAINER", ",", "self", ".", "_name", ",", "volume_config_name", ",", "ai", ")", "for", "ai", "in", "used_instances", "or", "(", "None", ",", ")", "]", "def", "_get_linked_items", "(", "lc", ")", ":", "linked_config_name", ",", "__", ",", "linked_instance", "=", "lc", ".", "partition", "(", "'.'", ")", "if", "linked_instance", ":", "linked_instances", "=", "(", "linked_instance", ",", ")", "else", ":", "linked_instances", "=", "instances", ".", "get", "(", "linked_config_name", ")", "return", "[", "MapConfigId", "(", "ItemType", ".", "CONTAINER", ",", "self", ".", "_name", ",", "linked_config_name", ",", "li", ")", "for", "li", "in", "linked_instances", "or", "(", "None", ",", ")", "]", "def", "_get_network_mode_items", "(", "n", ")", ":", "net_config_name", ",", "net_instance", "=", "n", "network_ref_config", "=", "ext_map", ".", "get_existing", "(", "net_config_name", ")", "if", "network_ref_config", ":", "if", "net_instance", "and", "net_instance", "in", "network_ref_config", ".", "instances", ":", "network_instances", "=", "(", "net_instance", ",", ")", "else", ":", "network_instances", "=", "network_ref_config", ".", "instances", "or", "(", "None", ",", ")", "return", "[", "MapConfigId", "(", "ItemType", ".", "CONTAINER", ",", "self", ".", "_name", ",", "net_config_name", ",", "ni", ")", "for", "ni", "in", "network_instances", "]", "return", "[", "]", "def", "_get_network_items", "(", "n", ")", ":", "if", "n", ".", "network_name", "in", "DEFAULT_PRESET_NETWORKS", ":", "return", "[", "]", "net_items", "=", "[", "MapConfigId", "(", "ItemType", ".", "NETWORK", ",", "self", ".", "_name", ",", "n", ".", "network_name", ")", "]", "if", "n", ".", "links", ":", "net_items", ".", "extend", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "_get_linked_items", "(", "l", ".", "container", ")", "for", "l", "in", "n", ".", "links", ")", ")", "return", "net_items", "if", "self", ".", "_extended", ":", "ext_map", "=", 
"self", "else", ":", "ext_map", "=", "self", ".", "get_extended_map", "(", ")", "instances", "=", "{", "c_name", ":", "c_config", ".", "instances", "for", "c_name", ",", "c_config", "in", "ext_map", "}", "if", "not", "self", ".", "use_attached_parent_name", ":", "attaching", "=", "{", "attaches", ".", "name", ":", "c_name", "for", "c_name", ",", "c_config", "in", "ext_map", "for", "attaches", "in", "c_config", ".", "attaches", "}", "used_func", "=", "_get_used_items_np", "else", ":", "used_func", "=", "_get_used_items_ap", "def", "_get_dep_list", "(", "name", ",", "config", ")", ":", "image", ",", "tag", "=", "self", ".", "get_image", "(", "config", ".", "image", "or", "name", ")", "d", "=", "[", "]", "nw", "=", "config", ".", "network_mode", "if", "isinstance", "(", "nw", ",", "tuple", ")", ":", "merge_list", "(", "d", ",", "_get_network_mode_items", "(", "nw", ")", ")", "merge_list", "(", "d", ",", "itertools", ".", "chain", ".", "from_iterable", "(", "map", "(", "_get_network_items", ",", "config", ".", "networks", ")", ")", ")", "merge_list", "(", "d", ",", "itertools", ".", "chain", ".", "from_iterable", "(", "map", "(", "used_func", ",", "config", ".", "uses", ")", ")", ")", "merge_list", "(", "d", ",", "itertools", ".", "chain", ".", "from_iterable", "(", "_get_linked_items", "(", "l", ".", "container", ")", "for", "l", "in", "config", ".", "links", ")", ")", "d", ".", "extend", "(", "MapConfigId", "(", "ItemType", ".", "VOLUME", ",", "self", ".", "_name", ",", "name", ",", "a", ".", "name", ")", "for", "a", "in", "config", ".", "attaches", ")", "d", ".", "append", "(", "MapConfigId", "(", "ItemType", ".", "IMAGE", ",", "self", ".", "_name", ",", "image", ",", "tag", ")", ")", "return", "d", "for", "c_name", ",", "c_config", "in", "ext_map", ":", "dep_list", "=", "_get_dep_list", "(", "c_name", ",", "c_config", ")", "for", "c_instance", "in", "c_config", ".", "instances", "or", "(", "None", ",", ")", ":", "yield", "MapConfigId", "(", "ItemType", ".", "CONTAINER", ",", "self", ".", "_name", ",", "c_name", ",", "c_instance", ")", ",", "dep_list" ]
Generates all containers' dependencies, i.e. an iterator over tuples in the format ``(container_name, used_containers)``, where the used containers form a set and may be empty. :return: Container dependencies. :rtype: collections.Iterable
[ "Generates", "all", "containers", "dependencies", "i", ".", "e", ".", "an", "iterator", "on", "tuples", "in", "the", "format", "(", "container_name", "used_containers", ")", "whereas", "the", "used", "containers", "are", "a", "set", "and", "can", "be", "empty", "." ]
python
train
47.226804
pudo/normality
normality/__init__.py
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/__init__.py#L60-L69
def slugify(text, sep='-'): """A simple slug generator.""" text = stringify(text) if text is None: return None text = text.replace(sep, WS) text = normalize(text, ascii=True) if text is None: return None return text.replace(WS, sep)
[ "def", "slugify", "(", "text", ",", "sep", "=", "'-'", ")", ":", "text", "=", "stringify", "(", "text", ")", "if", "text", "is", "None", ":", "return", "None", "text", "=", "text", ".", "replace", "(", "sep", ",", "WS", ")", "text", "=", "normalize", "(", "text", ",", "ascii", "=", "True", ")", "if", "text", "is", "None", ":", "return", "None", "return", "text", ".", "replace", "(", "WS", ",", "sep", ")" ]
A simple slug generator.
[ "A", "simple", "slug", "generator", "." ]
python
train
26.7
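For reference, a minimal self-contained sketch of the same idea using only the standard library; the real function delegates to this package's stringify and normalize helpers, so its output for non-ASCII input may differ:

import re
import unicodedata

def slugify_sketch(text, sep='-'):
    # Fold to ASCII, lowercase, and collapse runs of non-alphanumerics into sep.
    text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
    text = re.sub(r'[^A-Za-z0-9]+', sep, text.lower()).strip(sep)
    return text or None

print(slugify_sketch('Hello, World!'))  # hello-world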
fermiPy/fermipy
fermipy/diffuse/model_manager.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/model_manager.py#L455-L469
def get_sub_comp_info(source_info, comp): """Build and return information about a sub-component for a particular selection """ sub_comps = source_info.get('components', None) if sub_comps is None: return source_info.copy() moving = source_info.get('moving', False) selection_dependent = source_info.get('selection_dependent', False) if selection_dependent: key = comp.make_key('{ebin_name}_{evtype_name}') elif moving: key = "zmax%i" % comp.zmax ret_dict = source_info.copy() ret_dict.update(sub_comps[key]) return ret_dict
[ "def", "get_sub_comp_info", "(", "source_info", ",", "comp", ")", ":", "sub_comps", "=", "source_info", ".", "get", "(", "'components'", ",", "None", ")", "if", "sub_comps", "is", "None", ":", "return", "source_info", ".", "copy", "(", ")", "moving", "=", "source_info", ".", "get", "(", "'moving'", ",", "False", ")", "selection_dependent", "=", "source_info", ".", "get", "(", "'selection_dependent'", ",", "False", ")", "if", "selection_dependent", ":", "key", "=", "comp", ".", "make_key", "(", "'{ebin_name}_{evtype_name}'", ")", "elif", "moving", ":", "key", "=", "\"zmax%i\"", "%", "comp", ".", "zmax", "ret_dict", "=", "source_info", ".", "copy", "(", ")", "ret_dict", ".", "update", "(", "sub_comps", "[", "key", "]", ")", "return", "ret_dict" ]
Build and return information about a sub-component for a particular selection
[ "Build", "and", "return", "information", "about", "a", "sub", "-", "component", "for", "a", "particular", "selection" ]
python
train
42.2
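The two key formats the lookup uses can be checked in isolation; the ebin/evtype names and zmax value below are hypothetical:

selection_key = '{ebin_name}_{evtype_name}'.format(ebin_name='E0', evtype_name='FRONT')
moving_key = "zmax%i" % 100
print(selection_key, moving_key)  # E0_FRONT zmax100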
rootpy/rootpy
rootpy/plotting/hist.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/hist.py#L1260-L1273
def set_sum_w2(self, w, ix, iy=0, iz=0): """ Sets the true number of entries in the bin weighted by w^2 """ if self.GetSumw2N() == 0: raise RuntimeError( "Attempting to access Sumw2 in histogram " "where weights were not stored") xl = self.nbins(axis=0, overflow=True) yl = self.nbins(axis=1, overflow=True) idx = xl * yl * iz + xl * iy + ix if not 0 <= idx < self.GetSumw2N(): raise IndexError("bin index out of range") self.GetSumw2().SetAt(w, idx)
[ "def", "set_sum_w2", "(", "self", ",", "w", ",", "ix", ",", "iy", "=", "0", ",", "iz", "=", "0", ")", ":", "if", "self", ".", "GetSumw2N", "(", ")", "==", "0", ":", "raise", "RuntimeError", "(", "\"Attempting to access Sumw2 in histogram \"", "\"where weights were not stored\"", ")", "xl", "=", "self", ".", "nbins", "(", "axis", "=", "0", ",", "overflow", "=", "True", ")", "yl", "=", "self", ".", "nbins", "(", "axis", "=", "1", ",", "overflow", "=", "True", ")", "idx", "=", "xl", "*", "yl", "*", "iz", "+", "xl", "*", "iy", "+", "ix", "if", "not", "0", "<=", "idx", "<", "self", ".", "GetSumw2N", "(", ")", ":", "raise", "IndexError", "(", "\"bin index out of range\"", ")", "self", ".", "GetSumw2", "(", ")", ".", "SetAt", "(", "w", ",", "idx", ")" ]
Sets the true number of entries in the bin weighted by w^2
[ "Sets", "the", "true", "number", "of", "entries", "in", "the", "bin", "weighted", "by", "w^2" ]
python
train
40.357143
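The flat Sumw2 index is computed row-major over (z, y, x) with under/overflow bins included; a quick arithmetic check for a hypothetical histogram with 12 x-bins and 7 y-bins (overflow included):

xl, yl = 12, 7        # bins per axis, including under/overflow
ix, iy, iz = 3, 2, 0  # target bin coordinates
idx = xl * yl * iz + xl * iy + ix
print(idx)  # 27, i.e. row 2 of the z=0 slab plus offset 3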
hyperledger/sawtooth-core
validator/sawtooth_validator/execution/execution_context.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/execution_context.py#L230-L242
def create_prefetch(self, addresses): """Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain). """ with self._lock: for add in addresses: self._state[add] = _ContextFuture(address=add, wait_for_tree=True)
[ "def", "create_prefetch", "(", "self", ",", "addresses", ")", ":", "with", "self", ".", "_lock", ":", "for", "add", "in", "addresses", ":", "self", ".", "_state", "[", "add", "]", "=", "_ContextFuture", "(", "address", "=", "add", ",", "wait_for_tree", "=", "True", ")" ]
Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain).
[ "Create", "futures", "needed", "before", "starting", "the", "process", "of", "reading", "the", "address", "s", "value", "from", "the", "merkle", "tree", "." ]
python
train
38.846154
scanny/python-pptx
pptx/oxml/xmlchemy.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/xmlchemy.py#L21-L30
def OxmlElement(nsptag_str, nsmap=None): """ Return a 'loose' lxml element having the tag specified by *nsptag_str*. *nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'. The resulting element is an instance of the custom element class for this tag name if one is defined. """ nsptag = NamespacePrefixedTag(nsptag_str) nsmap = nsmap if nsmap is not None else nsptag.nsmap return oxml_parser.makeelement(nsptag.clark_name, nsmap=nsmap)
[ "def", "OxmlElement", "(", "nsptag_str", ",", "nsmap", "=", "None", ")", ":", "nsptag", "=", "NamespacePrefixedTag", "(", "nsptag_str", ")", "nsmap", "=", "nsmap", "if", "nsmap", "is", "not", "None", "else", "nsptag", ".", "nsmap", "return", "oxml_parser", ".", "makeelement", "(", "nsptag", ".", "clark_name", ",", "nsmap", "=", "nsmap", ")" ]
Return a 'loose' lxml element having the tag specified by *nsptag_str*. *nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'. The resulting element is an instance of the custom element class for this tag name if one is defined.
[ "Return", "a", "loose", "lxml", "element", "having", "the", "tag", "specified", "by", "*", "nsptag_str", "*", ".", "*", "nsptag_str", "*", "must", "contain", "the", "standard", "namespace", "prefix", "e", ".", "g", ".", "a", ":", "tbl", ".", "The", "resulting", "element", "is", "an", "instance", "of", "the", "custom", "element", "class", "for", "this", "tag", "name", "if", "one", "is", "defined", "." ]
python
train
47.8
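Roughly the same construction with plain lxml; the DrawingML namespace URI below is an assumption about what python-pptx maps to the 'a' prefix:

from lxml import etree

nsmap = {'a': 'http://schemas.openxmlformats.org/drawingml/2006/main'}
clark_name = '{%s}tbl' % nsmap['a']  # Clark notation: {uri}localname
tbl = etree.Element(clark_name, nsmap=nsmap)
print(etree.tostring(tbl))  # b'<a:tbl xmlns:a="..."/>'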
edoburu/django-any-urlfield
any_urlfield/templatetags/any_urlfield_tags.py
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/templatetags/any_urlfield_tags.py#L31-L45
def withdict(parser, token): """ Take a complete context dict as extra layer. """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("{% withdict %} expects one argument") nodelist = parser.parse(('endwithdict',)) parser.delete_first_token() return WithDictNode( nodelist=nodelist, context_expr=parser.compile_filter(bits[1]) )
[ "def", "withdict", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", "!=", "2", ":", "raise", "TemplateSyntaxError", "(", "\"{% withdict %} expects one argument\"", ")", "nodelist", "=", "parser", ".", "parse", "(", "(", "'endwithdict'", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "return", "WithDictNode", "(", "nodelist", "=", "nodelist", ",", "context_expr", "=", "parser", ".", "compile_filter", "(", "bits", "[", "1", "]", ")", ")" ]
Take a complete context dict as extra layer.
[ "Take", "a", "complete", "context", "dict", "as", "extra", "layer", "." ]
python
train
26.6
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L4931-L4936
def set_argsx(self, arguments, *args): """ Set up the command line arguments; the first item must be an (absolute) filename to run. Variadic function; must be NULL terminated. """ return lib.zproc_set_argsx(self._as_parameter_, arguments, *args)
[ "def", "set_argsx", "(", "self", ",", "arguments", ",", "*", "args", ")", ":", "return", "lib", ".", "zproc_set_argsx", "(", "self", ".", "_as_parameter_", ",", "arguments", ",", "*", "args", ")" ]
Set up the command line arguments; the first item must be an (absolute) filename to run. Variadic function; must be NULL terminated.
[ "Setup", "the", "command", "line", "arguments", "the", "first", "item", "must", "be", "an", "(", "absolute", ")", "filename", "to", "run", ".", "Variadic", "function", "must", "be", "NULL", "terminated", "." ]
python
train
45.166667
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/interactive_inference_plugin.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py#L230-L247
def _parse_request_arguments(self, request): """Parses comma separated request arguments Args: request: A request that should contain 'inference_address', 'model_name', 'model_version', 'model_signature'. Returns: A tuple of lists for model parameters """ inference_addresses = request.args.get('inference_address').split(',') model_names = request.args.get('model_name').split(',') model_versions = request.args.get('model_version').split(',') model_signatures = request.args.get('model_signature').split(',') if len(model_names) != len(inference_addresses): raise common_utils.InvalidUserInputError('Every model should have a ' + 'name and address.') return inference_addresses, model_names, model_versions, model_signatures
[ "def", "_parse_request_arguments", "(", "self", ",", "request", ")", ":", "inference_addresses", "=", "request", ".", "args", ".", "get", "(", "'inference_address'", ")", ".", "split", "(", "','", ")", "model_names", "=", "request", ".", "args", ".", "get", "(", "'model_name'", ")", ".", "split", "(", "','", ")", "model_versions", "=", "request", ".", "args", ".", "get", "(", "'model_version'", ")", ".", "split", "(", "','", ")", "model_signatures", "=", "request", ".", "args", ".", "get", "(", "'model_signature'", ")", ".", "split", "(", "','", ")", "if", "len", "(", "model_names", ")", "!=", "len", "(", "inference_addresses", ")", ":", "raise", "common_utils", ".", "InvalidUserInputError", "(", "'Every model should have a '", "+", "'name and address.'", ")", "return", "inference_addresses", ",", "model_names", ",", "model_versions", ",", "model_signatures" ]
Parses comma separated request arguments Args: request: A request that should contain 'inference_address', 'model_name', 'model_version', 'model_signature'. Returns: A tuple of lists for model parameters
[ "Parses", "comma", "separated", "request", "arguments" ]
python
train
45.833333
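The parsing itself is plain comma-splitting plus a length check; a standalone sketch with hypothetical request values:

inference_addresses = 'localhost:8500,localhost:8501'.split(',')
model_names = 'modelA,modelB'.split(',')
if len(model_names) != len(inference_addresses):
    raise ValueError('Every model should have a name and address.')
print(list(zip(model_names, inference_addresses)))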
RaRe-Technologies/smart_open
smart_open/smart_open_lib.py
https://github.com/RaRe-Technologies/smart_open/blob/2dc8d60f223fc7b00a2000c56362a7bd6cd0850e/smart_open/smart_open_lib.py#L442-L501
def _shortcut_open( uri, mode, ignore_ext=False, buffering=-1, encoding=None, errors=None, ): """Try to open the URI using the standard library io.open function. This can be much faster than the alternative of opening in binary mode and then decoding. This is only possible under the following conditions: 1. Opening a local file 2. Ignore extension is set to True If it is not possible to use the built-in open for the specified URI, returns None. :param str uri: A string indicating what to open. :param str mode: The mode to pass to the open function. :param dict kw: :returns: The opened file :rtype: file """ if not isinstance(uri, six.string_types): return None parsed_uri = _parse_uri(uri) if parsed_uri.scheme != 'file': return None _, extension = P.splitext(parsed_uri.uri_path) if extension in _COMPRESSOR_REGISTRY and not ignore_ext: return None open_kwargs = {} if encoding is not None: open_kwargs['encoding'] = encoding mode = mode.replace('b', '') # # binary mode of the builtin/stdlib open function doesn't take an errors argument # if errors and 'b' not in mode: open_kwargs['errors'] = errors # # Under Py3, the built-in open accepts kwargs, and it's OK to use that. # Under Py2, the built-in open _doesn't_ accept kwargs, but we still use it # whenever possible (see issue #207). If we're under Py2 and have to use # kwargs, then we have no option other to use io.open. # if six.PY3: return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs) elif not open_kwargs: return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering) return io.open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
[ "def", "_shortcut_open", "(", "uri", ",", "mode", ",", "ignore_ext", "=", "False", ",", "buffering", "=", "-", "1", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", ")", ":", "if", "not", "isinstance", "(", "uri", ",", "six", ".", "string_types", ")", ":", "return", "None", "parsed_uri", "=", "_parse_uri", "(", "uri", ")", "if", "parsed_uri", ".", "scheme", "!=", "'file'", ":", "return", "None", "_", ",", "extension", "=", "P", ".", "splitext", "(", "parsed_uri", ".", "uri_path", ")", "if", "extension", "in", "_COMPRESSOR_REGISTRY", "and", "not", "ignore_ext", ":", "return", "None", "open_kwargs", "=", "{", "}", "if", "encoding", "is", "not", "None", ":", "open_kwargs", "[", "'encoding'", "]", "=", "encoding", "mode", "=", "mode", ".", "replace", "(", "'b'", ",", "''", ")", "#", "# binary mode of the builtin/stdlib open function doesn't take an errors argument", "#", "if", "errors", "and", "'b'", "not", "in", "mode", ":", "open_kwargs", "[", "'errors'", "]", "=", "errors", "#", "# Under Py3, the built-in open accepts kwargs, and it's OK to use that.", "# Under Py2, the built-in open _doesn't_ accept kwargs, but we still use it", "# whenever possible (see issue #207). If we're under Py2 and have to use", "# kwargs, then we have no option other to use io.open.", "#", "if", "six", ".", "PY3", ":", "return", "_builtin_open", "(", "parsed_uri", ".", "uri_path", ",", "mode", ",", "buffering", "=", "buffering", ",", "*", "*", "open_kwargs", ")", "elif", "not", "open_kwargs", ":", "return", "_builtin_open", "(", "parsed_uri", ".", "uri_path", ",", "mode", ",", "buffering", "=", "buffering", ")", "return", "io", ".", "open", "(", "parsed_uri", ".", "uri_path", ",", "mode", ",", "buffering", "=", "buffering", ",", "*", "*", "open_kwargs", ")" ]
Try to open the URI using the standard library io.open function. This can be much faster than the alternative of opening in binary mode and then decoding. This is only possible under the following conditions: 1. Opening a local file 2. Ignore extension is set to True If it is not possible to use the built-in open for the specified URI, returns None. :param str uri: A string indicating what to open. :param str mode: The mode to pass to the open function. :returns: The opened file :rtype: file
[ "Try", "to", "open", "the", "URI", "using", "the", "standard", "library", "io", ".", "open", "function", "." ]
python
train
31.133333
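A hedged sketch of just the gating logic that decides whether the fast path applies; the registry contents here are assumptions, and the real code parses the URI scheme rather than searching for '://':

import os.path as P

_COMPRESSOR_REGISTRY = {'.gz': None, '.bz2': None}  # assumed extensions

def can_shortcut(uri, ignore_ext=False):
    # Only plain local paths without a registered compressor extension qualify.
    if not isinstance(uri, str) or '://' in uri:
        return False
    _, ext = P.splitext(uri)
    return ignore_ext or ext not in _COMPRESSOR_REGISTRY

print(can_shortcut('/tmp/data.txt'))  # True
print(can_shortcut('/tmp/data.gz'))   # False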
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/2065c568502b19b8634241b47fd96930d1bf948d/wikipedia/wikipedia.py#L188-L211
def random(pages=1): ''' Get a list of random Wikipedia article titles. .. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages. Keyword arguments: * pages - the number of random pages returned (max of 10) ''' #http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm query_params = { 'list': 'random', 'rnnamespace': 0, 'rnlimit': pages, } request = _wiki_request(query_params) titles = [page['title'] for page in request['query']['random']] if len(titles) == 1: return titles[0] return titles
[ "def", "random", "(", "pages", "=", "1", ")", ":", "#http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm", "query_params", "=", "{", "'list'", ":", "'random'", ",", "'rnnamespace'", ":", "0", ",", "'rnlimit'", ":", "pages", ",", "}", "request", "=", "_wiki_request", "(", "query_params", ")", "titles", "=", "[", "page", "[", "'title'", "]", "for", "page", "in", "request", "[", "'query'", "]", "[", "'random'", "]", "]", "if", "len", "(", "titles", ")", "==", "1", ":", "return", "titles", "[", "0", "]", "return", "titles" ]
Get a list of random Wikipedia article titles. .. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages. Keyword arguments: * pages - the number of random pages returned (max of 10)
[ "Get", "a", "list", "of", "random", "Wikipedia", "article", "titles", "." ]
python
train
25.333333
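Typical usage, assuming the package re-exports this function at the top level as its docs show (the call hits the live Wikipedia API, so output varies between runs):

import wikipedia

print(wikipedia.random())         # one random article title
print(wikipedia.random(pages=3))  # a list of three titles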
nilp0inter/cpe
cpe/cpe2_3_wfn.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe2_3_wfn.py#L110-L216
def _parse(self): """ Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name """ # Check prefix and initial bracket of WFN if self._str[0:5] != CPE2_3_WFN.CPE_PREFIX: errmsg = "Bad-formed CPE Name: WFN prefix not found" raise ValueError(errmsg) # Check final backet if self._str[-1:] != "]": errmsg = "Bad-formed CPE Name: final bracket of WFN not found" raise ValueError(errmsg) content = self._str[5:-1] if content != "": # Dictionary with pairs attribute-value components = dict() # Split WFN in components list_component = content.split(CPEComponent2_3_WFN.SEPARATOR_COMP) # Adds the defined components for e in list_component: # Whitespace not valid in component names and values if e.find(" ") != -1: msg = "Bad-formed CPE Name: WFN with too many whitespaces" raise ValueError(msg) # Split pair attribute-value pair = e.split(CPEComponent2_3_WFN.SEPARATOR_PAIR) att_name = pair[0] att_value = pair[1] # Check valid attribute name if att_name not in CPEComponent.CPE_COMP_KEYS_EXTENDED: msg = "Bad-formed CPE Name: invalid attribute name '{0}'".format( att_name) raise ValueError(msg) if att_name in components: # Duplicate attribute msg = "Bad-formed CPE Name: attribute '{0}' repeated".format( att_name) raise ValueError(msg) if not (att_value.startswith('"') and att_value.endswith('"')): # Logical value strUpper = att_value.upper() if strUpper == CPEComponent2_3_WFN.VALUE_ANY: comp = CPEComponentAnyValue() elif strUpper == CPEComponent2_3_WFN.VALUE_NA: comp = CPEComponentNotApplicable() else: msg = "Invalid logical value '{0}'".format(att_value) raise ValueError(msg) elif att_value.startswith('"') and att_value.endswith('"'): # String value comp = CPEComponent2_3_WFN(att_value, att_name) else: # Bad value msg = "Bad-formed CPE Name: invalid value '{0}'".format( att_value) raise ValueError(msg) components[att_name] = comp # Adds the undefined components for ck in CPEComponent.CPE_COMP_KEYS_EXTENDED: if ck not in components: components[ck] = CPEComponentUndefined() # ####################### # Storage of CPE Name # # ####################### part_comp = components[CPEComponent.ATT_PART] if isinstance(part_comp, CPEComponentLogical): elements = [] elements.append(components) self[CPE.KEY_UNDEFINED] = elements else: # Create internal structure of CPE Name in parts: # one of them is filled with identified components, # the rest are empty part_value = part_comp.get_value() # Del double quotes of value system = part_value[1:-1] if system in CPEComponent.SYSTEM_VALUES: self._create_cpe_parts(system, components) else: self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED, components) # Fills the empty parts of internal structure of CPE Name for pk in CPE.CPE_PART_KEYS: if pk not in self.keys(): self[pk] = []
[ "def", "_parse", "(", "self", ")", ":", "# Check prefix and initial bracket of WFN", "if", "self", ".", "_str", "[", "0", ":", "5", "]", "!=", "CPE2_3_WFN", ".", "CPE_PREFIX", ":", "errmsg", "=", "\"Bad-formed CPE Name: WFN prefix not found\"", "raise", "ValueError", "(", "errmsg", ")", "# Check final backet", "if", "self", ".", "_str", "[", "-", "1", ":", "]", "!=", "\"]\"", ":", "errmsg", "=", "\"Bad-formed CPE Name: final bracket of WFN not found\"", "raise", "ValueError", "(", "errmsg", ")", "content", "=", "self", ".", "_str", "[", "5", ":", "-", "1", "]", "if", "content", "!=", "\"\"", ":", "# Dictionary with pairs attribute-value", "components", "=", "dict", "(", ")", "# Split WFN in components", "list_component", "=", "content", ".", "split", "(", "CPEComponent2_3_WFN", ".", "SEPARATOR_COMP", ")", "# Adds the defined components", "for", "e", "in", "list_component", ":", "# Whitespace not valid in component names and values", "if", "e", ".", "find", "(", "\" \"", ")", "!=", "-", "1", ":", "msg", "=", "\"Bad-formed CPE Name: WFN with too many whitespaces\"", "raise", "ValueError", "(", "msg", ")", "# Split pair attribute-value", "pair", "=", "e", ".", "split", "(", "CPEComponent2_3_WFN", ".", "SEPARATOR_PAIR", ")", "att_name", "=", "pair", "[", "0", "]", "att_value", "=", "pair", "[", "1", "]", "# Check valid attribute name", "if", "att_name", "not", "in", "CPEComponent", ".", "CPE_COMP_KEYS_EXTENDED", ":", "msg", "=", "\"Bad-formed CPE Name: invalid attribute name '{0}'\"", ".", "format", "(", "att_name", ")", "raise", "ValueError", "(", "msg", ")", "if", "att_name", "in", "components", ":", "# Duplicate attribute", "msg", "=", "\"Bad-formed CPE Name: attribute '{0}' repeated\"", ".", "format", "(", "att_name", ")", "raise", "ValueError", "(", "msg", ")", "if", "not", "(", "att_value", ".", "startswith", "(", "'\"'", ")", "and", "att_value", ".", "endswith", "(", "'\"'", ")", ")", ":", "# Logical value", "strUpper", "=", "att_value", ".", "upper", "(", ")", "if", "strUpper", "==", "CPEComponent2_3_WFN", ".", "VALUE_ANY", ":", "comp", "=", "CPEComponentAnyValue", "(", ")", "elif", "strUpper", "==", "CPEComponent2_3_WFN", ".", "VALUE_NA", ":", "comp", "=", "CPEComponentNotApplicable", "(", ")", "else", ":", "msg", "=", "\"Invalid logical value '{0}'\"", ".", "format", "(", "att_value", ")", "raise", "ValueError", "(", "msg", ")", "elif", "att_value", ".", "startswith", "(", "'\"'", ")", "and", "att_value", ".", "endswith", "(", "'\"'", ")", ":", "# String value", "comp", "=", "CPEComponent2_3_WFN", "(", "att_value", ",", "att_name", ")", "else", ":", "# Bad value", "msg", "=", "\"Bad-formed CPE Name: invalid value '{0}'\"", ".", "format", "(", "att_value", ")", "raise", "ValueError", "(", "msg", ")", "components", "[", "att_name", "]", "=", "comp", "# Adds the undefined components", "for", "ck", "in", "CPEComponent", ".", "CPE_COMP_KEYS_EXTENDED", ":", "if", "ck", "not", "in", "components", ":", "components", "[", "ck", "]", "=", "CPEComponentUndefined", "(", ")", "# #######################", "# Storage of CPE Name #", "# #######################", "part_comp", "=", "components", "[", "CPEComponent", ".", "ATT_PART", "]", "if", "isinstance", "(", "part_comp", ",", "CPEComponentLogical", ")", ":", "elements", "=", "[", "]", "elements", ".", "append", "(", "components", ")", "self", "[", "CPE", ".", "KEY_UNDEFINED", "]", "=", "elements", "else", ":", "# Create internal structure of CPE Name in parts:", "# one of them is filled with identified components,", "# the rest are empty", 
"part_value", "=", "part_comp", ".", "get_value", "(", ")", "# Del double quotes of value", "system", "=", "part_value", "[", "1", ":", "-", "1", "]", "if", "system", "in", "CPEComponent", ".", "SYSTEM_VALUES", ":", "self", ".", "_create_cpe_parts", "(", "system", ",", "components", ")", "else", ":", "self", ".", "_create_cpe_parts", "(", "CPEComponent", ".", "VALUE_PART_UNDEFINED", ",", "components", ")", "# Fills the empty parts of internal structure of CPE Name", "for", "pk", "in", "CPE", ".", "CPE_PART_KEYS", ":", "if", "pk", "not", "in", "self", ".", "keys", "(", ")", ":", "self", "[", "pk", "]", "=", "[", "]" ]
Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name
[ "Checks", "if", "the", "CPE", "Name", "is", "valid", "." ]
python
train
38.065421
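An example of a WFN string this parser accepts; note that no whitespace is allowed anywhere in the name. Constructing the object directly from the class in this record is an assumption about the package's public API:

from cpe.cpe2_3_wfn import CPE2_3_WFN

# The backslash escapes the dot inside the quoted version value.
wfn = CPE2_3_WFN('wfn:[part="a",vendor="microsoft",product="internet_explorer",version="8\\.0"]')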
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L109-L127
def mint_sub(client_salt, sector_id="", subject_type="public", uid='', user_salt=''): """ Mint a new sub (subject identifier) :param client_salt: client specific salt - used in pairwise :param sector_id: Possible sector identifier :param subject_type: 'public'/'pairwise' :param uid: User identifier :param user_salt: User-specific salt :return: Subject identifier """ if subject_type == "public": sub = hashlib.sha256( "{}{}".format(uid, user_salt).encode("utf-8")).hexdigest() else: sub = pairwise_id(uid, sector_id, "{}{}".format(client_salt, user_salt)) return sub
[ "def", "mint_sub", "(", "client_salt", ",", "sector_id", "=", "\"\"", ",", "subject_type", "=", "\"public\"", ",", "uid", "=", "''", ",", "user_salt", "=", "''", ")", ":", "if", "subject_type", "==", "\"public\"", ":", "sub", "=", "hashlib", ".", "sha256", "(", "\"{}{}\"", ".", "format", "(", "uid", ",", "user_salt", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")", "else", ":", "sub", "=", "pairwise_id", "(", "uid", ",", "sector_id", ",", "\"{}{}\"", ".", "format", "(", "client_salt", ",", "user_salt", ")", ")", "return", "sub" ]
Mint a new sub (subject identifier) :param client_salt: client specific salt - used in pairwise :param sector_id: Possible sector identifier :param subject_type: 'public'/'pairwise' :param uid: User identifier :param user_salt: User-specific salt :return: Subject identifier
[ "Mint", "a", "new", "sub", "(", "subject", "identifier", ")" ]
python
train
34.263158
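For the public subject type the identifier is just a salted SHA-256 over the user id; a standalone check with hypothetical values:

import hashlib

uid, user_salt = 'alice', 's3cr3t'
sub = hashlib.sha256('{}{}'.format(uid, user_salt).encode('utf-8')).hexdigest()
print(sub)  # deterministic: the same uid and salt always yield the same sub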
awslabs/sockeye
sockeye/data_io.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/data_io.py#L522-L531
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int: """ Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards. """ return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards)
[ "def", "get_num_shards", "(", "num_samples", ":", "int", ",", "samples_per_shard", ":", "int", ",", "min_num_shards", ":", "int", ")", "->", "int", ":", "return", "max", "(", "int", "(", "math", ".", "ceil", "(", "num_samples", "/", "samples_per_shard", ")", ")", ",", "min_num_shards", ")" ]
Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards.
[ "Returns", "the", "number", "of", "shards", "." ]
python
train
40.1
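The arithmetic in isolation, with hypothetical sizes:

import math

num_samples, samples_per_shard, min_num_shards = 1000000, 300000, 2
num_shards = max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards)
print(num_shards)  # ceil(3.33) = 4, which already exceeds the minimum of 2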
pypa/pipenv
pipenv/patched/notpip/_internal/cli/parser.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L251-L261
def invalid_config_error_message(action, key, val): """Returns a better error message when an invalid configuration option is provided.""" if action in ('store_true', 'store_false'): return ("{0} is not a valid value for {1} option, " "please specify a boolean value like yes/no, " "true/false or 1/0 instead.").format(val, key) return ("{0} is not a valid value for {1} option, " "please specify a numerical value like 1/0 " "instead.").format(val, key)
[ "def", "invalid_config_error_message", "(", "action", ",", "key", ",", "val", ")", ":", "if", "action", "in", "(", "'store_true'", ",", "'store_false'", ")", ":", "return", "(", "\"{0} is not a valid value for {1} option, \"", "\"please specify a boolean value like yes/no, \"", "\"true/false or 1/0 instead.\"", ")", ".", "format", "(", "val", ",", "key", ")", "return", "(", "\"{0} is not a valid value for {1} option, \"", "\"please specify a numerical value like 1/0 \"", "\"instead.\"", ")", ".", "format", "(", "val", ",", "key", ")" ]
Returns a better error message when an invalid configuration option is provided.
[ "Returns", "a", "better", "error", "message", "when", "invalid", "configuration", "option", "is", "provided", "." ]
python
train
47.454545
tBuLi/symfit
symfit/core/fit.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1043-L1051
def independent_data(self): """ Read-only Property :return: Data belonging to each independent variable as a dict with variable names as key, data as value. :rtype: collections.OrderedDict """ return OrderedDict((var, self.data[var]) for var in self.model.independent_vars)
[ "def", "independent_data", "(", "self", ")", ":", "return", "OrderedDict", "(", "(", "var", ",", "self", ".", "data", "[", "var", "]", ")", "for", "var", "in", "self", ".", "model", ".", "independent_vars", ")" ]
Read-only Property :return: Data belonging to each independent variable as a dict with variable names as key, data as value. :rtype: collections.OrderedDict
[ "Read", "-", "only", "Property" ]
python
train
36.777778
manns/pyspread
pyspread/src/gui/_chart_dialog.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_chart_dialog.py#L639-L645
def OnSecondaryCheckbox(self, event): """Secondary checkbox event handler. Sets the top and right attributes.""" self.attrs["top"] = event.IsChecked() self.attrs["right"] = event.IsChecked() post_command_event(self, self.DrawChartMsg)
[ "def", "OnSecondaryCheckbox", "(", "self", ",", "event", ")", ":", "self", ".", "attrs", "[", "\"top\"", "]", "=", "event", ".", "IsChecked", "(", ")", "self", ".", "attrs", "[", "\"right\"", "]", "=", "event", ".", "IsChecked", "(", ")", "post_command_event", "(", "self", ",", "self", ".", "DrawChartMsg", ")" ]
Secondary checkbox event handler. Sets the top and right attributes.
[ "Top", "Checkbox", "event", "handler" ]
python
train
31.428571
jonhadfield/python-hosts
python_hosts/hosts.py
https://github.com/jonhadfield/python-hosts/blob/9ccaa8edc63418a91f10bf732b26070f21dd2ad0/python_hosts/hosts.py#L177-L221
def write(self, path=None): """ Write all of the HostsEntry instances back to the hosts file :param path: override the write path :return: Dictionary containing counts """ written_count = 0 comments_written = 0 blanks_written = 0 ipv4_entries_written = 0 ipv6_entries_written = 0 if path: output_file_path = path else: output_file_path = self.hosts_path try: with open(output_file_path, 'w') as hosts_file: for written_count, line in enumerate(self.entries): if line.entry_type == 'comment': hosts_file.write(line.comment + "\n") comments_written += 1 if line.entry_type == 'blank': hosts_file.write("\n") blanks_written += 1 if line.entry_type == 'ipv4': hosts_file.write( "{0}\t{1}\n".format( line.address, ' '.join(line.names), ) ) ipv4_entries_written += 1 if line.entry_type == 'ipv6': hosts_file.write( "{0}\t{1}\n".format( line.address, ' '.join(line.names), )) ipv6_entries_written += 1 except: raise UnableToWriteHosts() return {'total_written': written_count + 1, 'comments_written': comments_written, 'blanks_written': blanks_written, 'ipv4_entries_written': ipv4_entries_written, 'ipv6_entries_written': ipv6_entries_written}
[ "def", "write", "(", "self", ",", "path", "=", "None", ")", ":", "written_count", "=", "0", "comments_written", "=", "0", "blanks_written", "=", "0", "ipv4_entries_written", "=", "0", "ipv6_entries_written", "=", "0", "if", "path", ":", "output_file_path", "=", "path", "else", ":", "output_file_path", "=", "self", ".", "hosts_path", "try", ":", "with", "open", "(", "output_file_path", ",", "'w'", ")", "as", "hosts_file", ":", "for", "written_count", ",", "line", "in", "enumerate", "(", "self", ".", "entries", ")", ":", "if", "line", ".", "entry_type", "==", "'comment'", ":", "hosts_file", ".", "write", "(", "line", ".", "comment", "+", "\"\\n\"", ")", "comments_written", "+=", "1", "if", "line", ".", "entry_type", "==", "'blank'", ":", "hosts_file", ".", "write", "(", "\"\\n\"", ")", "blanks_written", "+=", "1", "if", "line", ".", "entry_type", "==", "'ipv4'", ":", "hosts_file", ".", "write", "(", "\"{0}\\t{1}\\n\"", ".", "format", "(", "line", ".", "address", ",", "' '", ".", "join", "(", "line", ".", "names", ")", ",", ")", ")", "ipv4_entries_written", "+=", "1", "if", "line", ".", "entry_type", "==", "'ipv6'", ":", "hosts_file", ".", "write", "(", "\"{0}\\t{1}\\n\"", ".", "format", "(", "line", ".", "address", ",", "' '", ".", "join", "(", "line", ".", "names", ")", ",", ")", ")", "ipv6_entries_written", "+=", "1", "except", ":", "raise", "UnableToWriteHosts", "(", ")", "return", "{", "'total_written'", ":", "written_count", "+", "1", ",", "'comments_written'", ":", "comments_written", ",", "'blanks_written'", ":", "blanks_written", ",", "'ipv4_entries_written'", ":", "ipv4_entries_written", ",", "'ipv6_entries_written'", ":", "ipv6_entries_written", "}" ]
Write all of the HostsEntry instances back to the hosts file :param path: override the write path :return: Dictionary containing counts
[ "Write", "all", "of", "the", "HostsEntry", "instances", "back", "to", "the", "hosts", "file", ":", "param", "path", ":", "override", "the", "write", "path", ":", "return", ":", "Dictionary", "containing", "counts" ]
python
train
41.088889
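Typical round-trip usage, pointing both read and write at a scratch file so the system hosts file is untouched; the constructor arguments below follow what this record and the package's README show:

from python_hosts import Hosts, HostsEntry

open('hosts.scratch', 'w').close()   # start from an empty scratch file
hosts = Hosts(path='hosts.scratch')  # avoid the real /etc/hosts
hosts.add([HostsEntry(entry_type='ipv4', address='10.0.0.5', names=['db.local'])])
counts = hosts.write(path='hosts.scratch')
print(counts['ipv4_entries_written'])  # 1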
djordon/queueing-tool
queueing_tool/network/queue_network.py
https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/network/queue_network.py#L612-L640
def clear_data(self, queues=None, edge=None, edge_type=None): """Clears data from all queues. If none of the parameters are given then every queue's data is cleared. Parameters ---------- queues : int or an iterable of int (optional) The edge index (or an iterable of edge indices) identifying the :class:`QueueServer(s)<.QueueServer>` whose data will be cleared. edge : 2-tuple of int or *array_like* (optional) Explicitly specify which queues' data to clear. Must be either: * A 2-tuple of the edge's source and target vertex indices, or * An iterable of 2-tuples of the edge's source and target vertex indices. edge_type : int or an iterable of int (optional) An integer, or a collection of integers identifying which edge types will have their data cleared. """ queues = _get_queues(self.g, queues, edge, edge_type) for k in queues: self.edge2queue[k].data = {}
[ "def", "clear_data", "(", "self", ",", "queues", "=", "None", ",", "edge", "=", "None", ",", "edge_type", "=", "None", ")", ":", "queues", "=", "_get_queues", "(", "self", ".", "g", ",", "queues", ",", "edge", ",", "edge_type", ")", "for", "k", "in", "queues", ":", "self", ".", "edge2queue", "[", "k", "]", ".", "data", "=", "{", "}" ]
Clears data from all queues. If none of the parameters are given then every queue's data is cleared. Parameters ---------- queues : int or an iterable of int (optional) The edge index (or an iterable of edge indices) identifying the :class:`QueueServer(s)<.QueueServer>` whose data will be cleared. edge : 2-tuple of int or *array_like* (optional) Explicitly specify which queues' data to clear. Must be either: * A 2-tuple of the edge's source and target vertex indices, or * An iterable of 2-tuples of the edge's source and target vertex indices. edge_type : int or an iterable of int (optional) An integer, or a collection of integers identifying which edge types will have their data cleared.
[ "Clears", "data", "from", "all", "queues", "." ]
python
valid
37.137931
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/assert_sysmeta.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/assert_sysmeta.py#L36-L46
def sanity(request, sysmeta_pyxb): """Check that sysmeta_pyxb is suitable for creating a new object and matches the uploaded sciobj bytes.""" _does_not_contain_replica_sections(sysmeta_pyxb) _is_not_archived(sysmeta_pyxb) _obsoleted_by_not_specified(sysmeta_pyxb) if 'HTTP_VENDOR_GMN_REMOTE_URL' in request.META: return _has_correct_file_size(request, sysmeta_pyxb) _is_supported_checksum_algorithm(sysmeta_pyxb) _is_correct_checksum(request, sysmeta_pyxb)
[ "def", "sanity", "(", "request", ",", "sysmeta_pyxb", ")", ":", "_does_not_contain_replica_sections", "(", "sysmeta_pyxb", ")", "_is_not_archived", "(", "sysmeta_pyxb", ")", "_obsoleted_by_not_specified", "(", "sysmeta_pyxb", ")", "if", "'HTTP_VENDOR_GMN_REMOTE_URL'", "in", "request", ".", "META", ":", "return", "_has_correct_file_size", "(", "request", ",", "sysmeta_pyxb", ")", "_is_supported_checksum_algorithm", "(", "sysmeta_pyxb", ")", "_is_correct_checksum", "(", "request", ",", "sysmeta_pyxb", ")" ]
Check that sysmeta_pyxb is suitable for creating a new object and matches the uploaded sciobj bytes.
[ "Check", "that", "sysmeta_pyxb", "is", "suitable", "for", "creating", "a", "new", "object", "and", "matches", "the", "uploaded", "sciobj", "bytes", "." ]
python
train
44.545455
tanghaibao/goatools
goatools/anno/annoreader_base.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/annoreader_base.py#L104-L115
def _prt_qualifiers(associations, prt=sys.stdout): """Print Qualifiers found in the annotations. QUALIFIERS: 1,462 colocalizes_with 1,454 contributes_to 1,157 not 13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs) 4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs) """ prt.write('QUALIFIERS:\n') for fld, cnt in cx.Counter(q for nt in associations for q in nt.Qualifier).most_common(): prt.write(' {N:6,} {FLD}\n'.format(N=cnt, FLD=fld))
[ "def", "_prt_qualifiers", "(", "associations", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "prt", ".", "write", "(", "'QUALIFIERS:\\n'", ")", "for", "fld", ",", "cnt", "in", "cx", ".", "Counter", "(", "q", "for", "nt", "in", "associations", "for", "q", "in", "nt", ".", "Qualifier", ")", ".", "most_common", "(", ")", ":", "prt", ".", "write", "(", "' {N:6,} {FLD}\\n'", ".", "format", "(", "N", "=", "cnt", ",", "FLD", "=", "fld", ")", ")" ]
Print Qualifiers found in the annotations. QUALIFIERS: 1,462 colocalizes_with 1,454 contributes_to 1,157 not 13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs) 4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)
[ "Print", "Qualifiers", "found", "in", "the", "annotations", ".", "QUALIFIERS", ":", "1", "462", "colocalizes_with", "1", "454", "contributes_to", "1", "157", "not", "13", "not", "colocalizes_with", "(", "TBD", ":", "CHK", "-", "Seen", "in", "gene2go", "but", "not", "gafs", ")", "4", "not", "contributes_to", "(", "TBD", ":", "CHK", "-", "Seen", "in", "gene2go", "but", "not", "gafs", ")" ]
python
train
50.5
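The Counter pass works on any iterable of records exposing a Qualifier collection; a tiny self-contained run with made-up annotations:

import sys
import collections as cx
from collections import namedtuple

Assoc = namedtuple('Assoc', 'Qualifier')
associations = [Assoc({'not'}), Assoc({'not', 'contributes_to'}), Assoc(set())]
sys.stdout.write('QUALIFIERS:\n')
for fld, cnt in cx.Counter(q for nt in associations for q in nt.Qualifier).most_common():
    sys.stdout.write('  {N:6,} {FLD}\n'.format(N=cnt, FLD=fld))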
gem/oq-engine
openquake/hazardlib/gsim/convertito_2012.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/convertito_2012.py#L109-L116
def _compute_site_scaling(self, C, vs30): """ Returns the site scaling term as a simple coefficient """ site_term = np.zeros(len(vs30), dtype=float) # For soil sites add on the site coefficient site_term[vs30 < 760.0] = C["e"] return site_term
[ "def", "_compute_site_scaling", "(", "self", ",", "C", ",", "vs30", ")", ":", "site_term", "=", "np", ".", "zeros", "(", "len", "(", "vs30", ")", ",", "dtype", "=", "float", ")", "# For soil sites add on the site coefficient", "site_term", "[", "vs30", "<", "760.0", "]", "=", "C", "[", "\"e\"", "]", "return", "site_term" ]
Returns the site scaling term as a simple coefficient
[ "Returns", "the", "site", "scaling", "term", "as", "a", "simple", "coefficient" ]
python
train
36.5
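The vectorised branch in isolation, with a made-up coefficient:

import numpy as np

vs30 = np.array([200.0, 760.0, 1100.0])
C = {'e': 0.25}  # hypothetical site coefficient
site_term = np.zeros(len(vs30), dtype=float)
site_term[vs30 < 760.0] = C['e']  # only soil sites (vs30 < 760 m/s) get the term
print(site_term)  # [0.25 0.   0.  ]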
readbeyond/aeneas
aeneas/syncmap/fragmentlist.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/syncmap/fragmentlist.py#L248-L271
def sort(self): """ Sort the fragments in the list. :raises ValueError: if there is a fragment which violates the list constraints """ if self.is_guaranteed_sorted: self.log(u"Already sorted, returning") return self.log(u"Sorting...") self.__fragments = sorted(self.__fragments) self.log(u"Sorting... done") self.log(u"Checking relative positions...") for i in range(len(self) - 1): current_interval = self[i].interval next_interval = self[i + 1].interval if current_interval.relative_position_of(next_interval) not in self.ALLOWED_POSITIONS: self.log(u"Found overlapping fragments:") self.log([u" Index %d => %s", i, current_interval]) self.log([u" Index %d => %s", i + 1, next_interval]) self.log_exc(u"The list contains two fragments overlapping in a forbidden way", None, True, ValueError) self.log(u"Checking relative positions... done") self.__sorted = True
[ "def", "sort", "(", "self", ")", ":", "if", "self", ".", "is_guaranteed_sorted", ":", "self", ".", "log", "(", "u\"Already sorted, returning\"", ")", "return", "self", ".", "log", "(", "u\"Sorting...\"", ")", "self", ".", "__fragments", "=", "sorted", "(", "self", ".", "__fragments", ")", "self", ".", "log", "(", "u\"Sorting... done\"", ")", "self", ".", "log", "(", "u\"Checking relative positions...\"", ")", "for", "i", "in", "range", "(", "len", "(", "self", ")", "-", "1", ")", ":", "current_interval", "=", "self", "[", "i", "]", ".", "interval", "next_interval", "=", "self", "[", "i", "+", "1", "]", ".", "interval", "if", "current_interval", ".", "relative_position_of", "(", "next_interval", ")", "not", "in", "self", ".", "ALLOWED_POSITIONS", ":", "self", ".", "log", "(", "u\"Found overlapping fragments:\"", ")", "self", ".", "log", "(", "[", "u\" Index %d => %s\"", ",", "i", ",", "current_interval", "]", ")", "self", ".", "log", "(", "[", "u\" Index %d => %s\"", ",", "i", "+", "1", ",", "next_interval", "]", ")", "self", ".", "log_exc", "(", "u\"The list contains two fragments overlapping in a forbidden way\"", ",", "None", ",", "True", ",", "ValueError", ")", "self", ".", "log", "(", "u\"Checking relative positions... done\"", ")", "self", ".", "__sorted", "=", "True" ]
Sort the fragments in the list. :raises ValueError: if there is a fragment which violates the list constraints
[ "Sort", "the", "fragments", "in", "the", "list", "." ]
python
train
45.458333
minhhoit/yacms
yacms/boot/__init__.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/boot/__init__.py#L38-L52
def import_field(field_classpath): """ Imports a field by its dotted class path, prepending "django.db.models" to raw class names and raising an exception if the import fails. """ if '.' in field_classpath: fully_qualified = field_classpath else: fully_qualified = "django.db.models.%s" % field_classpath try: return import_dotted_path(fully_qualified) except ImportError: raise ImproperlyConfigured("The EXTRA_MODEL_FIELDS setting contains " "the field '%s' which could not be " "imported." % field_classpath)
[ "def", "import_field", "(", "field_classpath", ")", ":", "if", "'.'", "in", "field_classpath", ":", "fully_qualified", "=", "field_classpath", "else", ":", "fully_qualified", "=", "\"django.db.models.%s\"", "%", "field_classpath", "try", ":", "return", "import_dotted_path", "(", "fully_qualified", ")", "except", "ImportError", ":", "raise", "ImproperlyConfigured", "(", "\"The EXTRA_MODEL_FIELDS setting contains \"", "\"the field '%s' which could not be \"", "\"imported.\"", "%", "field_classpath", ")" ]
Imports a field by its dotted class path, prepending "django.db.models" to raw class names and raising an exception if the import fails.
[ "Imports", "a", "field", "by", "its", "dotted", "class", "path", "prepending", "django", ".", "db", ".", "models", "to", "raw", "class", "names", "and", "raising", "an", "exception", "if", "the", "import", "fails", "." ]
python
train
42
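The name-resolution rule on its own, for one raw class name and one dotted path (the dotted path is hypothetical):

for field_classpath in ('CharField', 'myapp.fields.ColourField'):
    if '.' in field_classpath:
        fully_qualified = field_classpath
    else:
        fully_qualified = 'django.db.models.%s' % field_classpath
    print(fully_qualified)
# django.db.models.CharField
# myapp.fields.ColourField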
nirum/descent
descent/proxops.py
https://github.com/nirum/descent/blob/074c8452f15a0da638668a4fe139fde06ccfae7f/descent/proxops.py#L251-L259
def columns(x, rho, proxop): """Applies a proximal operator to the columns of a matrix""" xnext = np.zeros_like(x) for ix in range(x.shape[1]): xnext[:, ix] = proxop(x[:, ix], rho) return xnext
[ "def", "columns", "(", "x", ",", "rho", ",", "proxop", ")", ":", "xnext", "=", "np", ".", "zeros_like", "(", "x", ")", "for", "ix", "in", "range", "(", "x", ".", "shape", "[", "1", "]", ")", ":", "xnext", "[", ":", ",", "ix", "]", "=", "proxop", "(", "x", "[", ":", ",", "ix", "]", ",", "rho", ")", "return", "xnext" ]
Applies a proximal operator to the columns of a matrix
[ "Applies", "a", "proximal", "operator", "to", "the", "columns", "of", "a", "matrix" ]
python
valid
23.555556
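For example, applied column-wise with a soft-thresholding operator; the operator below is illustrative, not necessarily one shipped by this package:

import numpy as np

def soft_threshold(v, rho):
    # Proximal operator of the l1 norm with step 1/rho.
    return np.sign(v) * np.maximum(np.abs(v) - 1.0 / rho, 0.0)

x = np.array([[3.0, -0.2], [-1.5, 0.8]])
xnext = np.zeros_like(x)
for ix in range(x.shape[1]):
    xnext[:, ix] = soft_threshold(x[:, ix], rho=2.0)
print(xnext)  # entries shrunk toward zero by 0.5, small ones clipped to 0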
enkore/i3pystatus
i3pystatus/core/io.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/core/io.py#L185-L190
def read(self): """Iterate over all JSON input (Generator)""" for line in self.io.read(): with self.parse_line(line) as j: yield j
[ "def", "read", "(", "self", ")", ":", "for", "line", "in", "self", ".", "io", ".", "read", "(", ")", ":", "with", "self", ".", "parse_line", "(", "line", ")", "as", "j", ":", "yield", "j" ]
Iterate over all JSON input (Generator)
[ "Iterate", "over", "all", "JSON", "input", "(", "Generator", ")" ]
python
train
28.333333
juju/charm-helpers
charmhelpers/core/host.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/host.py#L669-L681
def check_hash(path, checksum, hash_type='md5'): """Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum """ actual_checksum = file_hash(path, hash_type) if checksum != actual_checksum: raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
[ "def", "check_hash", "(", "path", ",", "checksum", ",", "hash_type", "=", "'md5'", ")", ":", "actual_checksum", "=", "file_hash", "(", "path", ",", "hash_type", ")", "if", "checksum", "!=", "actual_checksum", ":", "raise", "ChecksumError", "(", "\"'%s' != '%s'\"", "%", "(", "checksum", ",", "actual_checksum", ")", ")" ]
Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum
[ "Validate", "a", "file", "using", "a", "cryptographic", "checksum", "." ]
python
train
44.153846
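An equivalent standalone check with hashlib; the real file_hash helper presumably streams the file, while this sketch reads it whole:

import hashlib

def check_hash_sketch(path, checksum, hash_type='md5'):
    with open(path, 'rb') as f:
        actual = hashlib.new(hash_type, f.read()).hexdigest()
    if checksum != actual:
        raise ValueError("'%s' != '%s'" % (checksum, actual))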
hyperledger/sawtooth-core
validator/sawtooth_validator/journal/consensus/consensus_factory.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/journal/consensus/consensus_factory.py#L27-L56
def get_consensus_module(module_name): """Returns a consensus module by name. Args: module_name (str): The name of the module to load. Returns: module: The consensus module. Raises: UnknownConsensusModuleError: Raised if the given module_name does not correspond to a consensus implementation. """ module_package = module_name if module_name == 'genesis': module_package = ( 'sawtooth_validator.journal.consensus.genesis.' 'genesis_consensus' ) elif module_name == 'devmode': module_package = ( 'sawtooth_validator.journal.consensus.dev_mode.' 'dev_mode_consensus' ) try: return importlib.import_module(module_package) except ImportError: raise UnknownConsensusModuleError( 'Consensus module "{}" does not exist.'.format(module_name))
[ "def", "get_consensus_module", "(", "module_name", ")", ":", "module_package", "=", "module_name", "if", "module_name", "==", "'genesis'", ":", "module_package", "=", "(", "'sawtooth_validator.journal.consensus.genesis.'", "'genesis_consensus'", ")", "elif", "module_name", "==", "'devmode'", ":", "module_package", "=", "(", "'sawtooth_validator.journal.consensus.dev_mode.'", "'dev_mode_consensus'", ")", "try", ":", "return", "importlib", ".", "import_module", "(", "module_package", ")", "except", "ImportError", ":", "raise", "UnknownConsensusModuleError", "(", "'Consensus module \"{}\" does not exist.'", ".", "format", "(", "module_name", ")", ")" ]
Returns a consensus module by name. Args: module_name (str): The name of the module to load. Returns: module: The consensus module. Raises: UnknownConsensusModuleError: Raised if the given module_name does not correspond to a consensus implementation.
[ "Returns", "a", "consensus", "module", "by", "name", "." ]
python
train
33.1
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/alembic_func.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/alembic_func.py#L113-L145
def get_current_and_head_revision( database_url: str, alembic_config_filename: str, alembic_base_dir: str = None, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]: """ Returns a tuple of ``(current_revision, head_revision)``; see :func:`get_current_revision` and :func:`get_head_revision_from_alembic`. Arguments: database_url: SQLAlchemy URL for the database alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions """ # Where we want to be head_revision = get_head_revision_from_alembic( alembic_config_filename=alembic_config_filename, alembic_base_dir=alembic_base_dir, version_table=version_table ) log.info("Intended database version: {}", head_revision) # Where we are current_revision = get_current_revision( database_url=database_url, version_table=version_table ) log.info("Current database version: {}", current_revision) # Are we where we want to be? return current_revision, head_revision
[ "def", "get_current_and_head_revision", "(", "database_url", ":", "str", ",", "alembic_config_filename", ":", "str", ",", "alembic_base_dir", ":", "str", "=", "None", ",", "version_table", ":", "str", "=", "DEFAULT_ALEMBIC_VERSION_TABLE", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "# Where we are", "head_revision", "=", "get_head_revision_from_alembic", "(", "alembic_config_filename", "=", "alembic_config_filename", ",", "alembic_base_dir", "=", "alembic_base_dir", ",", "version_table", "=", "version_table", ")", "log", ".", "info", "(", "\"Intended database version: {}\"", ",", "head_revision", ")", "# Where we want to be", "current_revision", "=", "get_current_revision", "(", "database_url", "=", "database_url", ",", "version_table", "=", "version_table", ")", "log", ".", "info", "(", "\"Current database version: {}\"", ",", "current_revision", ")", "# Are we where we want to be?", "return", "current_revision", ",", "head_revision" ]
Returns a tuple of ``(current_revision, head_revision)``; see :func:`get_current_revision` and :func:`get_head_revision_from_alembic`. Arguments: database_url: SQLAlchemy URL for the database alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions
[ "Returns", "a", "tuple", "of", "(", "current_revision", "head_revision", ")", ";", "see", ":", "func", ":", "get_current_revision", "and", ":", "func", ":", "get_head_revision_from_alembic", "." ]
python
train
35.939394
DataONEorg/d1_python
gmn/src/d1_gmn/app/scimeta.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/scimeta.py#L30-L64
def assert_valid(sysmeta_pyxb, pid): """Validate file at {sciobj_path} against schema selected via formatId and raise InvalidRequest if invalid. Validation is only performed when: - SciMeta validation is enabled - and Object size is below size limit for validation - and formatId designates object as a Science Metadata object which is recognized and parsed by DataONE CNs - and XML Schema (XSD) files for formatId are present on local system """ if not (_is_validation_enabled() and _is_installed_scimeta_format_id(sysmeta_pyxb)): return if _is_above_size_limit(sysmeta_pyxb): if _is_action_accept(): return else: raise d1_common.types.exceptions.InvalidRequest( 0, 'Science Metadata file is above size limit for validation and this ' 'node has been configured to reject unvalidated Science Metadata ' 'files. For more information, see the SCIMETA_VALIDATE* settings. ' 'size={} size_limit={}'.format( sysmeta_pyxb.size, django.conf.settings.SCIMETA_VALIDATION_MAX_SIZE ), ) with d1_gmn.app.sciobj_store.open_sciobj_file_by_pid_ctx(pid) as sciobj_file: try: d1_scimeta.xml_schema.validate(sysmeta_pyxb.formatId, sciobj_file.read()) except d1_scimeta.xml_schema.SciMetaValidationError as e: raise d1_common.types.exceptions.InvalidRequest(0, str(e))
[ "def", "assert_valid", "(", "sysmeta_pyxb", ",", "pid", ")", ":", "if", "not", "(", "_is_validation_enabled", "(", ")", "and", "_is_installed_scimeta_format_id", "(", "sysmeta_pyxb", ")", ")", ":", "return", "if", "_is_above_size_limit", "(", "sysmeta_pyxb", ")", ":", "if", "_is_action_accept", "(", ")", ":", "return", "else", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Science Metadata file is above size limit for validation and this '", "'node has been configured to reject unvalidated Science Metadata '", "'files. For more information, see the SCIMETA_VALIDATE* settings. '", "'size={} size_limit={}'", ".", "format", "(", "sysmeta_pyxb", ".", "size", ",", "django", ".", "conf", ".", "settings", ".", "SCIMETA_VALIDATION_MAX_SIZE", ")", ",", ")", "with", "d1_gmn", ".", "app", ".", "sciobj_store", ".", "open_sciobj_file_by_pid_ctx", "(", "pid", ")", "as", "sciobj_file", ":", "try", ":", "d1_scimeta", ".", "xml_schema", ".", "validate", "(", "sysmeta_pyxb", ".", "formatId", ",", "sciobj_file", ".", "read", "(", ")", ")", "except", "d1_scimeta", ".", "xml_schema", ".", "SciMetaValidationError", "as", "e", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "str", "(", "e", ")", ")" ]
Validate file at {sciobj_path} against schema selected via formatId and raise InvalidRequest if invalid. Validation is only performed when: - SciMeta validation is enabled - and Object size is below size limit for validation - and formatId designates object as a Science Metadata object which is recognized and parsed by DataONE CNs - and XML Schema (XSD) files for formatId are present on local system
[ "Validate", "file", "at", "{", "sciobj_path", "}", "against", "schema", "selected", "via", "formatId", "and", "raise", "InvalidRequest", "if", "invalid", "." ]
python
train
42.457143
LegoStormtroopr/django-spaghetti-and-meatballs
django_spaghetti/views.py
https://github.com/LegoStormtroopr/django-spaghetti-and-meatballs/blob/19240f0faeddb0e6fdd9e657cb1565d78bf43f10/django_spaghetti/views.py#L155-L176
def get_node_label(self, model): """ Defines how labels are constructed from models. Default - uses verbose name, line breaks where sensible """ if model.is_proxy: label = "(P) %s" % (model.name.title()) else: label = "%s" % (model.name.title()) line = "" new_label = [] for w in label.split(" "): if len(line + w) > 15: new_label.append(line) line = w else: line += " " line += w new_label.append(line) return "\n".join(new_label)
[ "def", "get_node_label", "(", "self", ",", "model", ")", ":", "if", "model", ".", "is_proxy", ":", "label", "=", "\"(P) %s\"", "%", "(", "model", ".", "name", ".", "title", "(", ")", ")", "else", ":", "label", "=", "\"%s\"", "%", "(", "model", ".", "name", ".", "title", "(", ")", ")", "line", "=", "\"\"", "new_label", "=", "[", "]", "for", "w", "in", "label", ".", "split", "(", "\" \"", ")", ":", "if", "len", "(", "line", "+", "w", ")", ">", "15", ":", "new_label", ".", "append", "(", "line", ")", "line", "=", "w", "else", ":", "line", "+=", "\" \"", "line", "+=", "w", "new_label", ".", "append", "(", "line", ")", "return", "\"\\n\"", ".", "join", "(", "new_label", ")" ]
Defines how labels are constructed from models.
Default - uses verbose name, line breaks where sensible
[ "Defines", "how", "labels", "are", "constructed", "from", "models", ".", "Default", "-", "uses", "verbose", "name", "lines", "breaks", "where", "sensible" ]
python
train
27.863636
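A minimal standalone sketch of the same greedy wrapping idea, outside Django (the 15-character threshold comes from the record above; `wrap_label` is a hypothetical name):

def wrap_label(label, width=15):
    """Greedily break a label into lines of roughly `width` characters."""
    line = ""
    lines = []
    for word in label.split(" "):
        if len(line + word) > width:
            lines.append(line)
            line = word
        else:
            line = (line + " " + word).strip()
    lines.append(line)
    return "\n".join(lines)

print(wrap_label("Some Rather Long Model Name"))
# Some Rather Long
# Model Name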
kstaniek/condoor
condoor/actions.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/actions.py#L134-L140
def a_unexpected_prompt(ctx):
    """Provide a message when the jump host prompt is received."""
    prompt = ctx.ctrl.match.group(0)
    ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
    ctx.device.connected = False
    ctx.finished = True
    raise ConnectionError("Unable to connect to the device.", ctx.ctrl.hostname)
[ "def", "a_unexpected_prompt", "(", "ctx", ")", ":", "prompt", "=", "ctx", ".", "ctrl", ".", "match", ".", "group", "(", "0", ")", "ctx", ".", "msg", "=", "\"Received the jump host prompt: '{}'\"", ".", "format", "(", "prompt", ")", "ctx", ".", "device", ".", "connected", "=", "False", "ctx", ".", "finished", "=", "True", "raise", "ConnectionError", "(", "\"Unable to connect to the device.\"", ",", "ctx", ".", "ctrl", ".", "hostname", ")" ]
Provide a message when the jump host prompt is received.
[ "Provide", "message", "when", "received", "humphost", "prompt", "." ]
python
train
46
necaris/python3-openid
openid/extensions/draft/pape5.py
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/extensions/draft/pape5.py#L73-L80
def _generateAlias(self): """Return an unused auth level alias""" for i in range(1000): alias = 'cust%d' % (i, ) if alias not in self.auth_level_aliases: return alias raise RuntimeError('Could not find an unused alias (tried 1000!)')
[ "def", "_generateAlias", "(", "self", ")", ":", "for", "i", "in", "range", "(", "1000", ")", ":", "alias", "=", "'cust%d'", "%", "(", "i", ",", ")", "if", "alias", "not", "in", "self", ".", "auth_level_aliases", ":", "return", "alias", "raise", "RuntimeError", "(", "'Could not find an unused alias (tried 1000!)'", ")" ]
Return an unused auth level alias
[ "Return", "an", "unused", "auth", "level", "alias" ]
python
train
36.375
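The probing pattern above is easy to reuse on its own; a hedged sketch with hypothetical names (`generate_alias`, `taken`):

def generate_alias(taken, prefix="cust", limit=1000):
    """Probe cust0, cust1, ... until a name not in `taken` is found."""
    for i in range(limit):
        alias = "%s%d" % (prefix, i)
        if alias not in taken:
            return alias
    raise RuntimeError("Could not find an unused alias (tried %d!)" % limit)

print(generate_alias({"cust0", "cust1"}))  # -> cust2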
apple/turicreate
src/unity/python/turicreate/data_structures/sarray.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sarray.py#L4286-L4313
def cumulative_min(self):
    """
    Return the cumulative minimum value of the elements in the SArray.

    Returns an SArray where each element in the output corresponds
    to the minimum value of all the elements preceding and including
    it. The SArray is expected to be of numeric type (int, float).

    Returns
    -------
    out : SArray[int, float]

    Notes
    -----
     - Missing values are ignored while performing the cumulative
       aggregate operation.

    Examples
    --------
    >>> sa = SArray([1, 2, 3, 4, 0])
    >>> sa.cumulative_min()
    dtype: int
    rows: 5
    [1, 1, 1, 1, 0]
    """
    from .. import extensions
    agg_op = "__builtin__cum_min__"
    return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
[ "def", "cumulative_min", "(", "self", ")", ":", "from", ".", ".", "import", "extensions", "agg_op", "=", "\"__builtin__cum_min__\"", "return", "SArray", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "builtin_cumulative_aggregate", "(", "agg_op", ")", ")" ]
Return the cumulative minimum value of the elements in the SArray.

Returns an SArray where each element in the output corresponds
to the minimum value of all the elements preceding and including
it. The SArray is expected to be of numeric type (int, float).

Returns
-------
out : SArray[int, float]

Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.

Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_min()
dtype: int
rows: 5
[1, 1, 1, 1, 0]
[ "Return", "the", "cumulative", "minimum", "value", "of", "the", "elements", "in", "the", "SArray", "." ]
python
train
29.821429
bkabrda/flask-whooshee
flask_whooshee.py
https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L433-L454
def on_commit(self, changes): """Method that gets called when a model is changed. This serves to do the actual index writing. """ if _get_config(self)['enable_indexing'] is False: return None for wh in self.whoosheers: if not wh.auto_update: continue writer = None for change in changes: if change[0].__class__ in wh.models: method_name = '{0}_{1}'.format(change[1], change[0].__class__.__name__.lower()) method = getattr(wh, method_name, None) if method: if not writer: writer = type(self).get_or_create_index(_get_app(self), wh).\ writer(timeout=_get_config(self)['writer_timeout']) method(writer, change[0]) if writer: writer.commit()
[ "def", "on_commit", "(", "self", ",", "changes", ")", ":", "if", "_get_config", "(", "self", ")", "[", "'enable_indexing'", "]", "is", "False", ":", "return", "None", "for", "wh", "in", "self", ".", "whoosheers", ":", "if", "not", "wh", ".", "auto_update", ":", "continue", "writer", "=", "None", "for", "change", "in", "changes", ":", "if", "change", "[", "0", "]", ".", "__class__", "in", "wh", ".", "models", ":", "method_name", "=", "'{0}_{1}'", ".", "format", "(", "change", "[", "1", "]", ",", "change", "[", "0", "]", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", ")", "method", "=", "getattr", "(", "wh", ",", "method_name", ",", "None", ")", "if", "method", ":", "if", "not", "writer", ":", "writer", "=", "type", "(", "self", ")", ".", "get_or_create_index", "(", "_get_app", "(", "self", ")", ",", "wh", ")", ".", "writer", "(", "timeout", "=", "_get_config", "(", "self", ")", "[", "'writer_timeout'", "]", ")", "method", "(", "writer", ",", "change", "[", "0", "]", ")", "if", "writer", ":", "writer", ".", "commit", "(", ")" ]
Method that gets called when a model is changed. This serves to do the actual index writing.
[ "Method", "that", "gets", "called", "when", "a", "model", "is", "changed", ".", "This", "serves", "to", "do", "the", "actual", "index", "writing", "." ]
python
train
42.409091
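The core trick in on_commit is name-based dispatch (change kind plus lowercased model name selects the handler) with the index writer opened lazily and committed once per batch. A standalone sketch under assumed names (`wh` is any object with methods like insert_post(writer, obj); `index` is a Whoosh index):

def flush_changes(wh, index, changes):
    """Dispatch ('insert'|'update'|'delete', obj) pairs to wh.<kind>_<model>."""
    writer = None
    for kind, obj in changes:
        method = getattr(wh, "%s_%s" % (kind, type(obj).__name__.lower()), None)
        if method is None:
            continue
        if writer is None:
            writer = index.writer()  # opened lazily, only if a handler matches
        method(writer, obj)
    if writer is not None:
        writer.commit()  # one commit for the whole batch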
Microsoft/nni
src/sdk/pynni/nni/evolution_tuner/evolution_tuner.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/evolution_tuner/evolution_tuner.py#L236-L278
def generate_parameters(self, parameter_id): """Returns a dict of trial (hyper-)parameters, as a serializable object. Parameters ---------- parameter_id : int Returns ------- config : dict """ if not self.population: raise RuntimeError('The population is empty') pos = -1 for i in range(len(self.population)): if self.population[i].result is None: pos = i break if pos != -1: indiv = copy.deepcopy(self.population[pos]) self.population.pop(pos) total_config = indiv.config else: random.shuffle(self.population) if self.population[0].result < self.population[1].result: self.population[0] = self.population[1] # mutation space = json2space(self.searchspace_json, self.population[0].config) is_rand = dict() mutation_pos = space[random.randint(0, len(space)-1)] for i in range(len(self.space)): is_rand[self.space[i]] = (self.space[i] == mutation_pos) config = json2paramater( self.searchspace_json, is_rand, self.random_state, self.population[0].config) self.population.pop(1) # remove "_index" from config and save params-id total_config = config self.total_data[parameter_id] = total_config config = _split_index(total_config) return config
[ "def", "generate_parameters", "(", "self", ",", "parameter_id", ")", ":", "if", "not", "self", ".", "population", ":", "raise", "RuntimeError", "(", "'The population is empty'", ")", "pos", "=", "-", "1", "for", "i", "in", "range", "(", "len", "(", "self", ".", "population", ")", ")", ":", "if", "self", ".", "population", "[", "i", "]", ".", "result", "is", "None", ":", "pos", "=", "i", "break", "if", "pos", "!=", "-", "1", ":", "indiv", "=", "copy", ".", "deepcopy", "(", "self", ".", "population", "[", "pos", "]", ")", "self", ".", "population", ".", "pop", "(", "pos", ")", "total_config", "=", "indiv", ".", "config", "else", ":", "random", ".", "shuffle", "(", "self", ".", "population", ")", "if", "self", ".", "population", "[", "0", "]", ".", "result", "<", "self", ".", "population", "[", "1", "]", ".", "result", ":", "self", ".", "population", "[", "0", "]", "=", "self", ".", "population", "[", "1", "]", "# mutation", "space", "=", "json2space", "(", "self", ".", "searchspace_json", ",", "self", ".", "population", "[", "0", "]", ".", "config", ")", "is_rand", "=", "dict", "(", ")", "mutation_pos", "=", "space", "[", "random", ".", "randint", "(", "0", ",", "len", "(", "space", ")", "-", "1", ")", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "space", ")", ")", ":", "is_rand", "[", "self", ".", "space", "[", "i", "]", "]", "=", "(", "self", ".", "space", "[", "i", "]", "==", "mutation_pos", ")", "config", "=", "json2paramater", "(", "self", ".", "searchspace_json", ",", "is_rand", ",", "self", ".", "random_state", ",", "self", ".", "population", "[", "0", "]", ".", "config", ")", "self", ".", "population", ".", "pop", "(", "1", ")", "# remove \"_index\" from config and save params-id", "total_config", "=", "config", "self", ".", "total_data", "[", "parameter_id", "]", "=", "total_config", "config", "=", "_split_index", "(", "total_config", ")", "return", "config" ]
Returns a dict of trial (hyper-)parameters, as a serializable object. Parameters ---------- parameter_id : int Returns ------- config : dict
[ "Returns", "a", "dict", "of", "trial", "(", "hyper", "-", ")", "parameters", "as", "a", "serializable", "object", "." ]
python
train
35.767442
juju/python-libjuju
juju/client/_client.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client.py#L22-L36
def lookup_facade(name, version): """ Given a facade name and version, attempt to pull that facade out of the correct client<version>.py file. """ for _version in range(int(version), 0, -1): try: facade = getattr(CLIENTS[str(_version)], name) return facade except (KeyError, AttributeError): continue else: raise ImportError("No supported version for facade: " "{}".format(name))
[ "def", "lookup_facade", "(", "name", ",", "version", ")", ":", "for", "_version", "in", "range", "(", "int", "(", "version", ")", ",", "0", ",", "-", "1", ")", ":", "try", ":", "facade", "=", "getattr", "(", "CLIENTS", "[", "str", "(", "_version", ")", "]", ",", "name", ")", "return", "facade", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "continue", "else", ":", "raise", "ImportError", "(", "\"No supported version for facade: \"", "\"{}\"", ".", "format", "(", "name", ")", ")" ]
Given a facade name and version, attempt to pull that facade out of the correct client<version>.py file.
[ "Given", "a", "facade", "name", "and", "version", "attempt", "to", "pull", "that", "facade", "out", "of", "the", "correct", "client<version", ">", ".", "py", "file", "." ]
python
train
31.733333
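The same downward version probe works for any versioned registry; a sketch with a plain dict standing in for CLIENTS:

def lookup(registry, name, version):
    """Try registry[str(v)].name for v = version, version - 1, ..., 1."""
    for v in range(int(version), 0, -1):
        try:
            return getattr(registry[str(v)], name)
        except (KeyError, AttributeError):
            continue
    raise ImportError("No supported version for facade: {}".format(name))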
gevious/flask_slither
flask_slither/resources.py
https://github.com/gevious/flask_slither/blob/bf1fd1e58224c19883f4b19c5f727f47ee9857da/flask_slither/resources.py#L173-L188
def merge_record_data(self, changes, orig_record=None):
    """This method merges PATCH requests with the db record to ensure no
    data is lost. In addition, it is also a hook for other fields to be
    overwritten, to ensure immutable fields aren't changed by a request."""
    current_app.logger.info("Merging request data with db record")
    current_app.logger.debug("orig_record: {}".format(orig_record))
    current_app.logger.debug("changes: {}".format(changes))

    final_record = changes
    if request.method == 'PATCH':
        final_record = dict(orig_record)
        final_record.update(changes)
    elif request.method == 'PUT':
        if '_id' in orig_record:
            final_record['_id'] = orig_record['_id']
    return final_record
[ "def", "merge_record_data", "(", "self", ",", "changes", ",", "orig_record", "=", "None", ")", ":", "current_app", ".", "logger", ".", "info", "(", "\"Merging request data with db record\"", ")", "current_app", ".", "logger", ".", "debug", "(", "\"orig_record: {}\"", ".", "format", "(", "orig_record", ")", ")", "current_app", ".", "logger", ".", "debug", "(", "\"Changes\"", ".", "format", "(", "changes", ")", ")", "final_record", "=", "changes", "if", "request", ".", "method", "==", "'PATCH'", ":", "final_record", "=", "dict", "(", "orig_record", ")", "final_record", ".", "update", "(", "changes", ")", "elif", "request", ".", "method", "==", "'PUT'", ":", "if", "'_id'", "in", "orig_record", ":", "final_record", "[", "'_id'", "]", "=", "orig_record", "[", "'_id'", "]", "return", "final_record" ]
This method merges PATCH requests with the db record to ensure no data is lost. In addition, it is also a hook for other fields to be overwritten, to ensure immutable fields aren't changed by a request.
[ "This", "method", "merges", "PATCH", "requests", "with", "the", "db", "record", "to", "ensure", "no", "data", "is", "lost", ".", "In", "addition", "it", "is", "also", "a", "hook", "for", "other", "fields", "to", "be", "overwritten", "to", "ensure", "immutable", "fields", "aren", "t", "changed", "by", "a", "request", "." ]
python
train
50.5
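The merge semantics in isolation: PATCH overlays the changes on the stored record, while PUT replaces it wholesale but preserves the immutable `_id`. A minimal sketch with plain dicts:

def merge(method, changes, orig):
    if method == "PATCH":            # partial update: start from the db record
        merged = dict(orig)
        merged.update(changes)
        return merged
    merged = dict(changes)           # PUT: full replacement ...
    if "_id" in orig:
        merged["_id"] = orig["_id"]  # ... except the id, which is kept
    return merged

print(merge("PATCH", {"name": "new"}, {"_id": 1, "name": "old", "age": 3}))
# {'_id': 1, 'name': 'new', 'age': 3}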
ibis-project/ibis
ibis/expr/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L2146-L2171
def _string_like(self, patterns):
    """
    Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
    % as a multiple-character wildcard or _ (underscore) as a single-character
    wildcard.

    Use re_search or rlike for regex-based matching.

    Parameters
    ----------
    patterns : str or List[str]
        A pattern or list of patterns to match. If `patterns` is a list, then
        if **any** pattern matches the input then the corresponding row in the
        output is ``True``.

    Returns
    -------
    matched : ir.BooleanColumn
    """
    return functools.reduce(
        operator.or_,
        (
            ops.StringSQLLike(self, pattern).to_expr()
            for pattern in util.promote_list(patterns)
        ),
    )
[ "def", "_string_like", "(", "self", ",", "patterns", ")", ":", "return", "functools", ".", "reduce", "(", "operator", ".", "or_", ",", "(", "ops", ".", "StringSQLLike", "(", "self", ",", "pattern", ")", ".", "to_expr", "(", ")", "for", "pattern", "in", "util", ".", "promote_list", "(", "patterns", ")", ")", ",", ")" ]
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use %
as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.

Use re_search or rlike for regex-based matching.

Parameters
----------
patterns : str or List[str]
    A pattern or list of patterns to match. If `patterns` is a list, then if
    **any** pattern matches the input then the corresponding row in the
    output is ``True``.

Returns
-------
matched : ir.BooleanColumn
[ "Wildcard", "fuzzy", "matching", "function", "equivalent", "to", "the", "SQL", "LIKE", "directive", ".", "Use", "%", "as", "a", "multiple", "-", "character", "wildcard", "or", "_", "(", "underscore", ")", "as", "a", "single", "-", "character", "wildcard", "." ]
python
train
28.461538
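The reduce-with-operator.or_ idiom generalizes beyond ibis expressions; a plain-Python sketch that ORs one boolean per pattern (fnmatch-style wildcards stand in for SQL LIKE here, and `patterns` is assumed non-empty):

import functools
import operator
from fnmatch import fnmatch

def any_like(value, patterns):
    # One boolean per pattern, folded together with "or".
    return functools.reduce(
        operator.or_, (fnmatch(value, p) for p in patterns)
    )

print(any_like("report.csv", ["*.txt", "*.csv"]))  # True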
Duke-GCB/DukeDSClient
ddsc/core/ddsapi.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/ddsapi.py#L565-L576
def update_file(self, file_id, upload_id):
    """
    Send PUT request to /files/{file_id} to update the file contents to upload_id.
    :param file_id: str uuid of file
    :param upload_id: str uuid of the upload where all the file chunks were uploaded
    :return: requests.Response containing the successful result
    """
    put_data = {
        "upload[id]": upload_id,
    }
    return self._put("/files/" + file_id, put_data, content_type=ContentType.form)
[ "def", "update_file", "(", "self", ",", "file_id", ",", "upload_id", ")", ":", "put_data", "=", "{", "\"upload[id]\"", ":", "upload_id", ",", "}", "return", "self", ".", "_put", "(", "\"/files/\"", "+", "file_id", ",", "put_data", ",", "content_type", "=", "ContentType", ".", "form", ")" ]
Send PUT request to /files/{file_id} to update the file contents to upload_id.
:param file_id: str uuid of file
:param upload_id: str uuid of the upload where all the file chunks were uploaded
:return: requests.Response containing the successful result
[ "Send", "PUT", "request", "to", "/", "files", "/", "{", "file_id", "}", "to", "update", "the", "file", "contents", "to", "upload_id", "and", "sets", "a", "label", ".", ":", "param", "file_id", ":", "str", "uuid", "of", "file", ":", "param", "upload_id", ":", "str", "uuid", "of", "the", "upload", "where", "all", "the", "file", "chunks", "where", "uploaded", ":", "param", "label", ":", "str", "short", "display", "label", "for", "the", "file", ":", "return", ":", "requests", ".", "Response", "containing", "the", "successful", "result" ]
python
train
47.666667
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L648-L664
def fix_e125(self, result):
    """Fix indentation that is indistinguishable from the next logical line."""
    num_indent_spaces = int(result['info'].split()[1])
    line_index = result['line'] - 1
    target = self.source[line_index]

    spaces_to_add = num_indent_spaces - len(_get_indentation(target))
    indent = len(_get_indentation(target))
    modified_lines = []

    while len(_get_indentation(self.source[line_index])) >= indent:
        self.source[line_index] = (' ' * spaces_to_add +
                                   self.source[line_index])
        modified_lines.append(1 + line_index)  # Line indexed at 1.
        line_index -= 1

    return modified_lines
[ "def", "fix_e125", "(", "self", ",", "result", ")", ":", "num_indent_spaces", "=", "int", "(", "result", "[", "'info'", "]", ".", "split", "(", ")", "[", "1", "]", ")", "line_index", "=", "result", "[", "'line'", "]", "-", "1", "target", "=", "self", ".", "source", "[", "line_index", "]", "spaces_to_add", "=", "num_indent_spaces", "-", "len", "(", "_get_indentation", "(", "target", ")", ")", "indent", "=", "len", "(", "_get_indentation", "(", "target", ")", ")", "modified_lines", "=", "[", "]", "while", "len", "(", "_get_indentation", "(", "self", ".", "source", "[", "line_index", "]", ")", ")", ">=", "indent", ":", "self", ".", "source", "[", "line_index", "]", "=", "(", "' '", "*", "spaces_to_add", "+", "self", ".", "source", "[", "line_index", "]", ")", "modified_lines", ".", "append", "(", "1", "+", "line_index", ")", "# Line indexed at 1.", "line_index", "-=", "1", "return", "modified_lines" ]
Fix indentation that is indistinguishable from the next logical line.
[ "Fix", "indentation", "undistinguish", "from", "the", "next", "logical", "line", "." ]
python
train
41.294118
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L454-L465
def global_iterator(self): """ Return global iterator sympy expression """ global_iterator = sympy.Integer(0) total_length = sympy.Integer(1) for var_name, start, end, incr in reversed(self._loop_stack): loop_var = symbol_pos_int(var_name) length = end - start # FIXME is incr handled correct here? global_iterator += (loop_var - start) * total_length total_length *= length return global_iterator
[ "def", "global_iterator", "(", "self", ")", ":", "global_iterator", "=", "sympy", ".", "Integer", "(", "0", ")", "total_length", "=", "sympy", ".", "Integer", "(", "1", ")", "for", "var_name", ",", "start", ",", "end", ",", "incr", "in", "reversed", "(", "self", ".", "_loop_stack", ")", ":", "loop_var", "=", "symbol_pos_int", "(", "var_name", ")", "length", "=", "end", "-", "start", "# FIXME is incr handled correct here?", "global_iterator", "+=", "(", "loop_var", "-", "start", ")", "*", "total_length", "total_length", "*=", "length", "return", "global_iterator" ]
Return global iterator sympy expression
[ "Return", "global", "iterator", "sympy", "expression" ]
python
test
40.916667
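The accumulation above is the usual row-major flattening of a loop nest: iterate loops innermost-first, multiplying up the lengths. A numeric sketch (unit increments assumed, matching the FIXME in the record):

def global_index(loop_stack, values):
    """loop_stack: [(name, start, end), ...] outermost first;
    values: name -> current loop value."""
    gi, total = 0, 1
    for name, start, end in reversed(loop_stack):  # innermost first
        gi += (values[name] - start) * total
        total *= end - start
    return gi

# 2 x 4 nest: i in [0, 2), j in [0, 4); (i=1, j=2) -> 1*4 + 2 = 6
print(global_index([("i", 0, 2), ("j", 0, 4)], {"i": 1, "j": 2}))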
twisted/mantissa
xmantissa/people.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L1647-L1654
def getImportPeople(self): """ Return an L{ImportPeopleWidget} which is a child of this fragment and which will add people to C{self.organizer}. """ fragment = ImportPeopleWidget(self.organizer) fragment.setFragmentParent(self) return fragment
[ "def", "getImportPeople", "(", "self", ")", ":", "fragment", "=", "ImportPeopleWidget", "(", "self", ".", "organizer", ")", "fragment", ".", "setFragmentParent", "(", "self", ")", "return", "fragment" ]
Return an L{ImportPeopleWidget} which is a child of this fragment and which will add people to C{self.organizer}.
[ "Return", "an", "L", "{", "ImportPeopleWidget", "}", "which", "is", "a", "child", "of", "this", "fragment", "and", "which", "will", "add", "people", "to", "C", "{", "self", ".", "organizer", "}", "." ]
python
train
36.5
pypa/pipenv
pipenv/patched/notpip/_vendor/html5lib/serializer.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/serializer.py#L375-L398
def render(self, treewalker, encoding=None): """Serializes the stream from the treewalker into a string :arg treewalker: the treewalker to serialize :arg encoding: the string encoding to use :returns: the serialized tree Example: >>> from html5lib import parse, getTreeWalker >>> from html5lib.serializer import HTMLSerializer >>> token_stream = parse('<html><body>Hi!</body></html>') >>> walker = getTreeWalker('etree') >>> serializer = HTMLSerializer(omit_optional_tags=False) >>> serializer.render(walker(token_stream)) '<html><head></head><body>Hi!</body></html>' """ if encoding: return b"".join(list(self.serialize(treewalker, encoding))) else: return "".join(list(self.serialize(treewalker)))
[ "def", "render", "(", "self", ",", "treewalker", ",", "encoding", "=", "None", ")", ":", "if", "encoding", ":", "return", "b\"\"", ".", "join", "(", "list", "(", "self", ".", "serialize", "(", "treewalker", ",", "encoding", ")", ")", ")", "else", ":", "return", "\"\"", ".", "join", "(", "list", "(", "self", ".", "serialize", "(", "treewalker", ")", ")", ")" ]
Serializes the stream from the treewalker into a string :arg treewalker: the treewalker to serialize :arg encoding: the string encoding to use :returns: the serialized tree Example: >>> from html5lib import parse, getTreeWalker >>> from html5lib.serializer import HTMLSerializer >>> token_stream = parse('<html><body>Hi!</body></html>') >>> walker = getTreeWalker('etree') >>> serializer = HTMLSerializer(omit_optional_tags=False) >>> serializer.render(walker(token_stream)) '<html><head></head><body>Hi!</body></html>'
[ "Serializes", "the", "stream", "from", "the", "treewalker", "into", "a", "string" ]
python
train
34.416667
pkgw/pwkit
pwkit/io.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L308-L323
def make_relative (self, other): """Return a new path that is the equivalent of this one relative to the path *other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is not a sub-path of *other*; instead, it will use ``..`` to build a relative path. This can result in invalid relative paths if *other* contains a directory symbolic link. If *self* is an absolute path, it is returned unmodified. """ if self.is_absolute (): return self from os.path import relpath other = self.__class__ (other) return self.__class__ (relpath (text_type (self), text_type (other)))
[ "def", "make_relative", "(", "self", ",", "other", ")", ":", "if", "self", ".", "is_absolute", "(", ")", ":", "return", "self", "from", "os", ".", "path", "import", "relpath", "other", "=", "self", ".", "__class__", "(", "other", ")", "return", "self", ".", "__class__", "(", "relpath", "(", "text_type", "(", "self", ")", ",", "text_type", "(", "other", ")", ")", ")" ]
Return a new path that is the equivalent of this one relative to the path *other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is not a sub-path of *other*; instead, it will use ``..`` to build a relative path. This can result in invalid relative paths if *other* contains a directory symbolic link. If *self* is an absolute path, it is returned unmodified.
[ "Return", "a", "new", "path", "that", "is", "the", "equivalent", "of", "this", "one", "relative", "to", "the", "path", "*", "other", "*", ".", "Unlike", ":", "meth", ":", "relative_to", "this", "will", "not", "throw", "an", "error", "if", "*", "self", "*", "is", "not", "a", "sub", "-", "path", "of", "*", "other", "*", ";", "instead", "it", "will", "use", "..", "to", "build", "a", "relative", "path", ".", "This", "can", "result", "in", "invalid", "relative", "paths", "if", "*", "other", "*", "contains", "a", "directory", "symbolic", "link", "." ]
python
train
42.0625
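Under the hood this is os.path.relpath, which is what introduces the `..` segments; for instance, on a POSIX system:

from os.path import relpath

print(relpath("a/b/c", "a/d"))  # '../b/c' -- climbs out of a/d with '..'
print(relpath("a/b", "a"))      # 'b'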
aleju/imgaug
imgaug/augmentables/polys.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/polys.py#L593-L636
def extract_from_image(self, image): """ Extract the image pixels within the polygon. This function will zero-pad the image if the polygon is partially/fully outside of the image. Parameters ---------- image : (H,W) ndarray or (H,W,C) ndarray The image from which to extract the pixels within the polygon. Returns ------- result : (H',W') ndarray or (H',W',C) ndarray Pixels within the polygon. Zero-padded if the polygon is partially/fully outside of the image. """ ia.do_assert(image.ndim in [2, 3]) if len(self.exterior) <= 2: raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.") bb = self.to_bounding_box() bb_area = bb.extract_from_image(image) if self.is_out_of_image(image, fully=True, partly=False): return bb_area xx = self.xx_int yy = self.yy_int xx_mask = xx - np.min(xx) yy_mask = yy - np.min(yy) height_mask = np.max(yy_mask) width_mask = np.max(xx_mask) rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask)) mask = np.zeros((height_mask, width_mask), dtype=np.bool) mask[rr_face, cc_face] = True if image.ndim == 3: mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2])) return bb_area * mask
[ "def", "extract_from_image", "(", "self", ",", "image", ")", ":", "ia", ".", "do_assert", "(", "image", ".", "ndim", "in", "[", "2", ",", "3", "]", ")", "if", "len", "(", "self", ".", "exterior", ")", "<=", "2", ":", "raise", "Exception", "(", "\"Polygon must be made up of at least 3 points to extract its area from an image.\"", ")", "bb", "=", "self", ".", "to_bounding_box", "(", ")", "bb_area", "=", "bb", ".", "extract_from_image", "(", "image", ")", "if", "self", ".", "is_out_of_image", "(", "image", ",", "fully", "=", "True", ",", "partly", "=", "False", ")", ":", "return", "bb_area", "xx", "=", "self", ".", "xx_int", "yy", "=", "self", ".", "yy_int", "xx_mask", "=", "xx", "-", "np", ".", "min", "(", "xx", ")", "yy_mask", "=", "yy", "-", "np", ".", "min", "(", "yy", ")", "height_mask", "=", "np", ".", "max", "(", "yy_mask", ")", "width_mask", "=", "np", ".", "max", "(", "xx_mask", ")", "rr_face", ",", "cc_face", "=", "skimage", ".", "draw", ".", "polygon", "(", "yy_mask", ",", "xx_mask", ",", "shape", "=", "(", "height_mask", ",", "width_mask", ")", ")", "mask", "=", "np", ".", "zeros", "(", "(", "height_mask", ",", "width_mask", ")", ",", "dtype", "=", "np", ".", "bool", ")", "mask", "[", "rr_face", ",", "cc_face", "]", "=", "True", "if", "image", ".", "ndim", "==", "3", ":", "mask", "=", "np", ".", "tile", "(", "mask", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", ",", "(", "1", ",", "1", ",", "image", ".", "shape", "[", "2", "]", ")", ")", "return", "bb_area", "*", "mask" ]
Extract the image pixels within the polygon. This function will zero-pad the image if the polygon is partially/fully outside of the image. Parameters ---------- image : (H,W) ndarray or (H,W,C) ndarray The image from which to extract the pixels within the polygon. Returns ------- result : (H',W') ndarray or (H',W',C) ndarray Pixels within the polygon. Zero-padded if the polygon is partially/fully outside of the image.
[ "Extract", "the", "image", "pixels", "within", "the", "polygon", "." ]
python
valid
33
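The masking step on its own: skimage.draw.polygon rasterizes the exterior into row/column indices, which fill a boolean mask that zeroes everything outside the polygon. A small sketch (a 5x5 triangle, with plain `bool` in place of the deprecated np.bool):

import numpy as np
import skimage.draw

rows = np.array([0, 4, 0])
cols = np.array([0, 0, 4])
rr, cc = skimage.draw.polygon(rows, cols, shape=(5, 5))
mask = np.zeros((5, 5), dtype=bool)
mask[rr, cc] = True

image = np.full((5, 5), 7)
print(image * mask)  # pixels outside the triangle become 0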
gawel/irc3
irc3/utils.py
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/utils.py#L240-L262
def as_list(value):
    """clever string splitting:

    .. code-block:: python

        >>> print(as_list('value'))
        ['value']
        >>> print(as_list('v1 v2'))
        ['v1', 'v2']
        >>> print(as_list(None))
        []
        >>> print(as_list(['v1']))
        ['v1']
    """
    if isinstance(value, (list, tuple)):
        return value
    if not value:
        return []
    for c in '\n ':
        if c in value:
            value = value.split(c)
            return [v.strip() for v in value if v.strip()]
    return [value]
[ "def", "as_list", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "value", "if", "not", "value", ":", "return", "[", "]", "for", "c", "in", "'\\n '", ":", "if", "c", "in", "value", ":", "value", "=", "value", ".", "split", "(", "c", ")", "return", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "value", "if", "v", ".", "strip", "(", ")", "]", "return", "[", "value", "]" ]
clever string splitting:

.. code-block:: python

    >>> print(as_list('value'))
    ['value']
    >>> print(as_list('v1 v2'))
    ['v1', 'v2']
    >>> print(as_list(None))
    []
    >>> print(as_list(['v1']))
    ['v1']
[ "clever", "string", "spliting", ":" ]
python
train
22.782609
jobovy/galpy
galpy/potential/DoubleExponentialDiskPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/DoubleExponentialDiskPotential.py#L152-L183
def _Rforce(self,R,z,phi=0.,t=0.): """ NAME: Rforce PURPOSE: evaluate radial force K_R (R,z) INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: K_R (R,z) HISTORY: 2010-04-16 - Written - Bovy (NYU) DOCTEST: """ if True: if isinstance(R,nu.ndarray): if not isinstance(z,nu.ndarray): z= nu.ones_like(R)*z out= nu.array([self._Rforce(rr,zz) for rr,zz in zip(R,z)]) return out if (R > 16.*self._hr or R > 6.) and hasattr(self,'_kp'): return self._kp.Rforce(R,z) if R < 1.: R4max= 1. else: R4max= R kmax= self._kmaxFac*self._beta kmax= 2.*self._kmaxFac*self._beta maxj1zeroIndx= nu.argmin((self._j1zeros-kmax*R4max)**2.) #close enough ks= nu.array([0.5*(self._glx+1.)*self._dj1zeros[ii+1] + self._j1zeros[ii] for ii in range(maxj1zeroIndx)]).flatten() weights= nu.array([self._glw*self._dj1zeros[ii+1] for ii in range(maxj1zeroIndx)]).flatten() evalInt= ks*special.jn(1,ks*R)*(self._alpha**2.+ks**2.)**-1.5*(self._beta*nu.exp(-ks*nu.fabs(z))-ks*nu.exp(-self._beta*nu.fabs(z)))/(self._beta**2.-ks**2.) return -2.*nu.pi*self._alpha*nu.sum(weights*evalInt)
[ "def", "_Rforce", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "if", "True", ":", "if", "isinstance", "(", "R", ",", "nu", ".", "ndarray", ")", ":", "if", "not", "isinstance", "(", "z", ",", "nu", ".", "ndarray", ")", ":", "z", "=", "nu", ".", "ones_like", "(", "R", ")", "*", "z", "out", "=", "nu", ".", "array", "(", "[", "self", ".", "_Rforce", "(", "rr", ",", "zz", ")", "for", "rr", ",", "zz", "in", "zip", "(", "R", ",", "z", ")", "]", ")", "return", "out", "if", "(", "R", ">", "16.", "*", "self", ".", "_hr", "or", "R", ">", "6.", ")", "and", "hasattr", "(", "self", ",", "'_kp'", ")", ":", "return", "self", ".", "_kp", ".", "Rforce", "(", "R", ",", "z", ")", "if", "R", "<", "1.", ":", "R4max", "=", "1.", "else", ":", "R4max", "=", "R", "kmax", "=", "self", ".", "_kmaxFac", "*", "self", ".", "_beta", "kmax", "=", "2.", "*", "self", ".", "_kmaxFac", "*", "self", ".", "_beta", "maxj1zeroIndx", "=", "nu", ".", "argmin", "(", "(", "self", ".", "_j1zeros", "-", "kmax", "*", "R4max", ")", "**", "2.", ")", "#close enough", "ks", "=", "nu", ".", "array", "(", "[", "0.5", "*", "(", "self", ".", "_glx", "+", "1.", ")", "*", "self", ".", "_dj1zeros", "[", "ii", "+", "1", "]", "+", "self", ".", "_j1zeros", "[", "ii", "]", "for", "ii", "in", "range", "(", "maxj1zeroIndx", ")", "]", ")", ".", "flatten", "(", ")", "weights", "=", "nu", ".", "array", "(", "[", "self", ".", "_glw", "*", "self", ".", "_dj1zeros", "[", "ii", "+", "1", "]", "for", "ii", "in", "range", "(", "maxj1zeroIndx", ")", "]", ")", ".", "flatten", "(", ")", "evalInt", "=", "ks", "*", "special", ".", "jn", "(", "1", ",", "ks", "*", "R", ")", "*", "(", "self", ".", "_alpha", "**", "2.", "+", "ks", "**", "2.", ")", "**", "-", "1.5", "*", "(", "self", ".", "_beta", "*", "nu", ".", "exp", "(", "-", "ks", "*", "nu", ".", "fabs", "(", "z", ")", ")", "-", "ks", "*", "nu", ".", "exp", "(", "-", "self", ".", "_beta", "*", "nu", ".", "fabs", "(", "z", ")", ")", ")", "/", "(", "self", ".", "_beta", "**", "2.", "-", "ks", "**", "2.", ")", "return", "-", "2.", "*", "nu", ".", "pi", "*", "self", ".", "_alpha", "*", "nu", ".", "sum", "(", "weights", "*", "evalInt", ")" ]
NAME: Rforce PURPOSE: evaluate radial force K_R (R,z) INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: K_R (R,z) HISTORY: 2010-04-16 - Written - Bovy (NYU) DOCTEST:
[ "NAME", ":", "Rforce", "PURPOSE", ":", "evaluate", "radial", "force", "K_R", "(", "R", "z", ")", "INPUT", ":", "R", "-", "Cylindrical", "Galactocentric", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "K_R", "(", "R", "z", ")", "HISTORY", ":", "2010", "-", "04", "-", "16", "-", "Written", "-", "Bovy", "(", "NYU", ")", "DOCTEST", ":" ]
python
train
43.8125
sashka/flask-googleauth
flask_googleauth.py
https://github.com/sashka/flask-googleauth/blob/4e481d645f1bb22124a6d79c7881746004cf4369/flask_googleauth.py#L292-L304
def required(self, fn): """Request decorator. Forces authentication.""" @functools.wraps(fn) def decorated(*args, **kwargs): if (not self._check_auth() # Don't try to force authentication if the request is part # of the authentication process - otherwise we end up in a # loop. and request.blueprint != self.blueprint.name): return redirect(url_for("%s.login" % self.blueprint.name, next=request.url)) return fn(*args, **kwargs) return decorated
[ "def", "required", "(", "self", ",", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "(", "not", "self", ".", "_check_auth", "(", ")", "# Don't try to force authentication if the request is part", "# of the authentication process - otherwise we end up in a", "# loop.", "and", "request", ".", "blueprint", "!=", "self", ".", "blueprint", ".", "name", ")", ":", "return", "redirect", "(", "url_for", "(", "\"%s.login\"", "%", "self", ".", "blueprint", ".", "name", ",", "next", "=", "request", ".", "url", ")", ")", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorated" ]
Request decorator. Forces authentication.
[ "Request", "decorator", ".", "Forces", "authentication", "." ]
python
train
46.538462
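Stripped of the Flask machinery, this is the standard functools.wraps login-required pattern; a hedged skeleton in which check_auth and login_url are stand-ins for the extension's _check_auth and the url_for(...) redirect above:

import functools

def required(check_auth, login_url):
    def decorator(fn):
        @functools.wraps(fn)  # preserve fn's name and docstring
        def wrapper(*args, **kwargs):
            if not check_auth():
                return "redirect to " + login_url  # placeholder for redirect()
            return fn(*args, **kwargs)
        return wrapper
    return decorator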
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/ext/dtcompat.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/ext/dtcompat.py#L2112-L2176
def DocFileSuite(*paths, **kw): """A unittest suite for one or more doctest files. The path to each doctest file is given as a string; the interpretation of that string depends on the keyword argument "module_relative". A number of options may be provided as keyword arguments: module_relative If "module_relative" is True, then the given file paths are interpreted as os-independent module-relative paths. By default, these paths are relative to the calling module's directory; but if the "package" argument is specified, then they are relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and may not be an absolute path (i.e., it may not begin with "/"). If "module_relative" is False, then the given file paths are interpreted as os-specific paths. These paths may be absolute or relative (to the current working directory). package A Python package or the name of a Python package whose directory should be used as the base directory for module relative paths. If "package" is not specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. setUp A set-up function. This is called before running the tests in each file. The setUp function will be passed a DocTest object. The setUp function can access the test globals as the globs attribute of the test passed. tearDown A tear-down function. This is called after running the tests in each file. The tearDown function will be passed a DocTest object. The tearDown function can access the test globals as the globs attribute of the test passed. globs A dictionary containing initial global variables for the tests. optionflags A set of doctest option flags expressed as an integer. parser A DocTestParser (or subclass) that should be used to extract tests from the files. """ suite = unittest.TestSuite() # We do this here so that _normalize_module is called at the right # level. If it were called in DocFileTest, then this function # would be the caller and we might guess the package incorrectly. if kw.get('module_relative', True): kw['package'] = _normalize_module(kw.get('package')) for path in paths: suite.addTest(DocFileTest(path, **kw)) return suite
[ "def", "DocFileSuite", "(", "*", "paths", ",", "*", "*", "kw", ")", ":", "suite", "=", "unittest", ".", "TestSuite", "(", ")", "# We do this here so that _normalize_module is called at the right", "# level. If it were called in DocFileTest, then this function", "# would be the caller and we might guess the package incorrectly.", "if", "kw", ".", "get", "(", "'module_relative'", ",", "True", ")", ":", "kw", "[", "'package'", "]", "=", "_normalize_module", "(", "kw", ".", "get", "(", "'package'", ")", ")", "for", "path", "in", "paths", ":", "suite", ".", "addTest", "(", "DocFileTest", "(", "path", ",", "*", "*", "kw", ")", ")", "return", "suite" ]
A unittest suite for one or more doctest files. The path to each doctest file is given as a string; the interpretation of that string depends on the keyword argument "module_relative". A number of options may be provided as keyword arguments: module_relative If "module_relative" is True, then the given file paths are interpreted as os-independent module-relative paths. By default, these paths are relative to the calling module's directory; but if the "package" argument is specified, then they are relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and may not be an absolute path (i.e., it may not begin with "/"). If "module_relative" is False, then the given file paths are interpreted as os-specific paths. These paths may be absolute or relative (to the current working directory). package A Python package or the name of a Python package whose directory should be used as the base directory for module relative paths. If "package" is not specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. setUp A set-up function. This is called before running the tests in each file. The setUp function will be passed a DocTest object. The setUp function can access the test globals as the globs attribute of the test passed. tearDown A tear-down function. This is called after running the tests in each file. The tearDown function will be passed a DocTest object. The tearDown function can access the test globals as the globs attribute of the test passed. globs A dictionary containing initial global variables for the tests. optionflags A set of doctest option flags expressed as an integer. parser A DocTestParser (or subclass) that should be used to extract tests from the files.
[ "A", "unittest", "suite", "for", "one", "or", "more", "doctest", "files", "." ]
python
test
38.769231
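Typical usage mirrors the stdlib doctest API that this compatibility module tracks (README.txt is a hypothetical doctest file relative to the calling module):

import unittest

suite = DocFileSuite('README.txt')
unittest.TextTestRunner().run(suite)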
PyCQA/pydocstyle
src/pydocstyle/parser.py
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/parser.py#L562-L571
def parse_skip_comment(self): """Parse a definition comment for noqa skips.""" skipped_error_codes = '' if self.current.kind == tk.COMMENT: if 'noqa: ' in self.current.value: skipped_error_codes = ''.join( self.current.value.split('noqa: ')[1:]) elif self.current.value.startswith('# noqa'): skipped_error_codes = 'all' return skipped_error_codes
[ "def", "parse_skip_comment", "(", "self", ")", ":", "skipped_error_codes", "=", "''", "if", "self", ".", "current", ".", "kind", "==", "tk", ".", "COMMENT", ":", "if", "'noqa: '", "in", "self", ".", "current", ".", "value", ":", "skipped_error_codes", "=", "''", ".", "join", "(", "self", ".", "current", ".", "value", ".", "split", "(", "'noqa: '", ")", "[", "1", ":", "]", ")", "elif", "self", ".", "current", ".", "value", ".", "startswith", "(", "'# noqa'", ")", ":", "skipped_error_codes", "=", "'all'", "return", "skipped_error_codes" ]
Parse a definition comment for noqa skips.
[ "Parse", "a", "definition", "comment", "for", "noqa", "skips", "." ]
python
train
44.6
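The rule restated on a bare comment string: an explicit code list after "noqa: " is returned verbatim, while a bare "# noqa" means skip everything. A standalone sketch:

def skip_codes(comment):
    if 'noqa: ' in comment:
        return ''.join(comment.split('noqa: ')[1:])
    if comment.startswith('# noqa'):
        return 'all'
    return ''

print(skip_codes('# noqa: D102,D105'))  # 'D102,D105'
print(skip_codes('# noqa'))             # 'all'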
osrg/ryu
ryu/lib/stplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/stplib.py#L780-L788
def up(self, role, root_priority, root_times): """ A port is started in the state of LISTEN. """ self.port_priority = root_priority self.port_times = root_times state = (PORT_STATE_LISTEN if self.config_enable else PORT_STATE_DISABLE) self._change_role(role) self._change_status(state)
[ "def", "up", "(", "self", ",", "role", ",", "root_priority", ",", "root_times", ")", ":", "self", ".", "port_priority", "=", "root_priority", "self", ".", "port_times", "=", "root_times", "state", "=", "(", "PORT_STATE_LISTEN", "if", "self", ".", "config_enable", "else", "PORT_STATE_DISABLE", ")", "self", ".", "_change_role", "(", "role", ")", "self", ".", "_change_status", "(", "state", ")" ]
A port is started in the state of LISTEN.
[ "A", "port", "is", "started", "in", "the", "state", "of", "LISTEN", "." ]
python
train
38.222222
robotpy/pyfrc
lib/pyfrc/sim/sim_manager.py
https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/sim_manager.py#L74-L81
def set_joystick(self, x, y, n): """ Receives joystick values from the SnakeBoard x,y Coordinates n Robot number to give it to """ self.robots[n].set_joystick(x, y)
[ "def", "set_joystick", "(", "self", ",", "x", ",", "y", ",", "n", ")", ":", "self", ".", "robots", "[", "n", "]", ".", "set_joystick", "(", "x", ",", "y", ")" ]
Receives joystick values from the SnakeBoard
x,y  Coordinates
n    Robot number to give it to
[ "Receives", "joystick", "values", "from", "the", "SnakeBoard", "x", "y", "Coordinates", "n", "Robot", "number", "to", "give", "it", "to" ]
python
train
30.25
openego/ding0
ding0/core/network/grids.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/grids.py#L856-L863
def add_station(self, lv_station):
    """Adds an LV station to _station and the grid graph if not already existing"""
    if not isinstance(lv_station, LVStationDing0):
        raise Exception('Given LV station is not a LVStationDing0 object.')
    if self._station is None:
        self._station = lv_station
        self.graph_add_node(lv_station)
        self.grid_district.lv_load_area.mv_grid_district.mv_grid.graph_add_node(lv_station)
[ "def", "add_station", "(", "self", ",", "lv_station", ")", ":", "if", "not", "isinstance", "(", "lv_station", ",", "LVStationDing0", ")", ":", "raise", "Exception", "(", "'Given LV station is not a LVStationDing0 object.'", ")", "if", "self", ".", "_station", "is", "None", ":", "self", ".", "_station", "=", "lv_station", "self", ".", "graph_add_node", "(", "lv_station", ")", "self", ".", "grid_district", ".", "lv_load_area", ".", "mv_grid_district", ".", "mv_grid", ".", "graph_add_node", "(", "lv_station", ")" ]
Adds an LV station to _station and the grid graph if not already existing
[ "Adds", "a", "LV", "station", "to", "_station", "and", "grid", "graph", "if", "not", "already", "existing" ]
python
train
57.25
Radi85/Comment
comment/api/views.py
https://github.com/Radi85/Comment/blob/c3c46afe51228cd7ee4e04f5e6164fff1be3a5bc/comment/api/views.py#L34-L43
def get_queryset(self): ''' Parameters are already validated in the QuerySetPermission ''' model_type = self.request.GET.get("type") pk = self.request.GET.get("id") content_type_model = ContentType.objects.get(model=model_type.lower()) Model = content_type_model.model_class() model_obj = Model.objects.filter(id=pk).first() return Comment.objects.filter_by_object(model_obj)
[ "def", "get_queryset", "(", "self", ")", ":", "model_type", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "\"type\"", ")", "pk", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "\"id\"", ")", "content_type_model", "=", "ContentType", ".", "objects", ".", "get", "(", "model", "=", "model_type", ".", "lower", "(", ")", ")", "Model", "=", "content_type_model", ".", "model_class", "(", ")", "model_obj", "=", "Model", ".", "objects", ".", "filter", "(", "id", "=", "pk", ")", ".", "first", "(", ")", "return", "Comment", ".", "objects", ".", "filter_by_object", "(", "model_obj", ")" ]
Parameters are already validated in the QuerySetPermission
[ "Parameters", "are", "already", "validated", "in", "the", "QuerySetPermission" ]
python
train
43.8
KeplerGO/K2fov
K2fov/fov.py
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/fov.py#L361-L397
def getColRowWithinChannel(self, ra, dec, ch, wantZeroOffset=False, allowIllegalReturnValues=True): """Returns (col, row) given a (ra, dec) coordinate and channel number. """ # How close is a given ra/dec to the origin of a KeplerModule? x, y = self.defaultMap.skyToPix(ra, dec) kepModule = self.getChannelAsPolygon(ch) r = np.array([x[0],y[0]]) - kepModule.polygon[0, :] v1 = kepModule.polygon[1, :] - kepModule.polygon[0, :] v3 = kepModule.polygon[3, :] - kepModule.polygon[0, :] # Divide by |v|^2 because you're normalising v and r colFrac = np.dot(r, v1) / np.linalg.norm(v1)**2 rowFrac = np.dot(r, v3) / np.linalg.norm(v3)**2 # This is where it gets a little hairy. The channel "corners" # supplied to me actually represent points 5x5 pixels inside # the science array. Which isn't what you'd expect. # These magic numbers are the pixel numbers of the corner # edges given in fov.txt col = colFrac*(1106-17) + 17 row = rowFrac*(1038-25) + 25 if not allowIllegalReturnValues: if not self.colRowIsOnSciencePixel(col, row): msg = "Request position %7f %.7f " % (ra, dec) msg += "does not lie on science pixels for channel %i " % (ch) msg += "[ %.1f %.1f]" % (col, row) raise ValueError(msg) # Convert from zero-offset to one-offset coords if not wantZeroOffset: col += 1 row += 1 return (col, row)
[ "def", "getColRowWithinChannel", "(", "self", ",", "ra", ",", "dec", ",", "ch", ",", "wantZeroOffset", "=", "False", ",", "allowIllegalReturnValues", "=", "True", ")", ":", "# How close is a given ra/dec to the origin of a KeplerModule?", "x", ",", "y", "=", "self", ".", "defaultMap", ".", "skyToPix", "(", "ra", ",", "dec", ")", "kepModule", "=", "self", ".", "getChannelAsPolygon", "(", "ch", ")", "r", "=", "np", ".", "array", "(", "[", "x", "[", "0", "]", ",", "y", "[", "0", "]", "]", ")", "-", "kepModule", ".", "polygon", "[", "0", ",", ":", "]", "v1", "=", "kepModule", ".", "polygon", "[", "1", ",", ":", "]", "-", "kepModule", ".", "polygon", "[", "0", ",", ":", "]", "v3", "=", "kepModule", ".", "polygon", "[", "3", ",", ":", "]", "-", "kepModule", ".", "polygon", "[", "0", ",", ":", "]", "# Divide by |v|^2 because you're normalising v and r", "colFrac", "=", "np", ".", "dot", "(", "r", ",", "v1", ")", "/", "np", ".", "linalg", ".", "norm", "(", "v1", ")", "**", "2", "rowFrac", "=", "np", ".", "dot", "(", "r", ",", "v3", ")", "/", "np", ".", "linalg", ".", "norm", "(", "v3", ")", "**", "2", "# This is where it gets a little hairy. The channel \"corners\"", "# supplied to me actually represent points 5x5 pixels inside", "# the science array. Which isn't what you'd expect.", "# These magic numbers are the pixel numbers of the corner", "# edges given in fov.txt", "col", "=", "colFrac", "*", "(", "1106", "-", "17", ")", "+", "17", "row", "=", "rowFrac", "*", "(", "1038", "-", "25", ")", "+", "25", "if", "not", "allowIllegalReturnValues", ":", "if", "not", "self", ".", "colRowIsOnSciencePixel", "(", "col", ",", "row", ")", ":", "msg", "=", "\"Request position %7f %.7f \"", "%", "(", "ra", ",", "dec", ")", "msg", "+=", "\"does not lie on science pixels for channel %i \"", "%", "(", "ch", ")", "msg", "+=", "\"[ %.1f %.1f]\"", "%", "(", "col", ",", "row", ")", "raise", "ValueError", "(", "msg", ")", "# Convert from zero-offset to one-offset coords", "if", "not", "wantZeroOffset", ":", "col", "+=", "1", "row", "+=", "1", "return", "(", "col", ",", "row", ")" ]
Returns (col, row) given a (ra, dec) coordinate and channel number.
[ "Returns", "(", "col", "row", ")", "given", "a", "(", "ra", "dec", ")", "coordinate", "and", "channel", "number", "." ]
python
train
42.594595
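The geometric core is expressing the point (relative to one corner) in fractional coordinates along the two edge vectors, via dot products divided by squared edge lengths. A numeric sketch with made-up edges:

import numpy as np

corner = np.array([0.0, 0.0])
v1 = np.array([10.0, 0.0])  # edge toward increasing column
v3 = np.array([0.0, 5.0])   # edge toward increasing row
r = np.array([2.5, 4.0]) - corner

col_frac = np.dot(r, v1) / np.linalg.norm(v1) ** 2  # 0.25
row_frac = np.dot(r, v3) / np.linalg.norm(v3) ** 2  # 0.8
print(col_frac, row_frac)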
invoice-x/invoice2data
src/invoice2data/input/tesseract.py
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/tesseract.py#L4-L38
def to_text(path): """Wraps Tesseract OCR. Parameters ---------- path : str path of electronic invoice in JPG or PNG format Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format """ import subprocess from distutils import spawn # Check for dependencies. Needs Tesseract and Imagemagick installed. if not spawn.find_executable('tesseract'): raise EnvironmentError('tesseract not installed.') if not spawn.find_executable('convert'): raise EnvironmentError('imagemagick not installed.') # convert = "convert -density 350 %s -depth 8 tiff:-" % (path) convert = ['convert', '-density', '350', path, '-depth', '8', 'png:-'] p1 = subprocess.Popen(convert, stdout=subprocess.PIPE) tess = ['tesseract', 'stdin', 'stdout'] p2 = subprocess.Popen(tess, stdin=p1.stdout, stdout=subprocess.PIPE) out, err = p2.communicate() extracted_str = out return extracted_str
[ "def", "to_text", "(", "path", ")", ":", "import", "subprocess", "from", "distutils", "import", "spawn", "# Check for dependencies. Needs Tesseract and Imagemagick installed.", "if", "not", "spawn", ".", "find_executable", "(", "'tesseract'", ")", ":", "raise", "EnvironmentError", "(", "'tesseract not installed.'", ")", "if", "not", "spawn", ".", "find_executable", "(", "'convert'", ")", ":", "raise", "EnvironmentError", "(", "'imagemagick not installed.'", ")", "# convert = \"convert -density 350 %s -depth 8 tiff:-\" % (path)", "convert", "=", "[", "'convert'", ",", "'-density'", ",", "'350'", ",", "path", ",", "'-depth'", ",", "'8'", ",", "'png:-'", "]", "p1", "=", "subprocess", ".", "Popen", "(", "convert", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "tess", "=", "[", "'tesseract'", ",", "'stdin'", ",", "'stdout'", "]", "p2", "=", "subprocess", ".", "Popen", "(", "tess", ",", "stdin", "=", "p1", ".", "stdout", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "p2", ".", "communicate", "(", ")", "extracted_str", "=", "out", "return", "extracted_str" ]
Wraps Tesseract OCR. Parameters ---------- path : str path of electronic invoice in JPG or PNG format Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format
[ "Wraps", "Tesseract", "OCR", "." ]
python
train
27.971429
jwkvam/bowtie
bowtie/_app.py
https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L161-L166
def cells(self) -> Generator[Tuple[int, int], None, None]: """Generate cells in span.""" yield from itertools.product( range(self.row_start, self.row_end), range(self.column_start, self.column_end) )
[ "def", "cells", "(", "self", ")", "->", "Generator", "[", "Tuple", "[", "int", ",", "int", "]", ",", "None", ",", "None", "]", ":", "yield", "from", "itertools", ".", "product", "(", "range", "(", "self", ".", "row_start", ",", "self", ".", "row_end", ")", ",", "range", "(", "self", ".", "column_start", ",", "self", ".", "column_end", ")", ")" ]
Generate cells in span.
[ "Generate", "cells", "in", "span", "." ]
python
train
40.333333
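itertools.product over the two half-open ranges yields the cells in row-major order; for a span covering rows 0-1 and columns 2-3 (end-exclusive):

import itertools

print(list(itertools.product(range(0, 2), range(2, 4))))
# [(0, 2), (0, 3), (1, 2), (1, 3)]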
shazow/workerpool
samples/blockingworker.py
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/samples/blockingworker.py#L22-L34
def contract(self, jobs, result): """ Perform a contract on a number of jobs and block until a result is retrieved for each job. """ for j in jobs: WorkerPool.put(self, j) r = [] for i in xrange(len(jobs)): r.append(result.get()) return r
[ "def", "contract", "(", "self", ",", "jobs", ",", "result", ")", ":", "for", "j", "in", "jobs", ":", "WorkerPool", ".", "put", "(", "self", ",", "j", ")", "r", "=", "[", "]", "for", "i", "in", "xrange", "(", "len", "(", "jobs", ")", ")", ":", "r", ".", "append", "(", "result", ".", "get", "(", ")", ")", "return", "r" ]
Perform a contract on a number of jobs and block until a result is retrieved for each job.
[ "Perform", "a", "contract", "on", "a", "number", "of", "jobs", "and", "block", "until", "a", "result", "is", "retrieved", "for", "each", "job", "." ]
python
train
24.307692
baruwa-enterprise/BaruwaAPI
BaruwaAPI/resource.py
https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L290-L297
def get_authservers(self, domainid, page=None): """Get Authentication servers""" opts = {} if page: opts['page'] = page return self.api_call( ENDPOINTS['authservers']['list'], dict(domainid=domainid), **opts)
[ "def", "get_authservers", "(", "self", ",", "domainid", ",", "page", "=", "None", ")", ":", "opts", "=", "{", "}", "if", "page", ":", "opts", "[", "'page'", "]", "=", "page", "return", "self", ".", "api_call", "(", "ENDPOINTS", "[", "'authservers'", "]", "[", "'list'", "]", ",", "dict", "(", "domainid", "=", "domainid", ")", ",", "*", "*", "opts", ")" ]
Get Authentication servers
[ "Get", "Authentication", "servers" ]
python
train
33.625
kislyuk/aegea
aegea/packages/github3/repos/repo.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L741-L756
def create_ref(self, ref, sha): """Create a reference in this repository. :param str ref: (required), fully qualified name of the reference, e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and contain at least two slashes, GitHub's API will reject it. :param str sha: (required), SHA1 value to set the reference to :returns: :class:`Reference <github3.git.Reference>` if successful else None """ json = None if ref and ref.count('/') >= 2 and sha: data = {'ref': ref, 'sha': sha} url = self._build_url('git', 'refs', base_url=self._api) json = self._json(self._post(url, data=data), 201) return Reference(json, self) if json else None
[ "def", "create_ref", "(", "self", ",", "ref", ",", "sha", ")", ":", "json", "=", "None", "if", "ref", "and", "ref", ".", "count", "(", "'/'", ")", ">=", "2", "and", "sha", ":", "data", "=", "{", "'ref'", ":", "ref", ",", "'sha'", ":", "sha", "}", "url", "=", "self", ".", "_build_url", "(", "'git'", ",", "'refs'", ",", "base_url", "=", "self", ".", "_api", ")", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "url", ",", "data", "=", "data", ")", ",", "201", ")", "return", "Reference", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
Create a reference in this repository. :param str ref: (required), fully qualified name of the reference, e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and contain at least two slashes, GitHub's API will reject it. :param str sha: (required), SHA1 value to set the reference to :returns: :class:`Reference <github3.git.Reference>` if successful else None
[ "Create", "a", "reference", "in", "this", "repository", "." ]
python
train
48.125
saltstack/salt
salt/proxy/marathon.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/marathon.py#L54-L76
def ping():
    '''
    Is the Marathon API responding?
    '''
    try:
        response = salt.utils.http.query(
            "{0}/ping".format(CONFIG[CONFIG_BASE_URL]),
            decode_type='plain',
            decode=True,
        )
        log.debug(
            'marathon.info returned successfully: %s',
            response,
        )
        if 'text' in response and response['text'].strip() == 'pong':
            return True
    except Exception as ex:
        log.error(
            'error calling marathon.info with base_url %s: %s',
            CONFIG[CONFIG_BASE_URL],
            ex,
        )
    return False
[ "def", "ping", "(", ")", ":", "try", ":", "response", "=", "salt", ".", "utils", ".", "http", ".", "query", "(", "\"{0}/ping\"", ".", "format", "(", "CONFIG", "[", "CONFIG_BASE_URL", "]", ")", ",", "decode_type", "=", "'plain'", ",", "decode", "=", "True", ",", ")", "log", ".", "debug", "(", "'marathon.info returned successfully: %s'", ",", "response", ",", ")", "if", "'text'", "in", "response", "and", "response", "[", "'text'", "]", ".", "strip", "(", ")", "==", "'pong'", ":", "return", "True", "except", "Exception", "as", "ex", ":", "log", ".", "error", "(", "'error calling marathon.info with base_url %s: %s'", ",", "CONFIG", "[", "CONFIG_BASE_URL", "]", ",", "ex", ",", ")", "return", "False" ]
Is the Marathon API responding?
[ "Is", "the", "marathon", "api", "responding?" ]
python
train
26.391304