repo                stringlengths (7 - 54)
path                stringlengths (4 - 192)
url                 stringlengths (87 - 284)
code                stringlengths (78 - 104k)
code_tokens         list
docstring           stringlengths (1 - 46.9k)
docstring_tokens    list
language            stringclasses (1 value)
partition           stringclasses (3 values)
agoragames/haigha
haigha/frames/frame.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/frame.py#L49-L84
def read_frames(cls, reader):
    '''
    Read one or more frames from an IO stream. Buffer must support file
    object interface.

    After reading, caller will need to check if there are bytes remaining
    in the stream. If there are, then that implies that there is one or
    more incomplete frames and more data needs to be read. The position of
    the cursor in the frame stream will mark the point at which the last
    good frame was read. If the caller is expecting a sequence of frames
    and only received a part of that sequence, they are responsible for
    buffering those frames until the rest of the frames in the sequence
    have arrived.
    '''
    rval = deque()
    while True:
        frame_start_pos = reader.tell()
        try:
            frame = Frame._read_frame(reader)
        except Reader.BufferUnderflow:
            # No more data in the stream
            frame = None
        except Reader.ReaderError as e:
            # Some other format error
            raise Frame.FormatError, str(e), sys.exc_info()[-1]
        except struct.error as e:
            raise Frame.FormatError, str(e), sys.exc_info()[-1]

        if frame is None:
            reader.seek(frame_start_pos)
            break

        rval.append(frame)

    return rval
[ "def", "read_frames", "(", "cls", ",", "reader", ")", ":", "rval", "=", "deque", "(", ")", "while", "True", ":", "frame_start_pos", "=", "reader", ".", "tell", "(", ")", "try", ":", "frame", "=", "Frame", ".", "_read_frame", "(", "reader", ")", "except", "Reader", ".", "BufferUnderflow", ":", "# No more data in the stream", "frame", "=", "None", "except", "Reader", ".", "ReaderError", "as", "e", ":", "# Some other format error", "raise", "Frame", ".", "FormatError", ",", "str", "(", "e", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "-", "1", "]", "except", "struct", ".", "error", "as", "e", ":", "raise", "Frame", ".", "FormatError", ",", "str", "(", "e", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "-", "1", "]", "if", "frame", "is", "None", ":", "reader", ".", "seek", "(", "frame_start_pos", ")", "break", "rval", ".", "append", "(", "frame", ")", "return", "rval" ]
Read one or more frames from an IO stream. Buffer must support file object interface. After reading, caller will need to check if there are bytes remaining in the stream. If there are, then that implies that there is one or more incomplete frames and more data needs to be read. The position of the cursor in the frame stream will mark the point at which the last good frame was read. If the caller is expecting a sequence of frames and only received a part of that sequence, they are responsible for buffering those frames until the rest of the frames in the sequence have arrived.
[ "Read", "one", "or", "more", "frames", "from", "an", "IO", "stream", ".", "Buffer", "must", "support", "file", "object", "interface", "." ]
python
train
smarie/python-parsyfiles
parsyfiles/parsing_registries.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L1351-L1413
def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER) \
        -> Tuple[List[Converter], List[Converter], List[Converter]]:
    """
    Utility method to find matching converters or conversion chains.

    :param from_type: a required type of input object, or JOKER for 'wildcard'(*). WARNING:
        "from_type=AnyObject/object/Any" means "all converters able to source from anything", which is different
        from "from_type=JOKER" which means "all converters whatever their source type".
    :param to_type: a required type of output object, or JOKER for 'wildcard'(*). WARNING:
        "to_type=AnyObject/object/Any" means "all converters able to produce any type of object", which is
        different from "to_type=JOKER" which means "all converters whatever type they are able to produce".
    :return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact.
        The order of each list is from *less relevant* to *most relevant*
    """
    if from_type is JOKER and to_type is JOKER:
        matching_dest_generic = self._generic_nonstrict_conversion_chains.copy() + \
                                self._generic_conversion_chains.copy()
        matching_dest_approx = []
        matching_dest_exact = self._specific_non_strict_conversion_chains.copy() + \
                              self._specific_conversion_chains.copy()
    else:
        matching_dest_generic, matching_dest_approx, matching_dest_exact = [], [], []

        # first transform any 'Any' type requirement into the official class for that
        to_type = get_validated_type(to_type, 'to_type', enforce_not_joker=False)

        # handle generic converters first
        for c in (self._generic_nonstrict_conversion_chains + self._generic_conversion_chains):
            match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
                                                                            from_type=from_type,
                                                                            to_type=to_type)
            if match:
                # match
                if is_any_type(to_type):
                    # special case where desired to_type is already Any : in that case a generic converter will
                    # appear in 'exact match'
                    matching_dest_exact.append(c)
                else:
                    # this is a match from a generic parser to a specific type : add in 'generic' cataegory
                    matching_dest_generic.append(c)

        # then the specific
        for c in (self._specific_non_strict_conversion_chains + self._specific_conversion_chains):
            match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
                                                                            from_type=from_type,
                                                                            to_type=to_type)
            if match:
                if not is_any_type(to_type):
                    if dest_exact:
                        # we dont care if source is exact or approximate as long as dest is exact
                        matching_dest_exact.append(c)
                    else:
                        # this means that dest is approximate.
                        matching_dest_approx.append(c)
                else:
                    # we only want to keep the generic ones, and they have already been added
                    pass

    return matching_dest_generic, matching_dest_approx, matching_dest_exact
[ "def", "get_all_conversion_chains", "(", "self", ",", "from_type", ":", "Type", "[", "Any", "]", "=", "JOKER", ",", "to_type", ":", "Type", "[", "Any", "]", "=", "JOKER", ")", "->", "Tuple", "[", "List", "[", "Converter", "]", ",", "List", "[", "Converter", "]", ",", "List", "[", "Converter", "]", "]", ":", "if", "from_type", "is", "JOKER", "and", "to_type", "is", "JOKER", ":", "matching_dest_generic", "=", "self", ".", "_generic_nonstrict_conversion_chains", ".", "copy", "(", ")", "+", "self", ".", "_generic_conversion_chains", ".", "copy", "(", ")", "matching_dest_approx", "=", "[", "]", "matching_dest_exact", "=", "self", ".", "_specific_non_strict_conversion_chains", ".", "copy", "(", ")", "+", "self", ".", "_specific_conversion_chains", ".", "copy", "(", ")", "else", ":", "matching_dest_generic", ",", "matching_dest_approx", ",", "matching_dest_exact", "=", "[", "]", ",", "[", "]", ",", "[", "]", "# first transform any 'Any' type requirement into the official class for that", "to_type", "=", "get_validated_type", "(", "to_type", ",", "'to_type'", ",", "enforce_not_joker", "=", "False", ")", "# handle generic converters first", "for", "c", "in", "(", "self", ".", "_generic_nonstrict_conversion_chains", "+", "self", ".", "_generic_conversion_chains", ")", ":", "match", ",", "source_exact", ",", "dest_exact", "=", "c", ".", "is_able_to_convert_detailed", "(", "strict", "=", "self", ".", "strict", ",", "from_type", "=", "from_type", ",", "to_type", "=", "to_type", ")", "if", "match", ":", "# match", "if", "is_any_type", "(", "to_type", ")", ":", "# special case where desired to_type is already Any : in that case a generic converter will", "# appear in 'exact match'", "matching_dest_exact", ".", "append", "(", "c", ")", "else", ":", "# this is a match from a generic parser to a specific type : add in 'generic' cataegory", "matching_dest_generic", ".", "append", "(", "c", ")", "# then the specific", "for", "c", "in", "(", "self", ".", "_specific_non_strict_conversion_chains", "+", "self", ".", "_specific_conversion_chains", ")", ":", "match", ",", "source_exact", ",", "dest_exact", "=", "c", ".", "is_able_to_convert_detailed", "(", "strict", "=", "self", ".", "strict", ",", "from_type", "=", "from_type", ",", "to_type", "=", "to_type", ")", "if", "match", ":", "if", "not", "is_any_type", "(", "to_type", ")", ":", "if", "dest_exact", ":", "# we dont care if source is exact or approximate as long as dest is exact", "matching_dest_exact", ".", "append", "(", "c", ")", "else", ":", "# this means that dest is approximate.", "matching_dest_approx", ".", "append", "(", "c", ")", "else", ":", "# we only want to keep the generic ones, and they have already been added", "pass", "return", "matching_dest_generic", ",", "matching_dest_approx", ",", "matching_dest_exact" ]
Utility method to find matching converters or conversion chains. :param from_type: a required type of input object, or JOKER for 'wildcard'(*) . WARNING: "from_type=AnyObject/object/Any" means "all converters able to source from anything", which is different from "from_type=JOKER" which means "all converters whatever their source type". :param to_type: a required type of output object, or JOKER for 'wildcard'(*) . WARNING: "to_type=AnyObject/object/Any" means "all converters able to produce any type of object", which is different from "to_type=JOKER" which means "all converters whatever type they are able to produce". :return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact. The order of each list is from *less relevant* to *most relevant*
[ "Utility", "method", "to", "find", "matching", "converters", "or", "conversion", "chains", "." ]
python
train
jf-parent/brome
brome/core/selector.py
https://github.com/jf-parent/brome/blob/784f45d96b83b703dd2181cb59ca8ea777c2510e/brome/core/selector.py#L105-L174
def resolve_selector(self):
    """Resolve the selector variable in place
    """
    effective_selector_list = []

    for current_selector in self._selector_list:

        # INLINE SELECTOR
        if self.get_type(current_selector) != 'selector_variable':
            effective_selector_list.append(current_selector)

        # SELECTOR VARIABLE
        else:
            # Make sure the proxy driver have a selector dictionary
            if self.get_type(current_selector) == 'selector_variable':
                if not BROME_CONFIG['selector_dict']:
                    raise Exception("""
                        You must provide a selector dictionary if you want
                        to use the selector variable type
                    """)

                # Make sure that the selector dictionary
                # contains the selector variable
                if self._get_selector(current_selector) \
                        not in BROME_CONFIG['selector_dict']:
                    raise Exception("""
                        Cannot find the selector variable (%s)
                        in the selector dictionary
                    """ % self._get_selector(current_selector))

                effective_selector = BROME_CONFIG['selector_dict'][self._get_selector(current_selector)]  # noqa

                if type(effective_selector) is dict:
                    current_browser_id = False
                    keys = [key for key in effective_selector.keys()
                            if key not in ['default', 'hr']]

                    for key in keys:
                        for target in key.split('|'):
                            try:
                                re.search(
                                    target.lower(),
                                    self._pdriver.get_id().lower()
                                ).group(0)
                                current_browser_id = key
                            except AttributeError:
                                pass

                    if current_browser_id:
                        effective_selector_list.append(
                            effective_selector.get(current_browser_id)
                        )
                    else:
                        effective_selector_list.append(
                            effective_selector.get('default')
                        )

                else:
                    if self.get_type(effective_selector) in \
                            [value for key, value in SELECTOR_DICT.items()
                             if key != 'selector_variable']:
                        effective_selector_list.append(effective_selector)
                    else:
                        raise Exception("""
                            All selector need to start with either:
                            'nm:' (name), 'xp:' (xpath), 'cn:' (classname),
                            'id:' (id), 'cs:' (css), 'tn:' (tag name),
                            'lt:' (link text), 'pl:' (partial link text)
                        """)

    return effective_selector_list
[ "def", "resolve_selector", "(", "self", ")", ":", "effective_selector_list", "=", "[", "]", "for", "current_selector", "in", "self", ".", "_selector_list", ":", "# INLINE SELECTOR", "if", "self", ".", "get_type", "(", "current_selector", ")", "!=", "'selector_variable'", ":", "effective_selector_list", ".", "append", "(", "current_selector", ")", "# SELECTOR VARIABLE", "else", ":", "# Make sure the proxy driver have a selector dictionary", "if", "self", ".", "get_type", "(", "current_selector", ")", "==", "'selector_variable'", ":", "if", "not", "BROME_CONFIG", "[", "'selector_dict'", "]", ":", "raise", "Exception", "(", "\"\"\"\n You must provide a selector dictionary if you want\n to use the selector variable type\n \"\"\"", ")", "# Make sure that the selector dictionary", "# contains the selector variable", "if", "self", ".", "_get_selector", "(", "current_selector", ")", "not", "in", "BROME_CONFIG", "[", "'selector_dict'", "]", ":", "raise", "Exception", "(", "\"\"\"\n Cannot find the selector variable (%s)\n in the selector dictionary\n \"\"\"", "%", "self", ".", "_get_selector", "(", "current_selector", ")", ")", "effective_selector", "=", "BROME_CONFIG", "[", "'selector_dict'", "]", "[", "self", ".", "_get_selector", "(", "current_selector", ")", "]", "# noqa", "if", "type", "(", "effective_selector", ")", "is", "dict", ":", "current_browser_id", "=", "False", "keys", "=", "[", "key", "for", "key", "in", "effective_selector", ".", "keys", "(", ")", "if", "key", "not", "in", "[", "'default'", ",", "'hr'", "]", "]", "for", "key", "in", "keys", ":", "for", "target", "in", "key", ".", "split", "(", "'|'", ")", ":", "try", ":", "re", ".", "search", "(", "target", ".", "lower", "(", ")", ",", "self", ".", "_pdriver", ".", "get_id", "(", ")", ".", "lower", "(", ")", ")", ".", "group", "(", "0", ")", "current_browser_id", "=", "key", "except", "AttributeError", ":", "pass", "if", "current_browser_id", ":", "effective_selector_list", ".", "append", "(", "effective_selector", ".", "get", "(", "current_browser_id", ")", ")", "else", ":", "effective_selector_list", ".", "append", "(", "effective_selector", ".", "get", "(", "'default'", ")", ")", "else", ":", "if", "self", ".", "get_type", "(", "effective_selector", ")", "in", "[", "value", "for", "key", ",", "value", "in", "SELECTOR_DICT", ".", "items", "(", ")", "if", "key", "!=", "'selector_variable'", "]", ":", "effective_selector_list", ".", "append", "(", "effective_selector", ")", "else", ":", "raise", "Exception", "(", "\"\"\"\n All selector need to start with either:\n 'nm:' (name), 'xp:' (xpath), 'cn:' (classname),\n 'id:' (id), 'cs:' (css), 'tn:' (tag name),\n 'lt:' (link text), 'pl:' (partial link text)\n \"\"\"", ")", "return", "effective_selector_list" ]
Resolve the selector variable in place
[ "Resolve", "the", "selector", "variable", "in", "place" ]
python
train
4degrees/riffle
source/riffle/model.py
https://github.com/4degrees/riffle/blob/e5a0d908df8c93ff1ee7abdda8875fd1667df53d/source/riffle/model.py#L105-L122
def fetchChildren(self):
    '''Fetch and return new children.

    Will only fetch children whilst canFetchMore is True.

    .. note::

        It is the caller's responsibility to add each fetched child to this
        parent if desired using :py:meth:`Item.addChild`.

    '''
    if not self.canFetchMore():
        return []

    children = self._fetchChildren()
    self._fetched = True

    return children
[ "def", "fetchChildren", "(", "self", ")", ":", "if", "not", "self", ".", "canFetchMore", "(", ")", ":", "return", "[", "]", "children", "=", "self", ".", "_fetchChildren", "(", ")", "self", ".", "_fetched", "=", "True", "return", "children" ]
Fetch and return new children. Will only fetch children whilst canFetchMore is True. .. note:: It is the caller's responsibility to add each fetched child to this parent if desired using :py:meth:`Item.addChild`.
[ "Fetch", "and", "return", "new", "children", "." ]
python
test
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L884-L904
def rpc_is_name_zonefile_hash(self, name, zonefile_hash, **con_info):
    """
    Was a zone file hash issued by a name?
    Return {'result': True/False}
    """
    if not check_name(name) and not check_subdomain(name):
        return {'error': 'invalid name', 'http_status': 400}

    if not check_string(zonefile_hash, min_length=LENGTHS['value_hash']*2,
                        max_length=LENGTHS['value_hash']*2, pattern=OP_HEX_PATTERN):
        return {'error': 'invalid zone file hash', 'http_status': 400}

    was_set = None
    if check_name(name):
        # on-chain name
        db = get_db_state(self.working_dir)
        was_set = db.is_name_zonefile_hash(name, zonefile_hash)
        db.close()
    else:
        # off-chain name
        was_set = is_subdomain_zonefile_hash(name, zonefile_hash)

    return self.success_response({'result': was_set})
[ "def", "rpc_is_name_zonefile_hash", "(", "self", ",", "name", ",", "zonefile_hash", ",", "*", "*", "con_info", ")", ":", "if", "not", "check_name", "(", "name", ")", "and", "not", "check_subdomain", "(", "name", ")", ":", "return", "{", "'error'", ":", "'invalid name'", ",", "'http_status'", ":", "400", "}", "if", "not", "check_string", "(", "zonefile_hash", ",", "min_length", "=", "LENGTHS", "[", "'value_hash'", "]", "*", "2", ",", "max_length", "=", "LENGTHS", "[", "'value_hash'", "]", "*", "2", ",", "pattern", "=", "OP_HEX_PATTERN", ")", ":", "return", "{", "'error'", ":", "'invalid zone file hash'", ",", "'http_status'", ":", "400", "}", "was_set", "=", "None", "if", "check_name", "(", "name", ")", ":", "# on-chain name ", "db", "=", "get_db_state", "(", "self", ".", "working_dir", ")", "was_set", "=", "db", ".", "is_name_zonefile_hash", "(", "name", ",", "zonefile_hash", ")", "db", ".", "close", "(", ")", "else", ":", "# off-chain name ", "was_set", "=", "is_subdomain_zonefile_hash", "(", "name", ",", "zonefile_hash", ")", "return", "self", ".", "success_response", "(", "{", "'result'", ":", "was_set", "}", ")" ]
Was a zone file hash issued by a name? Return {'result': True/False}
[ "Was", "a", "zone", "file", "hash", "issued", "by", "a", "name?", "Return", "{", "result", ":", "True", "/", "False", "}" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/local_env.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/local_env.py#L1119-L1158
def get_nn_info(self, structure, n):
    """
    Get all near-neighbor sites as well as the associated image locations
    and weights of the site with index n using the closest neighbor
    distance-based method.

    Args:
        structure (Structure): input structure.
        n (integer): index of site for which to determine near
            neighbors.

    Returns:
        siw (list of tuples (Site, array, float)): tuples, each one
            of which represents a neighbor site, its image location,
            and its weight.
    """

    site = structure[n]
    neighs_dists = structure.get_neighbors(site, self.cutoff)

    siw = []
    if self.get_all_sites == True:
        for s, dist in neighs_dists:
            w = dist
            siw.append({'site': s,
                        'image': self._get_image(structure, s),
                        'weight': w,
                        'site_index': self._get_original_site(structure, s)})
    else:
        min_dist = min([dist for neigh, dist in neighs_dists])
        for s, dist in neighs_dists:
            if dist < (1.0 + self.tol) * min_dist:
                w = min_dist / dist
                siw.append({'site': s,
                            'image': self._get_image(structure, s),
                            'weight': w,
                            'site_index': self._get_original_site(structure, s)})

    return siw
[ "def", "get_nn_info", "(", "self", ",", "structure", ",", "n", ")", ":", "site", "=", "structure", "[", "n", "]", "neighs_dists", "=", "structure", ".", "get_neighbors", "(", "site", ",", "self", ".", "cutoff", ")", "siw", "=", "[", "]", "if", "self", ".", "get_all_sites", "==", "True", ":", "for", "s", ",", "dist", "in", "neighs_dists", ":", "w", "=", "dist", "siw", ".", "append", "(", "{", "'site'", ":", "s", ",", "'image'", ":", "self", ".", "_get_image", "(", "structure", ",", "s", ")", ",", "'weight'", ":", "w", ",", "'site_index'", ":", "self", ".", "_get_original_site", "(", "structure", ",", "s", ")", "}", ")", "else", ":", "min_dist", "=", "min", "(", "[", "dist", "for", "neigh", ",", "dist", "in", "neighs_dists", "]", ")", "for", "s", ",", "dist", "in", "neighs_dists", ":", "if", "dist", "<", "(", "1.0", "+", "self", ".", "tol", ")", "*", "min_dist", ":", "w", "=", "min_dist", "/", "dist", "siw", ".", "append", "(", "{", "'site'", ":", "s", ",", "'image'", ":", "self", ".", "_get_image", "(", "structure", ",", "s", ")", ",", "'weight'", ":", "w", ",", "'site_index'", ":", "self", ".", "_get_original_site", "(", "structure", ",", "s", ")", "}", ")", "return", "siw" ]
Get all near-neighbor sites as well as the associated image locations and weights of the site with index n using the closest neighbor distance-based method. Args: structure (Structure): input structure. n (integer): index of site for which to determine near neighbors. Returns: siw (list of tuples (Site, array, float)): tuples, each one of which represents a neighbor site, its image location, and its weight.
[ "Get", "all", "near", "-", "neighbor", "sites", "as", "well", "as", "the", "associated", "image", "locations", "and", "weights", "of", "the", "site", "with", "index", "n", "using", "the", "closest", "neighbor", "distance", "-", "based", "method", "." ]
python
train
TestInABox/stackInABox
stackinabox/services/service.py
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/services/service.py#L267-L290
def get_service_regex(base_url, service_url, sub_service):
    """Get the regex for a given service.

    :param base_url: string - Base URI
    :param service_url: string - Service URI under the Base URI
    :param sub_service: boolean - is the Service URI for a sub-service?

    :returns: Python Regex object containing the regex for the Service
    """
    # if the specified service_url is already a regex
    # then just use. Otherwise create what we need
    if StackInABoxService.is_regex(service_url):
        logger.debug('StackInABoxService: Received regex {0} for use...'
                     .format(service_url.pattern))

        # Validate the regex against StackInABoxService requirement
        StackInABoxService.validate_regex(service_url, sub_service)

        return service_url
    else:
        regex = '^{0}{1}$'.format('', service_url)
        logger.debug('StackInABoxService: {0} + {1} -> {2}'
                     .format(base_url, service_url, regex))
        return re.compile(regex)
[ "def", "get_service_regex", "(", "base_url", ",", "service_url", ",", "sub_service", ")", ":", "# if the specified service_url is already a regex", "# then just use. Otherwise create what we need", "if", "StackInABoxService", ".", "is_regex", "(", "service_url", ")", ":", "logger", ".", "debug", "(", "'StackInABoxService: Received regex {0} for use...'", ".", "format", "(", "service_url", ".", "pattern", ")", ")", "# Validate the regex against StackInABoxService requirement", "StackInABoxService", ".", "validate_regex", "(", "service_url", ",", "sub_service", ")", "return", "service_url", "else", ":", "regex", "=", "'^{0}{1}$'", ".", "format", "(", "''", ",", "service_url", ")", "logger", ".", "debug", "(", "'StackInABoxService: {0} + {1} -> {2}'", ".", "format", "(", "base_url", ",", "service_url", ",", "regex", ")", ")", "return", "re", ".", "compile", "(", "regex", ")" ]
Get the regex for a given service. :param base_url: string - Base URI :param service_url: string - Service URI under the Base URI :param sub_service: boolean - is the Service URI for a sub-service? :returns: Python Regex object containing the regex for the Service
[ "Get", "the", "regex", "for", "a", "given", "service", "." ]
python
train
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/base.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/base.py#L707-L736
def normalize(self, metric, prefix=None, fix_case=False):
    """
    Turn a metric into a well-formed metric name
    prefix.b.c

    :param metric The metric name to normalize
    :param prefix A prefix to to add to the normalized name, default None
    :param fix_case A boolean, indicating whether to make sure that the metric name returned is in "snake_case"
    """
    if isinstance(metric, text_type):
        metric = unicodedata.normalize('NFKD', metric).encode('ascii', 'ignore')

    if fix_case:
        name = self.convert_to_underscore_separated(metric)
        if prefix is not None:
            prefix = self.convert_to_underscore_separated(prefix)
    else:
        name = re.sub(br"[,\+\*\-/()\[\]{}\s]", b"_", metric)
    # Eliminate multiple _
    name = re.sub(br"__+", b"_", name)
    # Don't start/end with _
    name = re.sub(br"^_", b"", name)
    name = re.sub(br"_$", b"", name)
    # Drop ._ and _.
    name = re.sub(br"\._", b".", name)
    name = re.sub(br"_\.", b".", name)

    if prefix is not None:
        return ensure_bytes(prefix) + b"." + name
    else:
        return name
[ "def", "normalize", "(", "self", ",", "metric", ",", "prefix", "=", "None", ",", "fix_case", "=", "False", ")", ":", "if", "isinstance", "(", "metric", ",", "text_type", ")", ":", "metric", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "metric", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", "if", "fix_case", ":", "name", "=", "self", ".", "convert_to_underscore_separated", "(", "metric", ")", "if", "prefix", "is", "not", "None", ":", "prefix", "=", "self", ".", "convert_to_underscore_separated", "(", "prefix", ")", "else", ":", "name", "=", "re", ".", "sub", "(", "br\"[,\\+\\*\\-/()\\[\\]{}\\s]\"", ",", "b\"_\"", ",", "metric", ")", "# Eliminate multiple _", "name", "=", "re", ".", "sub", "(", "br\"__+\"", ",", "b\"_\"", ",", "name", ")", "# Don't start/end with _", "name", "=", "re", ".", "sub", "(", "br\"^_\"", ",", "b\"\"", ",", "name", ")", "name", "=", "re", ".", "sub", "(", "br\"_$\"", ",", "b\"\"", ",", "name", ")", "# Drop ._ and _.", "name", "=", "re", ".", "sub", "(", "br\"\\._\"", ",", "b\".\"", ",", "name", ")", "name", "=", "re", ".", "sub", "(", "br\"_\\.\"", ",", "b\".\"", ",", "name", ")", "if", "prefix", "is", "not", "None", ":", "return", "ensure_bytes", "(", "prefix", ")", "+", "b\".\"", "+", "name", "else", ":", "return", "name" ]
Turn a metric into a well-formed metric name prefix.b.c :param metric The metric name to normalize :param prefix A prefix to to add to the normalized name, default None :param fix_case A boolean, indicating whether to make sure that the metric name returned is in "snake_case"
[ "Turn", "a", "metric", "into", "a", "well", "-", "formed", "metric", "name", "prefix", ".", "b", ".", "c", ":", "param", "metric", "The", "metric", "name", "to", "normalize", ":", "param", "prefix", "A", "prefix", "to", "to", "add", "to", "the", "normalized", "name", "default", "None", ":", "param", "fix_case", "A", "boolean", "indicating", "whether", "to", "make", "sure", "that", "the", "metric", "name", "returned", "is", "in", "snake_case" ]
python
train
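The non-snake_case branch of normalize above is just a chain of byte-level regex substitutions, so its effect is easy to check in isolation. A minimal standalone sketch of that same chain; the helper name normalize_metric and the sample metric are illustrative, not taken from the check itself:

import re

def normalize_metric(metric: bytes) -> bytes:
    # Replace separators and punctuation with underscores
    name = re.sub(br"[,\+\*\-/()\[\]{}\s]", b"_", metric)
    # Collapse runs of underscores
    name = re.sub(br"__+", b"_", name)
    # Trim leading and trailing underscores
    name = re.sub(br"^_", b"", name)
    name = re.sub(br"_$", b"", name)
    # Drop underscores adjacent to dots
    name = re.sub(br"\._", b".", name)
    name = re.sub(br"_\.", b".", name)
    return name

print(normalize_metric(b"jvm.gc (total time)"))  # b'jvm.gc_total_time'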
karel-brinda/rnftools
rnftools/rnfformat/FqMerger.py
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/FqMerger.py#L88-L119
def run(self):
    """Run merging.
    """

    print("", file=sys.stderr)
    print("Going to merge/convert RNF-FASTQ files.", file=sys.stderr)
    print("", file=sys.stderr)
    print(" mode: ", self.mode, file=sys.stderr)
    print(" input files: ", ", ".join(self.input_files_fn), file=sys.stderr)
    print(" output files: ", ", ".join(self.output_files_fn), file=sys.stderr)
    print("", file=sys.stderr)

    while len(self.i_files_weighted) > 0:
        file_id = self.rng.randint(0, len(self.i_files_weighted) - 1)

        for i in range(READS_IN_GROUP * self._reads_in_tuple):
            if self.i_files_weighted[file_id].closed:
                del self.i_files_weighted[file_id]
                break

            ln1 = self.i_files_weighted[file_id].readline()
            ln2 = self.i_files_weighted[file_id].readline()
            ln3 = self.i_files_weighted[file_id].readline()
            ln4 = self.i_files_weighted[file_id].readline()

            if ln1 == "" or ln2 == "" or ln3 == "" or ln4 == "":
                self.i_files_weighted[file_id].close()
                del self.i_files_weighted[file_id]
                break

            assert ln1[0] == "@", ln1
            assert ln3[0] == "+", ln3

            self.output.save_read(ln1, ln2, ln3, ln4)

    self.output.close()
[ "def", "run", "(", "self", ")", ":", "print", "(", "\"\"", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"Going to merge/convert RNF-FASTQ files.\"", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"\"", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\" mode: \"", ",", "self", ".", "mode", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\" input files: \"", ",", "\", \"", ".", "join", "(", "self", ".", "input_files_fn", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\" output files: \"", ",", "\", \"", ".", "join", "(", "self", ".", "output_files_fn", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"\"", ",", "file", "=", "sys", ".", "stderr", ")", "while", "len", "(", "self", ".", "i_files_weighted", ")", ">", "0", ":", "file_id", "=", "self", ".", "rng", ".", "randint", "(", "0", ",", "len", "(", "self", ".", "i_files_weighted", ")", "-", "1", ")", "for", "i", "in", "range", "(", "READS_IN_GROUP", "*", "self", ".", "_reads_in_tuple", ")", ":", "if", "self", ".", "i_files_weighted", "[", "file_id", "]", ".", "closed", ":", "del", "self", ".", "i_files_weighted", "[", "file_id", "]", "break", "ln1", "=", "self", ".", "i_files_weighted", "[", "file_id", "]", ".", "readline", "(", ")", "ln2", "=", "self", ".", "i_files_weighted", "[", "file_id", "]", ".", "readline", "(", ")", "ln3", "=", "self", ".", "i_files_weighted", "[", "file_id", "]", ".", "readline", "(", ")", "ln4", "=", "self", ".", "i_files_weighted", "[", "file_id", "]", ".", "readline", "(", ")", "if", "ln1", "==", "\"\"", "or", "ln2", "==", "\"\"", "or", "ln3", "==", "\"\"", "or", "ln4", "==", "\"\"", ":", "self", ".", "i_files_weighted", "[", "file_id", "]", ".", "close", "(", ")", "del", "self", ".", "i_files_weighted", "[", "file_id", "]", "break", "assert", "ln1", "[", "0", "]", "==", "\"@\"", ",", "ln1", "assert", "ln3", "[", "0", "]", "==", "\"+\"", ",", "ln3", "self", ".", "output", ".", "save_read", "(", "ln1", ",", "ln2", ",", "ln3", ",", "ln4", ")", "self", ".", "output", ".", "close", "(", ")" ]
Run merging.
[ "Run", "merging", "." ]
python
train
osrg/ryu
ryu/services/protocols/bgp/bgpspeaker.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/bgpspeaker.py#L1077-L1112
def vrfs_get(self, subcommand='routes', route_dist=None,
             route_family='all', format='json'):
    """ This method returns the existing vrfs.

    ``subcommand`` specifies one of the following.

    - 'routes': shows routes present for vrf
    - 'summary': shows configuration and summary of vrf

    ``route_dist`` specifies a route distinguisher value.
    If route_family is not 'all', this value must be specified.

    ``route_family`` specifies route family of the VRF.
    This parameter must be one of the following.

    - RF_VPN_V4 = 'ipv4'
    - RF_VPN_V6 = 'ipv6'
    - RF_L2_EVPN = 'evpn'
    - 'all' (default)

    ``format`` specifies the format of the response.
    This parameter must be one of the following.

    - 'json' (default)
    - 'cli'
    """
    show = {
        'format': format,
    }
    if route_family in SUPPORTED_VRF_RF:
        assert route_dist is not None
        show['params'] = ['vrf', subcommand, route_dist, route_family]
    else:
        show['params'] = ['vrf', subcommand, 'all']
    return call('operator.show', **show)
[ "def", "vrfs_get", "(", "self", ",", "subcommand", "=", "'routes'", ",", "route_dist", "=", "None", ",", "route_family", "=", "'all'", ",", "format", "=", "'json'", ")", ":", "show", "=", "{", "'format'", ":", "format", ",", "}", "if", "route_family", "in", "SUPPORTED_VRF_RF", ":", "assert", "route_dist", "is", "not", "None", "show", "[", "'params'", "]", "=", "[", "'vrf'", ",", "subcommand", ",", "route_dist", ",", "route_family", "]", "else", ":", "show", "[", "'params'", "]", "=", "[", "'vrf'", ",", "subcommand", ",", "'all'", "]", "return", "call", "(", "'operator.show'", ",", "*", "*", "show", ")" ]
This method returns the existing vrfs. ``subcommand`` specifies one of the following. - 'routes': shows routes present for vrf - 'summary': shows configuration and summary of vrf ``route_dist`` specifies a route distinguisher value. If route_family is not 'all', this value must be specified. ``route_family`` specifies route family of the VRF. This parameter must be one of the following. - RF_VPN_V4 = 'ipv4' - RF_VPN_V6 = 'ipv6' - RF_L2_EVPN = 'evpn' - 'all' (default) ``format`` specifies the format of the response. This parameter must be one of the following. - 'json' (default) - 'cli'
[ "This", "method", "returns", "the", "existing", "vrfs", "." ]
python
train
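A hedged usage sketch for vrfs_get, assuming a running BGPSpeaker instance and a VRF configured with route distinguisher '65000:100'; none of these values come from the snippet above and the speaker setup is only illustrative:

from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker

speaker = BGPSpeaker(as_number=65000, router_id='10.0.0.1')

# Summary of all VRFs, rendered for the CLI
print(speaker.vrfs_get(subcommand='summary', format='cli'))

# Routes of one IPv4 VRF; route_dist is mandatory when route_family != 'all'
routes_json = speaker.vrfs_get(subcommand='routes',
                               route_dist='65000:100',
                               route_family='ipv4')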
paulovn/sparql-kernel
sparqlkernel/connection.py
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/connection.py#L208-L220
def rdf_iterator(graph, lang, add_vtype=False):
    """
    Convert a Graph response into a double iterable, by triples and elements.
    Optionally add element type, and filter triples by language (on literals)
    """
    # Return the header row
    hdr = ('subject', 'predicate', 'object')
    yield hdr if not add_vtype else ((h, 'type') for h in hdr)
    # Now the data rows
    for row in graph:
        if lang and not lang_match_rdf(row, lang):
            continue
        yield ((unicode(c), gtype(c)) for c in row)
[ "def", "rdf_iterator", "(", "graph", ",", "lang", ",", "add_vtype", "=", "False", ")", ":", "# Return the header row", "hdr", "=", "(", "'subject'", ",", "'predicate'", ",", "'object'", ")", "yield", "hdr", "if", "not", "add_vtype", "else", "(", "(", "h", ",", "'type'", ")", "for", "h", "in", "hdr", ")", "# Now the data rows", "for", "row", "in", "graph", ":", "if", "lang", "and", "not", "lang_match_rdf", "(", "row", ",", "lang", ")", ":", "continue", "yield", "(", "(", "unicode", "(", "c", ")", ",", "gtype", "(", "c", ")", ")", "for", "c", "in", "row", ")" ]
Convert a Graph response into a double iterable, by triples and elements. Optionally add element type, and filter triples by language (on literals)
[ "Convert", "a", "Graph", "response", "into", "a", "double", "iterable", "by", "triples", "and", "elements", ".", "Optionally", "add", "element", "type", "and", "filter", "triples", "by", "language", "(", "on", "literals", ")" ]
python
train
rosenbrockc/ci
pyci/server.py
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/server.py#L240-L255
def uninstall(self, xmlpath):
    """Uninstalls the repository with the specified XML path from the server.
    """
    from os import path
    fullpath = path.abspath(path.expanduser(xmlpath))
    if fullpath in self.installed:
        repo = RepositorySettings(self, fullpath)
        if repo.name.lower() in self.repositories:
            del self.repositories[repo.name.lower()]
        if repo.name.lower() in self.archive:
            del self.archive[repo.name.lower()]
            self._save_archive()
        self.installed.remove(fullpath)
        self._save_installed()
    else:
        warn("The repository at {} was not installed to begin with.".format(fullpath))
[ "def", "uninstall", "(", "self", ",", "xmlpath", ")", ":", "from", "os", "import", "path", "fullpath", "=", "path", ".", "abspath", "(", "path", ".", "expanduser", "(", "xmlpath", ")", ")", "if", "fullpath", "in", "self", ".", "installed", ":", "repo", "=", "RepositorySettings", "(", "self", ",", "fullpath", ")", "if", "repo", ".", "name", ".", "lower", "(", ")", "in", "self", ".", "repositories", ":", "del", "self", ".", "repositories", "[", "repo", ".", "name", ".", "lower", "(", ")", "]", "if", "repo", ".", "name", ".", "lower", "(", ")", "in", "self", ".", "archive", ":", "del", "self", ".", "archive", "[", "repo", ".", "name", ".", "lower", "(", ")", "]", "self", ".", "_save_archive", "(", ")", "self", ".", "installed", ".", "remove", "(", "fullpath", ")", "self", ".", "_save_installed", "(", ")", "else", ":", "warn", "(", "\"The repository at {} was not installed to begin with.\"", ".", "format", "(", "fullpath", ")", ")" ]
Uninstalls the repository with the specified XML path from the server.
[ "Uninstalls", "the", "repository", "with", "the", "specified", "XML", "path", "from", "the", "server", "." ]
python
train
CLARIAH/grlc
src/utils.py
https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/utils.py#L30-L36
def getLoader(user, repo, sha=None, prov=None):
    """Build a fileLoader (LocalLoader or GithubLoader) for the given repository."""
    if user is None and repo is None:
        loader = LocalLoader()
    else:
        loader = GithubLoader(user, repo, sha, prov)
    return loader
[ "def", "getLoader", "(", "user", ",", "repo", ",", "sha", "=", "None", ",", "prov", "=", "None", ")", ":", "if", "user", "is", "None", "and", "repo", "is", "None", ":", "loader", "=", "LocalLoader", "(", ")", "else", ":", "loader", "=", "GithubLoader", "(", "user", ",", "repo", ",", "sha", ",", "prov", ")", "return", "loader" ]
Build a fileLoader (LocalLoader or GithubLoader) for the given repository.
[ "Build", "a", "fileLoader", "(", "LocalLoader", "or", "GithubLoader", ")", "for", "the", "given", "repository", "." ]
python
train
czepluch/pysecp256k1
c_secp256k1/__init__.py
https://github.com/czepluch/pysecp256k1/blob/164cb305857c5ba7a26adb6bd85459c5ea32ddd1/c_secp256k1/__init__.py#L213-L233
def ecdsa_sign_compact(msg32, seckey):
    """
    Takes the same message and seckey as _ecdsa_sign_recoverable
    Returns an unsigned char array of length 65 containing the signed message
    """
    # Assign 65 bytes to output
    output64 = ffi.new("unsigned char[65]")
    # ffi definition of recid
    recid = ffi.new("int *")

    lib.secp256k1_ecdsa_recoverable_signature_serialize_compact(
        ctx,
        output64,
        recid,
        _ecdsa_sign_recoverable(msg32, seckey)
    )

    # Assign recid to the last byte in the output array
    r = ffi.buffer(output64)[:64] + struct.pack("B", recid[0])
    assert len(r) == 65, len(r)
    return r
[ "def", "ecdsa_sign_compact", "(", "msg32", ",", "seckey", ")", ":", "# Assign 65 bytes to output", "output64", "=", "ffi", ".", "new", "(", "\"unsigned char[65]\"", ")", "# ffi definition of recid", "recid", "=", "ffi", ".", "new", "(", "\"int *\"", ")", "lib", ".", "secp256k1_ecdsa_recoverable_signature_serialize_compact", "(", "ctx", ",", "output64", ",", "recid", ",", "_ecdsa_sign_recoverable", "(", "msg32", ",", "seckey", ")", ")", "# Assign recid to the last byte in the output array", "r", "=", "ffi", ".", "buffer", "(", "output64", ")", "[", ":", "64", "]", "+", "struct", ".", "pack", "(", "\"B\"", ",", "recid", "[", "0", "]", ")", "assert", "len", "(", "r", ")", "==", "65", ",", "len", "(", "r", ")", "return", "r" ]
Takes the same message and seckey as _ecdsa_sign_recoverable Returns an unsigned char array of length 65 containing the signed message
[ "Takes", "the", "same", "message", "and", "seckey", "as", "_ecdsa_sign_recoverable", "Returns", "an", "unsigned", "char", "array", "of", "length", "65", "containing", "the", "signed", "message" ]
python
train
quintusdias/glymur
glymur/jp2box.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/jp2box.py#L436-L448
def write(self, fptr):
    """Write an Colour Specification box to file.
    """
    self._write_validate()
    length = 15 if self.icc_profile is None else 11 + len(self.icc_profile)
    fptr.write(struct.pack('>I4s', length, b'colr'))

    read_buffer = struct.pack('>BBBI',
                              self.method,
                              self.precedence,
                              self.approximation,
                              self.colorspace)
    fptr.write(read_buffer)
[ "def", "write", "(", "self", ",", "fptr", ")", ":", "self", ".", "_write_validate", "(", ")", "length", "=", "15", "if", "self", ".", "icc_profile", "is", "None", "else", "11", "+", "len", "(", "self", ".", "icc_profile", ")", "fptr", ".", "write", "(", "struct", ".", "pack", "(", "'>I4s'", ",", "length", ",", "b'colr'", ")", ")", "read_buffer", "=", "struct", ".", "pack", "(", "'>BBBI'", ",", "self", ".", "method", ",", "self", ".", "precedence", ",", "self", ".", "approximation", ",", "self", ".", "colorspace", ")", "fptr", ".", "write", "(", "read_buffer", ")" ]
Write an Colour Specification box to file.
[ "Write", "an", "Colour", "Specification", "box", "to", "file", "." ]
python
train
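The constant length of 15 used above when no ICC profile is present is just the sum of the packed fields: an 8-byte box header plus three one-byte fields and a 4-byte enumerated colorspace. A quick self-contained check, with made-up field values (method 1 = enumerated colorspace, 16 = sRGB):

import struct

# L + T box header: 4-byte length followed by the 4-byte box type
header = struct.pack('>I4s', 15, b'colr')
# method, precedence, approximation (1 byte each) plus 4-byte colorspace
payload = struct.pack('>BBBI', 1, 0, 0, 16)

assert len(header) + len(payload) == 15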
mlperf/training
translation/tensorflow/transformer/utils/tokenizer.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/tokenizer.py#L216-L221
def _unicode_to_native(s):
  """Convert string from unicode to native format (required in Python 2)."""
  if six.PY2:
    return s.encode("utf-8") if isinstance(s, unicode) else s
  else:
    return s
[ "def", "_unicode_to_native", "(", "s", ")", ":", "if", "six", ".", "PY2", ":", "return", "s", ".", "encode", "(", "\"utf-8\"", ")", "if", "isinstance", "(", "s", ",", "unicode", ")", "else", "s", "else", ":", "return", "s" ]
Convert string from unicode to native format (required in Python 2).
[ "Convert", "string", "from", "unicode", "to", "native", "format", "(", "required", "in", "Python", "2", ")", "." ]
python
train
mapillary/mapillary_tools
mapillary_tools/process_video.py
https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/process_video.py#L194-L209
def get_video_end_time(video_file):
    """Get video end time in seconds"""
    if not os.path.isfile(video_file):
        print("Error, video file {} does not exist".format(video_file))
        return None
    try:
        time_string = FFProbe(video_file).video[0].creation_time
        try:
            creation_time = datetime.datetime.strptime(
                time_string, TIME_FORMAT)
        except:
            creation_time = datetime.datetime.strptime(
                time_string, TIME_FORMAT_2)
    except:
        return None
    return creation_time
[ "def", "get_video_end_time", "(", "video_file", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "video_file", ")", ":", "print", "(", "\"Error, video file {} does not exist\"", ".", "format", "(", "video_file", ")", ")", "return", "None", "try", ":", "time_string", "=", "FFProbe", "(", "video_file", ")", ".", "video", "[", "0", "]", ".", "creation_time", "try", ":", "creation_time", "=", "datetime", ".", "datetime", ".", "strptime", "(", "time_string", ",", "TIME_FORMAT", ")", "except", ":", "creation_time", "=", "datetime", ".", "datetime", ".", "strptime", "(", "time_string", ",", "TIME_FORMAT_2", ")", "except", ":", "return", "None", "return", "creation_time" ]
Get video end time in seconds
[ "Get", "video", "end", "time", "in", "seconds" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L158-L211
def algebra_inverse_solve(left, right, var, solve_ops):
  """Solves for the value of the given var in an expression.

  Args:
    left: The root of the ExprNode tree on the left side of the equals sign.
    right: The root of the ExprNode tree on the right side of the equals sign.
    var: A char. The variable to solve for.
    solve_ops: A dictionary with the following properties.
        * For each operator in the expression, there is a rule that determines
          how to cancel out a value either to the left or the right of that
          operator.
        * For each rule, there is an entry in the dictionary. The key is two
          chars- the op char, and either 'l' or 'r' meaning rule for canceling
          out the left or right sides. For example, '+l', '+r', '-l', '-r'.
        * The value of each entry is a function with the following signature:
          (left, right, to_tree) -> (new_from_tree, new_to_tree)
          left- Expression on left side of the op.
          right- Expression on the right side of the op.
          to_tree- The tree on the other side of the equal sign. The canceled
              out expression will be moved here.
          new_from_tree- The resulting from_tree after the algebraic
              manipulation.
          new_to_tree- The resulting to_tree after the algebraic manipulation.

  Returns:
    The root of an ExprNode tree which holds the value of `var` after solving.

  Raises:
    ValueError: If `var` does not appear exactly once in the equation (which
        includes the left and right sides).
  """
  is_in_left = is_in_expr(left, var)
  is_in_right = is_in_expr(right, var)
  if is_in_left == is_in_right:
    if is_in_left:
      raise ValueError("Solve-variable '%s' is on both sides of the equation. "
                       "Only equations where the solve variable-appears once "
                       "are supported by this solver. Left: '%s', right: '%s'"
                       % (var, str(left), str(right)))
    else:
      raise ValueError("Solve-variable '%s' is not present in the equation. It "
                       "must appear once. Left: '%s', right: '%s'"
                       % (var, str(left), str(right)))

  from_tree = left if is_in_left else right
  to_tree = left if not is_in_left else right
  while from_tree != var:
    is_in_left = is_in_expr(from_tree.left, var)
    is_in_right = is_in_expr(from_tree.right, var)
    from_tree, to_tree = (solve_ops[str(from_tree.op) +
                                    ("l" if is_in_left else "r")](
                                        from_tree.left, from_tree.right,
                                        to_tree))
  return to_tree
[ "def", "algebra_inverse_solve", "(", "left", ",", "right", ",", "var", ",", "solve_ops", ")", ":", "is_in_left", "=", "is_in_expr", "(", "left", ",", "var", ")", "is_in_right", "=", "is_in_expr", "(", "right", ",", "var", ")", "if", "is_in_left", "==", "is_in_right", ":", "if", "is_in_left", ":", "raise", "ValueError", "(", "\"Solve-variable '%s' is on both sides of the equation. \"", "\"Only equations where the solve variable-appears once \"", "\"are supported by this solver. Left: '%s', right: '%s'\"", "%", "(", "var", ",", "str", "(", "left", ")", ",", "str", "(", "right", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Solve-variable '%s' is not present in the equation. It \"", "\"must appear once. Left: '%s', right: '%s'\"", "%", "(", "var", ",", "str", "(", "left", ")", ",", "str", "(", "right", ")", ")", ")", "from_tree", "=", "left", "if", "is_in_left", "else", "right", "to_tree", "=", "left", "if", "not", "is_in_left", "else", "right", "while", "from_tree", "!=", "var", ":", "is_in_left", "=", "is_in_expr", "(", "from_tree", ".", "left", ",", "var", ")", "is_in_right", "=", "is_in_expr", "(", "from_tree", ".", "right", ",", "var", ")", "from_tree", ",", "to_tree", "=", "(", "solve_ops", "[", "str", "(", "from_tree", ".", "op", ")", "+", "(", "\"l\"", "if", "is_in_left", "else", "\"r\"", ")", "]", "(", "from_tree", ".", "left", ",", "from_tree", ".", "right", ",", "to_tree", ")", ")", "return", "to_tree" ]
Solves for the value of the given var in an expression. Args: left: The root of the ExprNode tree on the left side of the equals sign. right: The root of the ExprNode tree on the right side of the equals sign. var: A char. The variable to solve for. solve_ops: A dictionary with the following properties. * For each operator in the expression, there is a rule that determines how to cancel out a value either to the left or the right of that operator. * For each rule, there is an entry in the dictionary. The key is two chars- the op char, and either 'l' or 'r' meaning rule for canceling out the left or right sides. For example, '+l', '+r', '-l', '-r'. * The value of each entry is a function with the following signature: (left, right, to_tree) -> (new_from_tree, new_to_tree) left- Expression on left side of the op. right- Expression on the right side of the op. to_tree- The tree on the other side of the equal sign. The canceled out expression will be moved here. new_from_tree- The resulting from_tree after the algebraic manipulation. new_to_tree- The resulting to_tree after the algebraic manipulation. Returns: The root of an ExprNode tree which holds the value of `var` after solving. Raises: ValueError: If `var` does not appear exactly once in the equation (which includes the left and right sides).
[ "Solves", "for", "the", "value", "of", "the", "given", "var", "in", "an", "expression", "." ]
python
train
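The solve_ops contract described in the docstring is easiest to see with one concrete rule. A sketch under the assumption that expression nodes carry (left, right, op); the namedtuple below is only a stand-in for the module's real ExprNode class, and the two entries are hypothetical:

import collections

# Minimal stand-in for an expression tree node, for illustration only.
ExprNode = collections.namedtuple("ExprNode", ["left", "right", "op"])

# For (X + b) = to_tree, the '+l' rule keeps the left child (which contains the
# variable) and moves the right child across the equals sign as a subtraction;
# '+r' is the mirror image for (a + X) = to_tree.
solve_ops = {
    "+l": lambda left, right, to_tree: (left, ExprNode(to_tree, right, "-")),
    "+r": lambda left, right, to_tree: (right, ExprNode(to_tree, left, "-")),
}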
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L319-L333
def add_book_series(self, title, volume=None):
    """
    :param volume: the volume of the book
    :type volume: string

    :param title: the title of the book
    :type title: string
    """
    book_series = {}
    if title is not None:
        book_series['title'] = title
    if volume is not None:
        book_series['volume'] = volume

    self._append_to('book_series', book_series)
[ "def", "add_book_series", "(", "self", ",", "title", ",", "volume", "=", "None", ")", ":", "book_series", "=", "{", "}", "if", "title", "is", "not", "None", ":", "book_series", "[", "'title'", "]", "=", "title", "if", "volume", "is", "not", "None", ":", "book_series", "[", "'volume'", "]", "=", "volume", "self", ".", "_append_to", "(", "'book_series'", ",", "book_series", ")" ]
:param volume: the volume of the book :type volume: string :param title: the title of the book :type title: string
[ ":", "param", "volume", ":", "the", "volume", "of", "the", "book", ":", "type", "volume", ":", "string" ]
python
train
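A hedged usage sketch for add_book_series, assuming the surrounding LiteratureBuilder can be constructed without arguments; the title and volume values are made up:

from inspire_schemas.builders import LiteratureBuilder

builder = LiteratureBuilder()
builder.add_book_series(title='Lecture Notes in Physics', volume='920')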
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L2590-L2615
def nvmlDeviceGetPciInfo(handle):
    r"""
    /**
     * Retrieves the PCI attributes of this device.
     *
     * For all products.
     *
     * See \ref nvmlPciInfo_t for details on the available PCI info.
     *
     * @param device                               The identifier of the target device
     * @param pci                                  Reference in which to return the PCI info
     *
     * @return
     *         - \ref NVML_SUCCESS                 if \a pci has been populated
     *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
     *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a pci is NULL
     *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
     *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
     */
    nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo
    """
    c_info = nvmlPciInfo_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetPciInfo_v2")
    ret = fn(handle, byref(c_info))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_info)
[ "def", "nvmlDeviceGetPciInfo", "(", "handle", ")", ":", "c_info", "=", "nvmlPciInfo_t", "(", ")", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceGetPciInfo_v2\"", ")", "ret", "=", "fn", "(", "handle", ",", "byref", "(", "c_info", ")", ")", "_nvmlCheckReturn", "(", "ret", ")", "return", "bytes_to_str", "(", "c_info", ")" ]
r""" /** * Retrieves the PCI attributes of this device. * * For all products. * * See \ref nvmlPciInfo_t for details on the available PCI info. * * @param device The identifier of the target device * @param pci Reference in which to return the PCI info * * @return * - \ref NVML_SUCCESS if \a pci has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo
[ "r", "/", "**", "*", "Retrieves", "the", "PCI", "attributes", "of", "this", "device", ".", "*", "*", "For", "all", "products", ".", "*", "*", "See", "\\", "ref", "nvmlPciInfo_t", "for", "details", "on", "the", "available", "PCI", "info", ".", "*", "*" ]
python
train
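A hedged usage sketch, assuming the usual py3nvml initialization sequence (nvmlInit, nvmlDeviceGetHandleByIndex, nvmlShutdown) and at least one visible NVIDIA GPU; the printed bus id is only an example:

from py3nvml import py3nvml

py3nvml.nvmlInit()
handle = py3nvml.nvmlDeviceGetHandleByIndex(0)
pci = py3nvml.nvmlDeviceGetPciInfo(handle)
print(pci.busId)   # e.g. '0000:01:00.0'
py3nvml.nvmlShutdown()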
MechanicalSoup/MechanicalSoup
mechanicalsoup/stateful_browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/027a270febf5bcda6a75db60ea9838d631370f4b/mechanicalsoup/stateful_browser.py#L335-L366
def download_link(self, link=None, file=None, *args, **kwargs):
    """Downloads the contents of a link to a file. This function behaves
    similarly to :func:`follow_link`, but the browser state will not
    change when calling this function.

    :param file: Filesystem path where the page contents will be
        downloaded. If the file already exists, it will be overwritten.

    Other arguments are the same as :func:`follow_link` (``link``
    can either be a bs4.element.Tag or a URL regex, other
    arguments are forwarded to :func:`find_link`).

    :return: `requests.Response
        <http://docs.python-requests.org/en/master/api/#requests.Response>`__
        object.
    """
    link = self._find_link_internal(link, args, kwargs)

    url = self.absolute_url(link['href'])
    referer = self.get_url()
    headers = {'Referer': referer} if referer else None
    response = self.session.get(url, headers=headers)
    if self.raise_on_404 and response.status_code == 404:
        raise LinkNotFoundError()

    # Save the response content to file
    if file is not None:
        with open(file, 'wb') as f:
            f.write(response.content)

    return response
[ "def", "download_link", "(", "self", ",", "link", "=", "None", ",", "file", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "link", "=", "self", ".", "_find_link_internal", "(", "link", ",", "args", ",", "kwargs", ")", "url", "=", "self", ".", "absolute_url", "(", "link", "[", "'href'", "]", ")", "referer", "=", "self", ".", "get_url", "(", ")", "headers", "=", "{", "'Referer'", ":", "referer", "}", "if", "referer", "else", "None", "response", "=", "self", ".", "session", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "if", "self", ".", "raise_on_404", "and", "response", ".", "status_code", "==", "404", ":", "raise", "LinkNotFoundError", "(", ")", "# Save the response content to file", "if", "file", "is", "not", "None", ":", "with", "open", "(", "file", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "response", ".", "content", ")", "return", "response" ]
Downloads the contents of a link to a file. This function behaves similarly to :func:`follow_link`, but the browser state will not change when calling this function. :param file: Filesystem path where the page contents will be downloaded. If the file already exists, it will be overwritten. Other arguments are the same as :func:`follow_link` (``link`` can either be a bs4.element.Tag or a URL regex, other arguments are forwarded to :func:`find_link`). :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object.
[ "Downloads", "the", "contents", "of", "a", "link", "to", "a", "file", ".", "This", "function", "behaves", "similarly", "to", ":", "func", ":", "follow_link", "but", "the", "browser", "state", "will", "not", "change", "when", "calling", "this", "function", "." ]
python
train
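A hedged usage sketch for download_link, using the URL-regex form of ``link`` that the docstring mentions; the site, regex, and file name are made up:

import mechanicalsoup

browser = mechanicalsoup.StatefulBrowser()
browser.open("https://example.com/reports")

# Download the first link whose URL matches the regex, without changing
# the browser's current page.
response = browser.download_link(r"\.pdf$", file="report.pdf")
print(response.status_code)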
dailymuse/oz
oz/error_pages/middleware.py
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/error_pages/middleware.py#L16-L44
def _on_error_page_write_error(self, status_code, **kwargs):
    """Replaces the default Tornado error page with a Django-styled one"""
    if oz.settings.get('debug'):
        exception_type, exception_value, tback = sys.exc_info()
        is_breakpoint = isinstance(exception_value, oz.error_pages.DebugBreakException)
        frames = oz.error_pages.get_frames(tback, is_breakpoint)
        frames.reverse()

        if is_breakpoint:
            exception_type = 'Debug breakpoint'
            exception_value = ''

        self.render(
            oz.settings["error_pages_template"],
            exception_type=exception_type,
            exception_value=exception_value,
            frames=frames,
            request_input=self.request.body,
            request_cookies=self.cookies,
            request_headers=self.request.headers,
            request_path=self.request.uri,
            request_method=self.request.method,
            response_output="".join(self._write_buffer),
            response_headers=self._headers,
            prettify_object=oz.error_pages.prettify_object,
        )

        return oz.break_trigger
[ "def", "_on_error_page_write_error", "(", "self", ",", "status_code", ",", "*", "*", "kwargs", ")", ":", "if", "oz", ".", "settings", ".", "get", "(", "'debug'", ")", ":", "exception_type", ",", "exception_value", ",", "tback", "=", "sys", ".", "exc_info", "(", ")", "is_breakpoint", "=", "isinstance", "(", "exception_value", ",", "oz", ".", "error_pages", ".", "DebugBreakException", ")", "frames", "=", "oz", ".", "error_pages", ".", "get_frames", "(", "tback", ",", "is_breakpoint", ")", "frames", ".", "reverse", "(", ")", "if", "is_breakpoint", ":", "exception_type", "=", "'Debug breakpoint'", "exception_value", "=", "''", "self", ".", "render", "(", "oz", ".", "settings", "[", "\"error_pages_template\"", "]", ",", "exception_type", "=", "exception_type", ",", "exception_value", "=", "exception_value", ",", "frames", "=", "frames", ",", "request_input", "=", "self", ".", "request", ".", "body", ",", "request_cookies", "=", "self", ".", "cookies", ",", "request_headers", "=", "self", ".", "request", ".", "headers", ",", "request_path", "=", "self", ".", "request", ".", "uri", ",", "request_method", "=", "self", ".", "request", ".", "method", ",", "response_output", "=", "\"\"", ".", "join", "(", "self", ".", "_write_buffer", ")", ",", "response_headers", "=", "self", ".", "_headers", ",", "prettify_object", "=", "oz", ".", "error_pages", ".", "prettify_object", ",", ")", "return", "oz", ".", "break_trigger" ]
Replaces the default Tornado error page with a Django-styled one
[ "Replaces", "the", "default", "Tornado", "error", "page", "with", "a", "Django", "-", "styled", "one" ]
python
train
EUDAT-B2SAFE/B2HANDLE
b2handle/handleclient.py
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handleclient.py#L365-L402
def is_10320LOC_empty(self, handle, handlerecord_json=None):
    '''
    Checks if there is a 10320/LOC entry in the handle record.
    *Note:* In the unlikely case that there is a 10320/LOC entry, but it
    does not contain any locations, it is treated as if there was none.

    :param handle: The handle.
    :param handlerecord_json: Optional. The content of the response of a
        GET request for the handle as a dict. Avoids another GET request.
    :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
    :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError`
    :return: True if the record contains NO 10320/LOC entry; False if it
        does contain one.
    '''
    LOGGER.debug('is_10320LOC_empty...')

    handlerecord_json = self.__get_handle_record_if_necessary(handle, handlerecord_json)
    if handlerecord_json is None:
        raise HandleNotFoundException(handle=handle)
    list_of_entries = handlerecord_json['values']

    num_entries = 0
    num_URL = 0
    for entry in list_of_entries:
        if entry['type'] == '10320/LOC':
            num_entries += 1
            xmlroot = ET.fromstring(entry['data']['value'])
            list_of_locations = xmlroot.findall('location')
            for item in list_of_locations:
                if item.get('href') is not None:
                    num_URL += 1

    if num_entries == 0:
        return True
    else:
        if num_URL == 0:
            return True
        else:
            return False
[ "def", "is_10320LOC_empty", "(", "self", ",", "handle", ",", "handlerecord_json", "=", "None", ")", ":", "LOGGER", ".", "debug", "(", "'is_10320LOC_empty...'", ")", "handlerecord_json", "=", "self", ".", "__get_handle_record_if_necessary", "(", "handle", ",", "handlerecord_json", ")", "if", "handlerecord_json", "is", "None", ":", "raise", "HandleNotFoundException", "(", "handle", "=", "handle", ")", "list_of_entries", "=", "handlerecord_json", "[", "'values'", "]", "num_entries", "=", "0", "num_URL", "=", "0", "for", "entry", "in", "list_of_entries", ":", "if", "entry", "[", "'type'", "]", "==", "'10320/LOC'", ":", "num_entries", "+=", "1", "xmlroot", "=", "ET", ".", "fromstring", "(", "entry", "[", "'data'", "]", "[", "'value'", "]", ")", "list_of_locations", "=", "xmlroot", ".", "findall", "(", "'location'", ")", "for", "item", "in", "list_of_locations", ":", "if", "item", ".", "get", "(", "'href'", ")", "is", "not", "None", ":", "num_URL", "+=", "1", "if", "num_entries", "==", "0", ":", "return", "True", "else", ":", "if", "num_URL", "==", "0", ":", "return", "True", "else", ":", "return", "False" ]
Checks if there is a 10320/LOC entry in the handle record. *Note:* In the unlikely case that there is a 10320/LOC entry, but it does not contain any locations, it is treated as if there was none. :param handle: The handle. :param handlerecord_json: Optional. The content of the response of a GET request for the handle as a dict. Avoids another GET request. :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException` :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError` :return: True if the record contains NO 10320/LOC entry; False if it does contain one.
[ "Checks", "if", "there", "is", "a", "10320", "/", "LOC", "entry", "in", "the", "handle", "record", ".", "*", "Note", ":", "*", "In", "the", "unlikely", "case", "that", "there", "is", "a", "10320", "/", "LOC", "entry", "but", "it", "does", "not", "contain", "any", "locations", "it", "is", "treated", "as", "if", "there", "was", "none", "." ]
python
train
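A self-contained sketch of the same 10320/LOC emptiness check from the `is_10320LOC_empty` entry above, applied to an in-memory handle record. The record layout is assumed from the method's own parsing, not taken from the B2HANDLE documentation.

import xml.etree.ElementTree as ET

# Hypothetical handle record with one 10320/LOC entry holding one location
record = {
    "values": [
        {"type": "URL", "data": {"value": "https://example.org/object"}},
        {"type": "10320/LOC",
         "data": {"value": "<locations><location href='https://example.org/copy1'/></locations>"}},
    ]
}

num_entries = num_url = 0
for entry in record["values"]:
    if entry["type"] == "10320/LOC":
        num_entries += 1
        root = ET.fromstring(entry["data"]["value"])
        # count <location> children that actually carry an href attribute
        num_url += sum(1 for loc in root.findall("location") if loc.get("href") is not None)

is_empty = (num_entries == 0) or (num_url == 0)
print(is_empty)  # False: one 10320/LOC entry with one usable location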
markovmodel/PyEMMA
setup_util.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/setup_util.py#L114-L128
def has_flag(compiler, flagname): """Return a boolean indicating whether a flag name is supported on the specified compiler. """ with TemporaryDirectory() as tmpdir, \ stdchannel_redirected(sys.stderr, os.devnull), \ stdchannel_redirected(sys.stdout, os.devnull): f = tempfile.mktemp(suffix='.cpp', dir=tmpdir) with open(f, 'w') as fh: fh.write('int main (int argc, char **argv) { return 0; }') try: compiler.compile([f], extra_postargs=[flagname], output_dir=tmpdir) except setuptools.distutils.errors.CompileError: return False return True
[ "def", "has_flag", "(", "compiler", ",", "flagname", ")", ":", "with", "TemporaryDirectory", "(", ")", "as", "tmpdir", ",", "stdchannel_redirected", "(", "sys", ".", "stderr", ",", "os", ".", "devnull", ")", ",", "stdchannel_redirected", "(", "sys", ".", "stdout", ",", "os", ".", "devnull", ")", ":", "f", "=", "tempfile", ".", "mktemp", "(", "suffix", "=", "'.cpp'", ",", "dir", "=", "tmpdir", ")", "with", "open", "(", "f", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "(", "'int main (int argc, char **argv) { return 0; }'", ")", "try", ":", "compiler", ".", "compile", "(", "[", "f", "]", ",", "extra_postargs", "=", "[", "flagname", "]", ",", "output_dir", "=", "tmpdir", ")", "except", "setuptools", ".", "distutils", ".", "errors", ".", "CompileError", ":", "return", "False", "return", "True" ]
Return a boolean indicating whether a flag name is supported on the specified compiler.
[ "Return", "a", "boolean", "indicating", "whether", "a", "flag", "name", "is", "supported", "on", "the", "specified", "compiler", "." ]
python
train
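A hedged usage sketch for the `has_flag` entry above: probing the active compiler for OpenMP support at build time. The `setup_util` import assumes the PyEMMA source tree is on sys.path; the distutils calls are standard library.

from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler

from setup_util import has_flag  # assumption: PyEMMA's setup_util.py is importable

compiler = new_compiler()
customize_compiler(compiler)  # pick up CC/CFLAGS etc. from the environment

openmp_flag = '-fopenmp' if has_flag(compiler, '-fopenmp') else None
print('OpenMP compile flag:', openmp_flag)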
dhermes/bezier
docs/make_images.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/docs/make_images.py#L48-L57
def save_image(figure, filename): """Save an image to the docs images directory. Args: filename (str): The name of the file (not containing directory info). """ path = os.path.join(IMAGES_DIR, filename) figure.savefig(path, bbox_inches="tight") plt.close(figure)
[ "def", "save_image", "(", "figure", ",", "filename", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "IMAGES_DIR", ",", "filename", ")", "figure", ".", "savefig", "(", "path", ",", "bbox_inches", "=", "\"tight\"", ")", "plt", ".", "close", "(", "figure", ")" ]
Save an image to the docs images directory. Args: filename (str): The name of the file (not containing directory info).
[ "Save", "an", "image", "to", "the", "docs", "images", "directory", "." ]
python
train
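A minimal sketch of calling the `save_image` helper above from a docs build script; the `make_images` import path and the file name are illustrative, and the Agg backend keeps the run headless.

import matplotlib
matplotlib.use("Agg")  # headless backend, as in documentation builds
import matplotlib.pyplot as plt

from make_images import save_image  # assumption: docs/make_images.py is importable

figure, ax = plt.subplots()
ax.plot([0.0, 0.5, 1.0], [0.0, 1.0, 0.0])
save_image(figure, "triangle.png")  # written under IMAGES_DIR, then the figure is closed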
xtuml/pyxtuml
bridgepoint/oal.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L1572-L1580
def p_select_from_where_statement_2(self, p): ''' statement : SELECT ANY variable_name FROM identifier WHERE expression | SELECT MANY variable_name FROM identifier WHERE expression ''' p[0] = SelectFromWhereNode(cardinality=p[2], variable_name=p[3], key_letter=p[5], where_clause=p[7])
[ "def", "p_select_from_where_statement_2", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "SelectFromWhereNode", "(", "cardinality", "=", "p", "[", "2", "]", ",", "variable_name", "=", "p", "[", "3", "]", ",", "key_letter", "=", "p", "[", "5", "]", ",", "where_clause", "=", "p", "[", "7", "]", ")" ]
statement : SELECT ANY variable_name FROM identifier WHERE expression | SELECT MANY variable_name FROM identifier WHERE expression
[ "statement", ":", "SELECT", "ANY", "variable_name", "FROM", "identifier", "WHERE", "expression", "|", "SELECT", "MANY", "variable_name", "FROM", "identifier", "WHERE", "expression" ]
python
test
xsleonard/pystmark
pystmark.py
https://github.com/xsleonard/pystmark/blob/329ccae1a7c8d57f28fa72cd8dbbee3e39413ed6/pystmark.py#L379-L387
def add_header(self, name, value): '''Attach an email header to send with the message. :param name: The name of the header value. :param value: The header value. ''' if self.headers is None: self.headers = [] self.headers.append(dict(Name=name, Value=value))
[ "def", "add_header", "(", "self", ",", "name", ",", "value", ")", ":", "if", "self", ".", "headers", "is", "None", ":", "self", ".", "headers", "=", "[", "]", "self", ".", "headers", ".", "append", "(", "dict", "(", "Name", "=", "name", ",", "Value", "=", "value", ")", ")" ]
Attach an email header to send with the message. :param name: The name of the header value. :param value: The header value.
[ "Attach", "an", "email", "header", "to", "send", "with", "the", "message", "." ]
python
train
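A hedged usage sketch for the `add_header` entry above. The `Message` constructor and `send()` call reflect pystmark's public API as I understand it; addresses and the API token are placeholders.

from pystmark import Message, send

message = Message(sender='sender@example.com', to='recipient@example.com',
                  subject='Hello', text='Plain-text body')
message.add_header('X-Campaign-Id', 'spring-launch')
# response = send(message, api_key='POSTMARK_SERVER_TOKEN')  # uncomment to actually send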
openid/python-openid
openid/consumer/consumer.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/consumer/consumer.py#L592-L608
def begin(self, service_endpoint): """Create an AuthRequest object for the specified service_endpoint. This method will create an association if necessary.""" if self.store is None: assoc = None else: assoc = self._getAssociation(service_endpoint) request = AuthRequest(service_endpoint, assoc) request.return_to_args[self.openid1_nonce_query_arg_name] = mkNonce() if request.message.isOpenID1(): request.return_to_args[self.openid1_return_to_identifier_name] = \ request.endpoint.claimed_id return request
[ "def", "begin", "(", "self", ",", "service_endpoint", ")", ":", "if", "self", ".", "store", "is", "None", ":", "assoc", "=", "None", "else", ":", "assoc", "=", "self", ".", "_getAssociation", "(", "service_endpoint", ")", "request", "=", "AuthRequest", "(", "service_endpoint", ",", "assoc", ")", "request", ".", "return_to_args", "[", "self", ".", "openid1_nonce_query_arg_name", "]", "=", "mkNonce", "(", ")", "if", "request", ".", "message", ".", "isOpenID1", "(", ")", ":", "request", ".", "return_to_args", "[", "self", ".", "openid1_return_to_identifier_name", "]", "=", "request", ".", "endpoint", ".", "claimed_id", "return", "request" ]
Create an AuthRequest object for the specified service_endpoint. This method will create an association if necessary.
[ "Create", "an", "AuthRequest", "object", "for", "the", "specified", "service_endpoint", ".", "This", "method", "will", "create", "an", "association", "if", "necessary", "." ]
python
train
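A hedged sketch of the flow around the `begin` entry above: applications normally call the higher-level Consumer.begin(user_url), which performs discovery and then invokes this method with the chosen endpoint. The store, session dict, and URLs are placeholders, and begin() performs network I/O.

from openid.consumer import consumer
from openid.store.memstore import MemoryStore

session = {}
oidc = consumer.Consumer(session, MemoryStore())
auth_request = oidc.begin('https://example.org/openid')  # discovery happens here
redirect_url = auth_request.redirectURL(
    realm='https://myapp.example/',
    return_to='https://myapp.example/openid/complete')
print(redirect_url)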
bodylabs/lace
lace/meshviewer.py
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/meshviewer.py#L840-L869
def on_click(self, button, button_state, cursor_x, cursor_y): """ Mouse button clicked. Glut calls this function when a mouse button is clicked or released. """ self.isdragging = False if button == glut.GLUT_LEFT_BUTTON and button_state == glut.GLUT_UP: # Left button released self.lastrot = copy.copy(self.thisrot) # Set Last Static Rotation To Last Dynamic One elif button == glut.GLUT_LEFT_BUTTON and button_state == glut.GLUT_DOWN: # Left button clicked down self.lastrot = copy.copy(self.thisrot) # Set Last Static Rotation To Last Dynamic One self.isdragging = True # Prepare For Dragging mouse_pt = arcball.Point2fT(cursor_x, cursor_y) self.arcball.click(mouse_pt) # Update Start Vector And Prepare For Dragging elif button == glut.GLUT_RIGHT_BUTTON and button_state == glut.GLUT_DOWN: # If a mouse click location was requested, return it to caller if hasattr(self, 'event_port'): self.mouseclick_port = self.event_port del self.event_port if hasattr(self, 'mouseclick_port'): self.send_mouseclick_to_caller(cursor_x, cursor_y) elif button == glut.GLUT_MIDDLE_BUTTON and button_state == glut.GLUT_DOWN: # If a mouse click location was requested, return it to caller if hasattr(self, 'event_port'): self.mouseclick_port = self.event_port del self.event_port if hasattr(self, 'mouseclick_port'): self.send_mouseclick_to_caller(cursor_x, cursor_y, button='middle') glut.glutPostRedisplay()
[ "def", "on_click", "(", "self", ",", "button", ",", "button_state", ",", "cursor_x", ",", "cursor_y", ")", ":", "self", ".", "isdragging", "=", "False", "if", "button", "==", "glut", ".", "GLUT_LEFT_BUTTON", "and", "button_state", "==", "glut", ".", "GLUT_UP", ":", "# Left button released", "self", ".", "lastrot", "=", "copy", ".", "copy", "(", "self", ".", "thisrot", ")", "# Set Last Static Rotation To Last Dynamic One", "elif", "button", "==", "glut", ".", "GLUT_LEFT_BUTTON", "and", "button_state", "==", "glut", ".", "GLUT_DOWN", ":", "# Left button clicked down", "self", ".", "lastrot", "=", "copy", ".", "copy", "(", "self", ".", "thisrot", ")", "# Set Last Static Rotation To Last Dynamic One", "self", ".", "isdragging", "=", "True", "# Prepare For Dragging", "mouse_pt", "=", "arcball", ".", "Point2fT", "(", "cursor_x", ",", "cursor_y", ")", "self", ".", "arcball", ".", "click", "(", "mouse_pt", ")", "# Update Start Vector And Prepare For Dragging", "elif", "button", "==", "glut", ".", "GLUT_RIGHT_BUTTON", "and", "button_state", "==", "glut", ".", "GLUT_DOWN", ":", "# If a mouse click location was requested, return it to caller", "if", "hasattr", "(", "self", ",", "'event_port'", ")", ":", "self", ".", "mouseclick_port", "=", "self", ".", "event_port", "del", "self", ".", "event_port", "if", "hasattr", "(", "self", ",", "'mouseclick_port'", ")", ":", "self", ".", "send_mouseclick_to_caller", "(", "cursor_x", ",", "cursor_y", ")", "elif", "button", "==", "glut", ".", "GLUT_MIDDLE_BUTTON", "and", "button_state", "==", "glut", ".", "GLUT_DOWN", ":", "# If a mouse click location was requested, return it to caller", "if", "hasattr", "(", "self", ",", "'event_port'", ")", ":", "self", ".", "mouseclick_port", "=", "self", ".", "event_port", "del", "self", ".", "event_port", "if", "hasattr", "(", "self", ",", "'mouseclick_port'", ")", ":", "self", ".", "send_mouseclick_to_caller", "(", "cursor_x", ",", "cursor_y", ",", "button", "=", "'middle'", ")", "glut", ".", "glutPostRedisplay", "(", ")" ]
Mouse button clicked. Glut calls this function when a mouse button is clicked or released.
[ "Mouse", "button", "clicked", ".", "Glut", "calls", "this", "function", "when", "a", "mouse", "button", "is", "clicked", "or", "released", "." ]
python
train
thespacedoctor/transientNamer
transientNamer/search.py
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L973-L1106
def _parse_photometry_data( self, content, TNSId): """*parse photometry data from a row in the tns results content* **Key Arguments:** - ``content`` -- a table row from the TNS results page - ``TNSId`` -- the tns id of the transient **Return:** - ``photData`` -- a list of dictionaries of the photometry data - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files """ self.log.info('starting the ``_parse_photometry_data`` method') photData = [] relatedFilesTable = [] # AT REPORT BLOCK ATBlock = re.search( r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""", content, flags=re.S # re.S ) if ATBlock: ATBlock = ATBlock.group() reports = re.finditer( r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""", ATBlock, flags=re.S # re.S ) relatedFiles = self._parse_related_files(ATBlock) for r in reports: header = re.search( r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""", r.group(), flags=0 # re.S ) try: header = header.groupdict() except: print r.group() header["TNSId"] = TNSId del header["reporters"] del header["surveyGroup"] del header["hostName"] del header["hostRedshift"] del header["mag"] del header["magFilter"] del header["obsDate"] del header["ra"] del header["dec"] if not self.comments: del header['sourceComment'] else: theseComments = header[ "sourceComment"].split("\n") header["sourceComment"] = "" for c in theseComments: header["sourceComment"] += " " + c.strip() header["sourceComment"] = header[ "sourceComment"].strip().replace('"', "'")[0:750] phot = re.finditer( r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""", r.group(), flags=0 # re.S ) filesAppended = False for p in phot: p = p.groupdict() del p["observer"] if p["limitingMag"] and not p["mag"]: p["mag"] = p["limitingMag"] p["limitingMag"] = 1 p["remarks"] = p["remarks"].replace( "[Last non detection]", "") else: p["limitingMag"] = 0 if not self.comments: del p["remarks"] p.update(header) if p["relatedFiles"] and filesAppended == False: filesAppended = True for f in relatedFiles: # ORDER THE DICTIONARY FOR THIS ROW OF # RESULTS thisFile = collections.OrderedDict() thisFile["TNSId"] = TNSId thisFile["filename"] = f[ "filepath"].split("/")[-1] thisFile["url"] = f["filepath"] if self.comments: thisFile["comment"] = f[ "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750] thisFile["dateObs"] = p["obsdate"] thisFile["spec1phot2"] = 2 relatedFilesTable.append(thisFile) if not p["survey"] and not p["objectName"]: p["survey"] = p["sender"] del p["relatedFiles"] del p["sender"] # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS orow = collections.OrderedDict() keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr", "magUnit", 
"suggestedType", "telescope", "exptime", "reportAddedDate"] for k, v in p.iteritems(): if k not in keyOrder: keyOrder.append(k) for k in keyOrder: try: orow[k] = p[k] except: self.log.info( "`%(k)s` not found in the source data for %(TNSId)s" % locals()) pass photData.append(orow) self.log.info('completed the ``_parse_photometry_data`` method') return photData, relatedFilesTable
[ "def", "_parse_photometry_data", "(", "self", ",", "content", ",", "TNSId", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_parse_photometry_data`` method'", ")", "photData", "=", "[", "]", "relatedFilesTable", "=", "[", "]", "# AT REPORT BLOCK", "ATBlock", "=", "re", ".", "search", "(", "r\"\"\"<tr class=[^\\n]*?AT reportings.*?(?=<tr class=[^\\n]*?Classification reportings|$)\"\"\"", ",", "content", ",", "flags", "=", "re", ".", "S", "# re.S", ")", "if", "ATBlock", ":", "ATBlock", "=", "ATBlock", ".", "group", "(", ")", "reports", "=", "re", ".", "finditer", "(", "r\"\"\"<tr class=\"row-[^\"]*\"><td class=\"cell-id\">.*?</table>\"\"\"", ",", "ATBlock", ",", "flags", "=", "re", ".", "S", "# re.S", ")", "relatedFiles", "=", "self", ".", "_parse_related_files", "(", "ATBlock", ")", "for", "r", "in", "reports", ":", "header", "=", "re", ".", "search", "(", "r\"\"\"<tr class=\"row[^\"]*\".*?time_received\">(?P<reportAddedDate>[^<]*).*?user_name\">(?P<sender>[^<]*).*?reporter_name\">(?P<reporters>[^<]*).*?source_group_name\">(?P<surveyGroup>[^<]*).*?ra\">(?P<ra>[^<]*).*?decl\">(?P<dec>[^<]*).*?discovery_date\">(?P<obsDate>[^<]*).*?flux\">(?P<mag>[^<]*).*?filter_name\">(?P<magFilter>[^<]*).*?related_files\">(?P<relatedFiles>[^<]*).*?type_name\">(?P<suggestedType>[^<]*).*?hostname\">(?P<hostName>[^<]*).*?host_redshift\">(?P<hostRedshift>[^<]*).*?internal_name\">(?P<objectName>[^<]*).*?groups\">(?P<survey>[^<]*).*?remarks\">(?P<sourceComment>[^<]*)\"\"\"", ",", "r", ".", "group", "(", ")", ",", "flags", "=", "0", "# re.S", ")", "try", ":", "header", "=", "header", ".", "groupdict", "(", ")", "except", ":", "print", "r", ".", "group", "(", ")", "header", "[", "\"TNSId\"", "]", "=", "TNSId", "del", "header", "[", "\"reporters\"", "]", "del", "header", "[", "\"surveyGroup\"", "]", "del", "header", "[", "\"hostName\"", "]", "del", "header", "[", "\"hostRedshift\"", "]", "del", "header", "[", "\"mag\"", "]", "del", "header", "[", "\"magFilter\"", "]", "del", "header", "[", "\"obsDate\"", "]", "del", "header", "[", "\"ra\"", "]", "del", "header", "[", "\"dec\"", "]", "if", "not", "self", ".", "comments", ":", "del", "header", "[", "'sourceComment'", "]", "else", ":", "theseComments", "=", "header", "[", "\"sourceComment\"", "]", ".", "split", "(", "\"\\n\"", ")", "header", "[", "\"sourceComment\"", "]", "=", "\"\"", "for", "c", "in", "theseComments", ":", "header", "[", "\"sourceComment\"", "]", "+=", "\" \"", "+", "c", ".", "strip", "(", ")", "header", "[", "\"sourceComment\"", "]", "=", "header", "[", "\"sourceComment\"", "]", ".", "strip", "(", ")", ".", "replace", "(", "'\"'", ",", "\"'\"", ")", "[", "0", ":", "750", "]", "phot", "=", "re", ".", "finditer", "(", "r\"\"\"<tr class=\"row\\-[^\"]*\".*?obsdate\">(?P<obsdate>[^<]*).*?flux\">(?P<mag>[^<]*).*?fluxerr\">(?P<magErr>[^<]*).*?limflux\">(?P<limitingMag>[^<]*).*?unit_name\">(?P<magUnit>[^<]*).*?filter_name\">(?P<filter>[^<]*).*?tel_inst\">(?P<telescope>[^<]*).*?exptime\">(?P<exptime>[^<]*).*?observer\">(?P<observer>[^<]*).*?-remarks\">(?P<remarks>[^<]*)\"\"\"", ",", "r", ".", "group", "(", ")", ",", "flags", "=", "0", "# re.S", ")", "filesAppended", "=", "False", "for", "p", "in", "phot", ":", "p", "=", "p", ".", "groupdict", "(", ")", "del", "p", "[", "\"observer\"", "]", "if", "p", "[", "\"limitingMag\"", "]", "and", "not", "p", "[", "\"mag\"", "]", ":", "p", "[", "\"mag\"", "]", "=", "p", "[", "\"limitingMag\"", "]", "p", "[", "\"limitingMag\"", "]", "=", "1", "p", "[", "\"remarks\"", "]", "=", "p", "[", "\"remarks\"", 
"]", ".", "replace", "(", "\"[Last non detection]\"", ",", "\"\"", ")", "else", ":", "p", "[", "\"limitingMag\"", "]", "=", "0", "if", "not", "self", ".", "comments", ":", "del", "p", "[", "\"remarks\"", "]", "p", ".", "update", "(", "header", ")", "if", "p", "[", "\"relatedFiles\"", "]", "and", "filesAppended", "==", "False", ":", "filesAppended", "=", "True", "for", "f", "in", "relatedFiles", ":", "# ORDER THE DICTIONARY FOR THIS ROW OF", "# RESULTS", "thisFile", "=", "collections", ".", "OrderedDict", "(", ")", "thisFile", "[", "\"TNSId\"", "]", "=", "TNSId", "thisFile", "[", "\"filename\"", "]", "=", "f", "[", "\"filepath\"", "]", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "thisFile", "[", "\"url\"", "]", "=", "f", "[", "\"filepath\"", "]", "if", "self", ".", "comments", ":", "thisFile", "[", "\"comment\"", "]", "=", "f", "[", "\"fileComment\"", "]", ".", "replace", "(", "\"\\n\"", ",", "\" \"", ")", ".", "strip", "(", ")", ".", "replace", "(", "'\"'", ",", "\"'\"", ")", "[", "0", ":", "750", "]", "thisFile", "[", "\"dateObs\"", "]", "=", "p", "[", "\"obsdate\"", "]", "thisFile", "[", "\"spec1phot2\"", "]", "=", "2", "relatedFilesTable", ".", "append", "(", "thisFile", ")", "if", "not", "p", "[", "\"survey\"", "]", "and", "not", "p", "[", "\"objectName\"", "]", ":", "p", "[", "\"survey\"", "]", "=", "p", "[", "\"sender\"", "]", "del", "p", "[", "\"relatedFiles\"", "]", "del", "p", "[", "\"sender\"", "]", "# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS", "orow", "=", "collections", ".", "OrderedDict", "(", ")", "keyOrder", "=", "[", "\"TNSId\"", ",", "\"survey\"", ",", "\"obsdate\"", ",", "\"filter\"", ",", "\"limitingMag\"", ",", "\"mag\"", ",", "\"magErr\"", ",", "\"magUnit\"", ",", "\"suggestedType\"", ",", "\"telescope\"", ",", "\"exptime\"", ",", "\"reportAddedDate\"", "]", "for", "k", ",", "v", "in", "p", ".", "iteritems", "(", ")", ":", "if", "k", "not", "in", "keyOrder", ":", "keyOrder", ".", "append", "(", "k", ")", "for", "k", "in", "keyOrder", ":", "try", ":", "orow", "[", "k", "]", "=", "p", "[", "k", "]", "except", ":", "self", ".", "log", ".", "info", "(", "\"`%(k)s` not found in the source data for %(TNSId)s\"", "%", "locals", "(", ")", ")", "pass", "photData", ".", "append", "(", "orow", ")", "self", ".", "log", ".", "info", "(", "'completed the ``_parse_photometry_data`` method'", ")", "return", "photData", ",", "relatedFilesTable" ]
*parse photometry data from a row in the tns results content* **Key Arguments:** - ``content`` -- a table row from the TNS results page - ``TNSId`` -- the tns id of the transient **Return:** - ``photData`` -- a list of dictionaries of the photometry data - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
[ "*", "parse", "photometry", "data", "from", "a", "row", "in", "the", "tns", "results", "content", "*" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L226-L239
def qos_map_cos_mutation_cos4(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos") map = ET.SubElement(qos, "map") cos_mutation = ET.SubElement(map, "cos-mutation") name_key = ET.SubElement(cos_mutation, "name") name_key.text = kwargs.pop('name') cos4 = ET.SubElement(cos_mutation, "cos4") cos4.text = kwargs.pop('cos4') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "qos_map_cos_mutation_cos4", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "qos", "=", "ET", ".", "SubElement", "(", "config", ",", "\"qos\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-qos\"", ")", "map", "=", "ET", ".", "SubElement", "(", "qos", ",", "\"map\"", ")", "cos_mutation", "=", "ET", ".", "SubElement", "(", "map", ",", "\"cos-mutation\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "cos_mutation", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "cos4", "=", "ET", ".", "SubElement", "(", "cos_mutation", ",", "\"cos4\"", ")", "cos4", ".", "text", "=", "kwargs", ".", "pop", "(", "'cos4'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
auth0/auth0-python
auth0/v3/management/client_grants.py
https://github.com/auth0/auth0-python/blob/34adad3f342226aaaa6071387fa405ab840e5c02/auth0/v3/management/client_grants.py#L27-L54
def all(self, audience=None, page=None, per_page=None, include_totals=False, client_id=None): """Retrieves all client grants. Args: audience (str, optional): URL encoded audience of a Resource Server to filter page (int, optional): The result's page number (zero based). per_page (int, optional): The amount of entries per page. include_totals (bool, optional): True if the query summary is to be included in the result, False otherwise. client_id (string, optional): The id of a client to filter See: https://auth0.com/docs/api/management/v2#!/Client_Grants/get_client_grants """ params = { 'audience': audience, 'page': page, 'per_page': per_page, 'include_totals': str(include_totals).lower(), 'client_id': client_id, } return self.client.get(self._url(), params=params)
[ "def", "all", "(", "self", ",", "audience", "=", "None", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "include_totals", "=", "False", ",", "client_id", "=", "None", ")", ":", "params", "=", "{", "'audience'", ":", "audience", ",", "'page'", ":", "page", ",", "'per_page'", ":", "per_page", ",", "'include_totals'", ":", "str", "(", "include_totals", ")", ".", "lower", "(", ")", ",", "'client_id'", ":", "client_id", ",", "}", "return", "self", ".", "client", ".", "get", "(", "self", ".", "_url", "(", ")", ",", "params", "=", "params", ")" ]
Retrieves all client grants. Args: audience (str, optional): URL encoded audience of a Resource Server to filter page (int, optional): The result's page number (zero based). per_page (int, optional): The amount of entries per page. include_totals (bool, optional): True if the query summary is to be included in the result, False otherwise. client_id (string, optional): The id of a client to filter See: https://auth0.com/docs/api/management/v2#!/Client_Grants/get_client_grants
[ "Retrieves", "all", "client", "grants", "." ]
python
train
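A hedged usage sketch for the client grants `all` entry above, using auth0-python v3; the tenant domain, management token, client id, and the 'total' key of the response are placeholders or assumptions.

from auth0.v3.management import Auth0

auth0 = Auth0('my-tenant.auth0.com', 'MGMT_API_TOKEN')
grants = auth0.client_grants.all(client_id='abc123DEF', page=0, per_page=25,
                                 include_totals=True)
print(grants['total'], 'client grants found')  # assumes the summary key returned by the API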
fastai/fastai
fastai/data_block.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L294-L302
def label_from_re(self, pat:str, full_path:bool=False, label_cls:Callable=None, **kwargs)->'LabelList': "Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name." pat = re.compile(pat) def _inner(o): s = str((os.path.join(self.path,o) if full_path else o).as_posix()) res = pat.search(s) assert res,f'Failed to find "{pat}" in "{s}"' return res.group(1) return self.label_from_func(_inner, label_cls=label_cls, **kwargs)
[ "def", "label_from_re", "(", "self", ",", "pat", ":", "str", ",", "full_path", ":", "bool", "=", "False", ",", "label_cls", ":", "Callable", "=", "None", ",", "*", "*", "kwargs", ")", "->", "'LabelList'", ":", "pat", "=", "re", ".", "compile", "(", "pat", ")", "def", "_inner", "(", "o", ")", ":", "s", "=", "str", "(", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "o", ")", "if", "full_path", "else", "o", ")", ".", "as_posix", "(", ")", ")", "res", "=", "pat", ".", "search", "(", "s", ")", "assert", "res", ",", "f'Failed to find \"{pat}\" in \"{s}\"'", "return", "res", ".", "group", "(", "1", ")", "return", "self", ".", "label_from_func", "(", "_inner", ",", "label_cls", "=", "label_cls", ",", "*", "*", "kwargs", ")" ]
Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name.
[ "Apply", "the", "re", "in", "pat", "to", "determine", "the", "label", "of", "every", "filename", ".", "If", "full_path", "search", "in", "the", "full", "name", "." ]
python
train
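A hedged sketch of the fastai v1 data-block pipeline that `label_from_re` belongs to; the folder layout and the filename pattern are illustrative only.

from fastai.vision import ImageList, imagenet_stats

data = (ImageList.from_folder('data/pets/images')
        .split_by_rand_pct(0.2)
        .label_from_re(r'([^/]+)_\d+.jpg$')   # class name captured from each filename
        .databunch(bs=32)
        .normalize(imagenet_stats))
print(data.classes[:5])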
aparo/pyes
pyes/orm/queryset.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/orm/queryset.py#L1158-L1164
def size(self, size): """ Set the query size of this QuerySet should execute its query against. """ clone = self._clone() clone._size = size return clone
[ "def", "size", "(", "self", ",", "size", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "clone", ".", "_size", "=", "size", "return", "clone" ]
Set the query size this QuerySet should use when executing its query.
[ "Set", "the", "query", "size", "of", "this", "QuerySet", "should", "execute", "its", "query", "against", "." ]
python
train
tommyod/streprogen
streprogen/utils.py
https://github.com/tommyod/streprogen/blob/21b903618e8b2d398bceb394d18d7c74ca984def/streprogen/utils.py#L197-L224
def min_between(min_reps=3, max_reps=8, percentile=0.33): """Function to decide the minimum number of reps to perform given `min_reps` and `max_rep`. Parameters ---------- min_reps The minimum number of repeitions. max_reps The maximum number of repetitions. percentile The percentile to cap at. Return ------- (low, high) A tuple containing a new rep range. Examples ------- >>> min_between(min_reps = 3, max_reps = 8, percentile = 0.33) (3, 5) """ higher_limit = min_reps + (max_reps - min_reps) * percentile return min_reps, math.ceil(higher_limit)
[ "def", "min_between", "(", "min_reps", "=", "3", ",", "max_reps", "=", "8", ",", "percentile", "=", "0.33", ")", ":", "higher_limit", "=", "min_reps", "+", "(", "max_reps", "-", "min_reps", ")", "*", "percentile", "return", "min_reps", ",", "math", ".", "ceil", "(", "higher_limit", ")" ]
Function to decide the minimum number of reps to perform
    given `min_reps` and `max_reps`.

    Parameters
    ----------
    min_reps
        The minimum number of repetitions.
    max_reps
        The maximum number of repetitions.
    percentile
        The percentile to cap at.

    Return
    -------
    (low, high)
        A tuple containing a new rep range.

    Examples
    -------
    >>> min_between(min_reps = 3, max_reps = 8, percentile = 0.33)
    (3, 5)
[ "Function", "to", "decide", "the", "minimum", "number", "of", "reps", "to", "perform", "given", "min_reps", "and", "max_rep", "." ]
python
train
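A quick check of the `min_between` helper above, assuming it is importable from streprogen.utils as the entry's path suggests; it is a pure function, so the outputs can be verified by hand.

from streprogen.utils import min_between

print(min_between(min_reps=3, max_reps=8, percentile=0.33))  # (3, 5)
print(min_between(min_reps=1, max_reps=12, percentile=0.5))  # (1, 7)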
ericmjl/hiveplot
hiveplot/hiveplot.py
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L105-L110
def initialize_major_angle(self): """ Computes the major angle: 2pi radians / number of groups. """ num_groups = len(self.nodes.keys()) self.major_angle = 2 * np.pi / num_groups
[ "def", "initialize_major_angle", "(", "self", ")", ":", "num_groups", "=", "len", "(", "self", ".", "nodes", ".", "keys", "(", ")", ")", "self", ".", "major_angle", "=", "2", "*", "np", ".", "pi", "/", "num_groups" ]
Computes the major angle: 2pi radians / number of groups.
[ "Computes", "the", "major", "angle", ":", "2pi", "radians", "/", "number", "of", "groups", "." ]
python
valid
pypa/pipenv
tasks/vendoring/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/tasks/vendoring/__init__.py#L588-L600
def license_fallback(vendor_dir, sdist_name): """Hardcoded license URLs. Check when updating if those are still needed""" libname = libname_from_dir(sdist_name) if libname not in HARDCODED_LICENSE_URLS: raise ValueError('No hardcoded URL for {} license'.format(libname)) url = HARDCODED_LICENSE_URLS[libname] _, _, name = url.rpartition('/') dest = license_destination(vendor_dir, libname, name) r = requests.get(url, allow_redirects=True) log('Downloading {}'.format(url)) r.raise_for_status() dest.write_bytes(r.content)
[ "def", "license_fallback", "(", "vendor_dir", ",", "sdist_name", ")", ":", "libname", "=", "libname_from_dir", "(", "sdist_name", ")", "if", "libname", "not", "in", "HARDCODED_LICENSE_URLS", ":", "raise", "ValueError", "(", "'No hardcoded URL for {} license'", ".", "format", "(", "libname", ")", ")", "url", "=", "HARDCODED_LICENSE_URLS", "[", "libname", "]", "_", ",", "_", ",", "name", "=", "url", ".", "rpartition", "(", "'/'", ")", "dest", "=", "license_destination", "(", "vendor_dir", ",", "libname", ",", "name", ")", "r", "=", "requests", ".", "get", "(", "url", ",", "allow_redirects", "=", "True", ")", "log", "(", "'Downloading {}'", ".", "format", "(", "url", ")", ")", "r", ".", "raise_for_status", "(", ")", "dest", ".", "write_bytes", "(", "r", ".", "content", ")" ]
Hardcoded license URLs. Check when updating if those are still needed
[ "Hardcoded", "license", "URLs", ".", "Check", "when", "updating", "if", "those", "are", "still", "needed" ]
python
train
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L2064-L2094
def jobsGetFields(self, jobIDs, fields, requireAll=True): """ Fetch the values of 1 or more fields from a sequence of job records. Here, 'fields' is a sequence (list or tuple) with the names of the fields to fetch. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). WARNING!!!: The order of the results are NOT necessarily in the same order as the order of the job IDs passed in!!! Parameters: ---------------------------------------------------------------- jobIDs: A sequence of jobIDs fields: A list of fields to return for each jobID Returns: A list of tuples->(jobID, [field1, field2,...]) """ assert isinstance(jobIDs, self._SEQUENCE_TYPES) assert len(jobIDs) >=1 rows = self._getMatchingRowsWithRetries( self._jobs, dict(job_id=jobIDs), ['job_id'] + [self._jobs.pubToDBNameDict[x] for x in fields]) if requireAll and len(rows) < len(jobIDs): # NOTE: this will also trigger if the jobIDs list included duplicates raise RuntimeError("jobIDs %s not found within the jobs table" % ( (set(jobIDs) - set(r[0] for r in rows)),)) return [(r[0], list(r[1:])) for r in rows]
[ "def", "jobsGetFields", "(", "self", ",", "jobIDs", ",", "fields", ",", "requireAll", "=", "True", ")", ":", "assert", "isinstance", "(", "jobIDs", ",", "self", ".", "_SEQUENCE_TYPES", ")", "assert", "len", "(", "jobIDs", ")", ">=", "1", "rows", "=", "self", ".", "_getMatchingRowsWithRetries", "(", "self", ".", "_jobs", ",", "dict", "(", "job_id", "=", "jobIDs", ")", ",", "[", "'job_id'", "]", "+", "[", "self", ".", "_jobs", ".", "pubToDBNameDict", "[", "x", "]", "for", "x", "in", "fields", "]", ")", "if", "requireAll", "and", "len", "(", "rows", ")", "<", "len", "(", "jobIDs", ")", ":", "# NOTE: this will also trigger if the jobIDs list included duplicates", "raise", "RuntimeError", "(", "\"jobIDs %s not found within the jobs table\"", "%", "(", "(", "set", "(", "jobIDs", ")", "-", "set", "(", "r", "[", "0", "]", "for", "r", "in", "rows", ")", ")", ",", ")", ")", "return", "[", "(", "r", "[", "0", "]", ",", "list", "(", "r", "[", "1", ":", "]", ")", ")", "for", "r", "in", "rows", "]" ]
Fetch the values of 1 or more fields from a sequence of job records.
Here, 'fields' is a sequence (list or tuple) with the names of the fields to
fetch. The names are the public names of the fields (camelBack, not the
lower_case_only form as stored in the DB).

WARNING: The results are NOT necessarily returned in the same order as the
job IDs passed in!

Parameters:
----------------------------------------------------------------
jobIDs:        A sequence of jobIDs
fields:        A list of fields to return for each jobID

Returns:      A list of tuples->(jobID, [field1, field2,...])
[ "Fetch", "the", "values", "of", "1", "or", "more", "fields", "from", "a", "sequence", "of", "job", "records", ".", "Here", "fields", "is", "a", "sequence", "(", "list", "or", "tuple", ")", "with", "the", "names", "of", "the", "fields", "to", "fetch", ".", "The", "names", "are", "the", "public", "names", "of", "the", "fields", "(", "camelBack", "not", "the", "lower_case_only", "form", "as", "stored", "in", "the", "DB", ")", "." ]
python
valid
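A hedged usage sketch for the `jobsGetFields` entry above. The ClientJobsDAO.get() accessor and the public field names used here are assumptions based on the surrounding NuPIC module, and a configured MySQL backend is required.

from nupic.database.client_jobs_dao import ClientJobsDAO

dao = ClientJobsDAO.get()  # assumed singleton accessor; needs NuPIC's database config
job_ids = [17, 42]
for job_id, (status, completion) in dao.jobsGetFields(job_ids,
                                                      ['status', 'completionReason']):
    print(job_id, status, completion)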
ewels/MultiQC
multiqc/modules/dedup/dedup.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/dedup/dedup.py#L83-L98
def dedup_general_stats_table(self): """ Take the parsed stats from the DeDup report and add it to the basic stats table at the top of the report """ headers = OrderedDict() headers['duplication_rate'] = { 'title': 'Duplication Rate', 'description': 'Percentage of reads categorised as a technical duplicate', 'min': 0, 'max': 100, 'suffix': '%', 'scale': 'OrRd', 'format': '{:,.0f}', 'modify': lambda x: x * 100.0 } self.general_stats_addcols(self.dedup_data, headers)
[ "def", "dedup_general_stats_table", "(", "self", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'duplication_rate'", "]", "=", "{", "'title'", ":", "'Duplication Rate'", ",", "'description'", ":", "'Percentage of reads categorised as a technical duplicate'", ",", "'min'", ":", "0", ",", "'max'", ":", "100", ",", "'suffix'", ":", "'%'", ",", "'scale'", ":", "'OrRd'", ",", "'format'", ":", "'{:,.0f}'", ",", "'modify'", ":", "lambda", "x", ":", "x", "*", "100.0", "}", "self", ".", "general_stats_addcols", "(", "self", ".", "dedup_data", ",", "headers", ")" ]
Take the parsed stats from the DeDup report and add it to the basic stats table at the top of the report
[ "Take", "the", "parsed", "stats", "from", "the", "DeDup", "report", "and", "add", "it", "to", "the", "basic", "stats", "table", "at", "the", "top", "of", "the", "report" ]
python
train
evonove/django-money-rates
djmoney_rates/utils.py
https://github.com/evonove/django-money-rates/blob/ac1f7636b9a38d3e153eb833019342c4d88634c2/djmoney_rates/utils.py#L24-L32
def get_rate_source(): """Get the default Rate Source and return it.""" backend = money_rates_settings.DEFAULT_BACKEND() try: return RateSource.objects.get(name=backend.get_source_name()) except RateSource.DoesNotExist: raise CurrencyConversionException( "Rate for %s source do not exists. " "Please run python manage.py update_rates" % backend.get_source_name())
[ "def", "get_rate_source", "(", ")", ":", "backend", "=", "money_rates_settings", ".", "DEFAULT_BACKEND", "(", ")", "try", ":", "return", "RateSource", ".", "objects", ".", "get", "(", "name", "=", "backend", ".", "get_source_name", "(", ")", ")", "except", "RateSource", ".", "DoesNotExist", ":", "raise", "CurrencyConversionException", "(", "\"Rate for %s source do not exists. \"", "\"Please run python manage.py update_rates\"", "%", "backend", ".", "get_source_name", "(", ")", ")" ]
Get the default Rate Source and return it.
[ "Get", "the", "default", "Rate", "Source", "and", "return", "it", "." ]
python
train
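A hedged usage sketch for the `get_rate_source` entry above, inside a configured Django project with django-money-rates installed; the exception is imported from utils because the module above references it there.

from djmoney_rates.utils import get_rate_source, CurrencyConversionException

try:
    source = get_rate_source()
    print('Using rate source:', source.name)
except CurrencyConversionException:
    print('No rates yet - run: python manage.py update_rates')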
python-gitlab/python-gitlab
gitlab/v4/objects.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L1696-L1716
def list(self, **kwargs): """Retrieve a list of objects. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Returns: list: The list of objects, or a generator if `as_list` is False Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the server cannot perform the request """ path = self._compute_path('/projects/%(project_id)s/forks') return ListMixin.list(self, path=path, **kwargs)
[ "def", "list", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_compute_path", "(", "'/projects/%(project_id)s/forks'", ")", "return", "ListMixin", ".", "list", "(", "self", ",", "path", "=", "path", ",", "*", "*", "kwargs", ")" ]
Retrieve a list of objects. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Returns: list: The list of objects, or a generator if `as_list` is False Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the server cannot perform the request
[ "Retrieve", "a", "list", "of", "objects", "." ]
python
train
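A hedged usage sketch for the project forks `list` entry above, using python-gitlab; server URL, token, and project path are placeholders.

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get('group/repo')
for fork in project.forks.list(per_page=20):
    print(fork.path_with_namespace)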
theno/fabsetup
fabsetup/fabutils.py
https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabutils.py#L725-L732
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#', keep_backup=True): '''Remove the comment of an commented out line and make the line "active". If such an commented out line not exists it would be appended. ''' return uua_local(filename, prefix, new_line, comment, keep_backup, update_or_append_line=update_or_append_line)
[ "def", "uncomment_or_update_or_append_line", "(", "filename", ",", "prefix", ",", "new_line", ",", "comment", "=", "'#'", ",", "keep_backup", "=", "True", ")", ":", "return", "uua_local", "(", "filename", ",", "prefix", ",", "new_line", ",", "comment", ",", "keep_backup", ",", "update_or_append_line", "=", "update_or_append_line", ")" ]
Remove the comment of a commented out line and make the line "active".

If no such commented out line exists, the line is appended.
[ "Remove", "the", "comment", "of", "an", "commented", "out", "line", "and", "make", "the", "line", "active", "." ]
python
train
niemasd/TreeSwift
treeswift/Tree.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L111-L128
def coalescence_waiting_times(self, backward=True): '''Generator over the waiting times of successive coalescence events Args: ``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False`` ''' if not isinstance(backward, bool): raise TypeError("backward must be a bool") times = list(); lowest_leaf_dist = float('-inf') for n,d in self.distances_from_root(): if len(n.children) > 1: times.append(d) elif len(n.children) == 0 and d > lowest_leaf_dist: lowest_leaf_dist = d times.append(lowest_leaf_dist) times.sort(reverse=backward) for i in range(len(times)-1): yield abs(times[i]-times[i+1])
[ "def", "coalescence_waiting_times", "(", "self", ",", "backward", "=", "True", ")", ":", "if", "not", "isinstance", "(", "backward", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"backward must be a bool\"", ")", "times", "=", "list", "(", ")", "lowest_leaf_dist", "=", "float", "(", "'-inf'", ")", "for", "n", ",", "d", "in", "self", ".", "distances_from_root", "(", ")", ":", "if", "len", "(", "n", ".", "children", ")", ">", "1", ":", "times", ".", "append", "(", "d", ")", "elif", "len", "(", "n", ".", "children", ")", "==", "0", "and", "d", ">", "lowest_leaf_dist", ":", "lowest_leaf_dist", "=", "d", "times", ".", "append", "(", "lowest_leaf_dist", ")", "times", ".", "sort", "(", "reverse", "=", "backward", ")", "for", "i", "in", "range", "(", "len", "(", "times", ")", "-", "1", ")", ":", "yield", "abs", "(", "times", "[", "i", "]", "-", "times", "[", "i", "+", "1", "]", ")" ]
Generator over the waiting times of successive coalescence events Args: ``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``
[ "Generator", "over", "the", "waiting", "times", "of", "successive", "coalescence", "events" ]
python
train
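A minimal usage sketch for the `coalescence_waiting_times` entry above, using TreeSwift's documented Newick loader; the tree string is illustrative.

from treeswift import read_tree_newick

tree = read_tree_newick('((A:1,B:1):2,(C:2,D:2):1);')
for waiting_time in tree.coalescence_waiting_times(backward=True):
    print(waiting_time)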
smarie/python-parsyfiles
parsyfiles/filesystem_mapping.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/filesystem_mapping.py#L67-L84
def create(location: str, simpleobjects_found = None, complexobject_attributes_found = None): # -> ObjectNotFoundOnFileSystemError: """ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param location: :return: """ if len(complexobject_attributes_found) > 0 or len(simpleobjects_found) > 0: return ObjectNotFoundOnFileSystemError('Mandatory object : ' + location + ' could not be found on the file' ' system, either as a multifile or as a singlefile with any ' 'extension, but it seems that this is because you have left the ' 'extension in the location name. Please remove the file extension ' 'from the location name and try again') else: return ObjectNotFoundOnFileSystemError('Mandatory object : ' + location + ' could not be found on the file' ' system, either as a multifile or as a singlefile with any ' 'extension.')
[ "def", "create", "(", "location", ":", "str", ",", "simpleobjects_found", "=", "None", ",", "complexobject_attributes_found", "=", "None", ")", ":", "# -> ObjectNotFoundOnFileSystemError:", "if", "len", "(", "complexobject_attributes_found", ")", ">", "0", "or", "len", "(", "simpleobjects_found", ")", ">", "0", ":", "return", "ObjectNotFoundOnFileSystemError", "(", "'Mandatory object : '", "+", "location", "+", "' could not be found on the file'", "' system, either as a multifile or as a singlefile with any '", "'extension, but it seems that this is because you have left the '", "'extension in the location name. Please remove the file extension '", "'from the location name and try again'", ")", "else", ":", "return", "ObjectNotFoundOnFileSystemError", "(", "'Mandatory object : '", "+", "location", "+", "' could not be found on the file'", "' system, either as a multifile or as a singlefile with any '", "'extension.'", ")" ]
Helper method provided because we actually can't put that in the constructor;
doing so creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725

:param location:
:return:
[ "Helper", "method", "provided", "because", "we", "actually", "can", "t", "put", "that", "in", "the", "constructor", "it", "creates", "a", "bug", "in", "Nose", "tests", "https", ":", "//", "github", ".", "com", "/", "nose", "-", "devs", "/", "nose", "/", "issues", "/", "725" ]
python
train
flatangle/flatlib
flatlib/dignities/accidental.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/dignities/accidental.py#L74-L122
def haiz(obj, chart): """ Returns if an object is in Haiz. """ objGender = obj.gender() objFaction = obj.faction() if obj.id == const.MERCURY: # Gender and faction of mercury depends on orientality sun = chart.getObject(const.SUN) orientalityM = orientality(obj, sun) if orientalityM == ORIENTAL: objGender = const.MASCULINE objFaction = const.DIURNAL else: objGender = const.FEMININE objFaction = const.NOCTURNAL # Object gender match sign gender? signGender = props.sign.gender[obj.sign] genderConformity = (objGender == signGender) # Match faction factionConformity = False diurnalChart = chart.isDiurnal() if obj.id == const.SUN and not diurnalChart: # Sun is in conformity only when above horizon factionConformity = False else: # Get list of houses in the chart's diurnal faction if diurnalChart: diurnalFaction = props.house.aboveHorizon nocturnalFaction = props.house.belowHorizon else: diurnalFaction = props.house.belowHorizon nocturnalFaction = props.house.aboveHorizon # Get the object's house and match factions objHouse = chart.houses.getObjectHouse(obj) if (objFaction == const.DIURNAL and objHouse.id in diurnalFaction or objFaction == const.NOCTURNAL and objHouse.id in nocturnalFaction): factionConformity = True # Match things if (genderConformity and factionConformity): return HAIZ elif (not genderConformity and not factionConformity): return CHAIZ else: return None
[ "def", "haiz", "(", "obj", ",", "chart", ")", ":", "objGender", "=", "obj", ".", "gender", "(", ")", "objFaction", "=", "obj", ".", "faction", "(", ")", "if", "obj", ".", "id", "==", "const", ".", "MERCURY", ":", "# Gender and faction of mercury depends on orientality", "sun", "=", "chart", ".", "getObject", "(", "const", ".", "SUN", ")", "orientalityM", "=", "orientality", "(", "obj", ",", "sun", ")", "if", "orientalityM", "==", "ORIENTAL", ":", "objGender", "=", "const", ".", "MASCULINE", "objFaction", "=", "const", ".", "DIURNAL", "else", ":", "objGender", "=", "const", ".", "FEMININE", "objFaction", "=", "const", ".", "NOCTURNAL", "# Object gender match sign gender?", "signGender", "=", "props", ".", "sign", ".", "gender", "[", "obj", ".", "sign", "]", "genderConformity", "=", "(", "objGender", "==", "signGender", ")", "# Match faction", "factionConformity", "=", "False", "diurnalChart", "=", "chart", ".", "isDiurnal", "(", ")", "if", "obj", ".", "id", "==", "const", ".", "SUN", "and", "not", "diurnalChart", ":", "# Sun is in conformity only when above horizon", "factionConformity", "=", "False", "else", ":", "# Get list of houses in the chart's diurnal faction", "if", "diurnalChart", ":", "diurnalFaction", "=", "props", ".", "house", ".", "aboveHorizon", "nocturnalFaction", "=", "props", ".", "house", ".", "belowHorizon", "else", ":", "diurnalFaction", "=", "props", ".", "house", ".", "belowHorizon", "nocturnalFaction", "=", "props", ".", "house", ".", "aboveHorizon", "# Get the object's house and match factions", "objHouse", "=", "chart", ".", "houses", ".", "getObjectHouse", "(", "obj", ")", "if", "(", "objFaction", "==", "const", ".", "DIURNAL", "and", "objHouse", ".", "id", "in", "diurnalFaction", "or", "objFaction", "==", "const", ".", "NOCTURNAL", "and", "objHouse", ".", "id", "in", "nocturnalFaction", ")", ":", "factionConformity", "=", "True", "# Match things", "if", "(", "genderConformity", "and", "factionConformity", ")", ":", "return", "HAIZ", "elif", "(", "not", "genderConformity", "and", "not", "factionConformity", ")", ":", "return", "CHAIZ", "else", ":", "return", "None" ]
Returns if an object is in Haiz.
[ "Returns", "if", "an", "object", "is", "in", "Haiz", "." ]
python
train
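A hedged usage sketch for the `haiz` entry above, using flatlib's chart API; the date, time, and coordinates are illustrative.

from flatlib import const
from flatlib.chart import Chart
from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos
from flatlib.dignities import accidental

date = Datetime('2015/03/13', '17:00', '+00:00')
pos = GeoPos('38n32', '8w54')
chart = Chart(date, pos)
sun = chart.getObject(const.SUN)
print(accidental.haiz(sun, chart))  # HAIZ, CHAIZ or None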
totalgood/nlpia
src/nlpia/clean_alice.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/clean_alice.py#L85-L98
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'): """ Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths """ path = find_data_path(path) or path if os.path.isdir(path): paths = os.listdir(path) paths = [os.path.join(path, p) for p in paths] else: zf = zipfile.ZipFile(path) paths = [] for name in zf.namelist(): if '.hg/' in name: continue paths.append(zf.extract(name, path=BIGDATA_PATH)) return paths
[ "def", "extract_aiml", "(", "path", "=", "'aiml-en-us-foundation-alice.v1-9'", ")", ":", "path", "=", "find_data_path", "(", "path", ")", "or", "path", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "paths", "=", "os", ".", "listdir", "(", "path", ")", "paths", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "p", ")", "for", "p", "in", "paths", "]", "else", ":", "zf", "=", "zipfile", ".", "ZipFile", "(", "path", ")", "paths", "=", "[", "]", "for", "name", "in", "zf", ".", "namelist", "(", ")", ":", "if", "'.hg/'", "in", "name", ":", "continue", "paths", ".", "append", "(", "zf", ".", "extract", "(", "name", ",", "path", "=", "BIGDATA_PATH", ")", ")", "return", "paths" ]
Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths
[ "Extract", "an", "aiml", ".", "zip", "file", "if", "it", "hasn", "t", "been", "already", "and", "return", "a", "list", "of", "aiml", "file", "paths" ]
python
train
kwikteam/phy
phy/gui/widgets.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/widgets.py#L309-L312
def sort_by(self, name, sort_dir='asc'): """Sort by a given variable.""" logger.log(5, "Sort by `%s` %s.", name, sort_dir) self.eval_js('table.sortBy("{}", "{}");'.format(name, sort_dir))
[ "def", "sort_by", "(", "self", ",", "name", ",", "sort_dir", "=", "'asc'", ")", ":", "logger", ".", "log", "(", "5", ",", "\"Sort by `%s` %s.\"", ",", "name", ",", "sort_dir", ")", "self", ".", "eval_js", "(", "'table.sortBy(\"{}\", \"{}\");'", ".", "format", "(", "name", ",", "sort_dir", ")", ")" ]
Sort by a given variable.
[ "Sort", "by", "a", "given", "variable", "." ]
python
train
estnltk/estnltk
estnltk/wordnet/wn.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L775-L787
def examples(self): """Returns the examples of the synset. Returns ------- list of str List of its variants' examples. """ examples = [] for example in [variant.examples for variant in self._raw_synset.variants if len(variant.examples)]: examples.extend(example) return examples
[ "def", "examples", "(", "self", ")", ":", "examples", "=", "[", "]", "for", "example", "in", "[", "variant", ".", "examples", "for", "variant", "in", "self", ".", "_raw_synset", ".", "variants", "if", "len", "(", "variant", ".", "examples", ")", "]", ":", "examples", ".", "extend", "(", "example", ")", "return", "examples" ]
Returns the examples of the synset. Returns ------- list of str List of its variants' examples.
[ "Returns", "the", "examples", "of", "the", "synset", ".", "Returns", "-------", "list", "of", "str", "List", "of", "its", "variants", "examples", "." ]
python
train
obriencj/python-javatools
javatools/__init__.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L714-L727
def get_innerclasses(self): """ sequence of JavaInnerClassInfo instances describing the inner classes of this class definition reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.6 """ # noqa buff = self.get_attribute("InnerClasses") if buff is None: return tuple() with unpack(buff) as up: return tuple(up.unpack_objects(JavaInnerClassInfo, self.cpool))
[ "def", "get_innerclasses", "(", "self", ")", ":", "# noqa", "buff", "=", "self", ".", "get_attribute", "(", "\"InnerClasses\"", ")", "if", "buff", "is", "None", ":", "return", "tuple", "(", ")", "with", "unpack", "(", "buff", ")", "as", "up", ":", "return", "tuple", "(", "up", ".", "unpack_objects", "(", "JavaInnerClassInfo", ",", "self", ".", "cpool", ")", ")" ]
sequence of JavaInnerClassInfo instances describing the inner classes of this class definition reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.6
[ "sequence", "of", "JavaInnerClassInfo", "instances", "describing", "the", "inner", "classes", "of", "this", "class", "definition" ]
python
train
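A hedged usage sketch for the `get_innerclasses` entry above. The `unpack_classfile` loader is python-javatools' class-file entry point as I understand it, and the .class path is illustrative.

from javatools import unpack_classfile

class_info = unpack_classfile('build/com/example/Outer.class')
inner_classes = class_info.get_innerclasses()
print(len(inner_classes), 'inner class records')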
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L3431-L3457
def is_all_field_none(self): """ :rtype: bool """ if self._color is not None: return False if self._alias is not None: return False if self._description is not None: return False if self._attachment is not None: return False if self._pointer is not None: return False if self._status is not None: return False if self._redirect_url is not None: return False return True
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_color", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_alias", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_description", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_attachment", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_pointer", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_status", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_redirect_url", "is", "not", "None", ":", "return", "False", "return", "True" ]
:rtype: bool
[ ":", "rtype", ":", "bool" ]
python
train
trolldbois/ctypeslib
ctypeslib/codegen/cursorhandler.py
https://github.com/trolldbois/ctypeslib/blob/2aeb1942a5a32a5cc798c287cd0d9e684a0181a8/ctypeslib/codegen/cursorhandler.py#L132-L140
def ENUM_CONSTANT_DECL(self, cursor): """Gets the enumeration values""" name = cursor.displayname value = cursor.enum_value pname = self.get_unique_name(cursor.semantic_parent) parent = self.get_registered(pname) obj = typedesc.EnumValue(name, value, parent) parent.add_value(obj) return obj
[ "def", "ENUM_CONSTANT_DECL", "(", "self", ",", "cursor", ")", ":", "name", "=", "cursor", ".", "displayname", "value", "=", "cursor", ".", "enum_value", "pname", "=", "self", ".", "get_unique_name", "(", "cursor", ".", "semantic_parent", ")", "parent", "=", "self", ".", "get_registered", "(", "pname", ")", "obj", "=", "typedesc", ".", "EnumValue", "(", "name", ",", "value", ",", "parent", ")", "parent", ".", "add_value", "(", "obj", ")", "return", "obj" ]
Gets the enumeration values
[ "Gets", "the", "enumeration", "values" ]
python
train
roclark/sportsreference
sportsreference/ncaab/schedule.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/schedule.py#L137-L165
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the boxscore string. """ if self._points_for is None and self._points_against is None: return None fields_to_include = { 'arena': self.arena, 'boxscore_index': self.boxscore_index, 'date': self.date, 'datetime': self.datetime, 'game': self.game, 'location': self.location, 'opponent_abbr': self.opponent_abbr, 'opponent_conference': self.opponent_conference, 'opponent_name': self.opponent_name, 'opponent_rank': self.opponent_rank, 'overtimes': self.overtimes, 'points_against': self.points_against, 'points_for': self.points_for, 'result': self.result, 'season_losses': self.season_losses, 'season_wins': self.season_wins, 'streak': self.streak, 'time': self.time, 'type': self.type } return pd.DataFrame([fields_to_include], index=[self._boxscore])
[ "def", "dataframe", "(", "self", ")", ":", "if", "self", ".", "_points_for", "is", "None", "and", "self", ".", "_points_against", "is", "None", ":", "return", "None", "fields_to_include", "=", "{", "'arena'", ":", "self", ".", "arena", ",", "'boxscore_index'", ":", "self", ".", "boxscore_index", ",", "'date'", ":", "self", ".", "date", ",", "'datetime'", ":", "self", ".", "datetime", ",", "'game'", ":", "self", ".", "game", ",", "'location'", ":", "self", ".", "location", ",", "'opponent_abbr'", ":", "self", ".", "opponent_abbr", ",", "'opponent_conference'", ":", "self", ".", "opponent_conference", ",", "'opponent_name'", ":", "self", ".", "opponent_name", ",", "'opponent_rank'", ":", "self", ".", "opponent_rank", ",", "'overtimes'", ":", "self", ".", "overtimes", ",", "'points_against'", ":", "self", ".", "points_against", ",", "'points_for'", ":", "self", ".", "points_for", ",", "'result'", ":", "self", ".", "result", ",", "'season_losses'", ":", "self", ".", "season_losses", ",", "'season_wins'", ":", "self", ".", "season_wins", ",", "'streak'", ":", "self", ".", "streak", ",", "'time'", ":", "self", ".", "time", ",", "'type'", ":", "self", ".", "type", "}", "return", "pd", ".", "DataFrame", "(", "[", "fields_to_include", "]", ",", "index", "=", "[", "self", ".", "_boxscore", "]", ")" ]
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the boxscore string.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "class", "properties", "and", "values", ".", "The", "index", "for", "the", "DataFrame", "is", "the", "boxscore", "string", "." ]
python
train
Enteee/pdml2flow
pdml2flow/autovivification.py
https://github.com/Enteee/pdml2flow/blob/bc9efe379b0b2406bfbbbd8e0f678b1f63805c66/pdml2flow/autovivification.py#L21-L34
def clean_empty(self, d=DEFAULT):
    """Returns a copy of d without empty leaves.

    https://stackoverflow.com/questions/27973988/python-how-to-remove-all-empty-fields-in-a-nested-dict/35263074
    """
    if d is DEFAULT:
        d = self
    if isinstance(d, list):
        return [v for v in (self.clean_empty(v) for v in d) if v or v == 0]
    elif isinstance(d, type(self)):
        return type(self)({k: v for k, v in ((k, self.clean_empty(v)) for k, v in d.items()) if v or v == 0})
    elif isinstance(d, dict):
        return {k: v for k, v in ((k, self.clean_empty(v)) for k, v in d.items()) if v or v == 0}
    return d
[ "def", "clean_empty", "(", "self", ",", "d", "=", "DEFAULT", ")", ":", "if", "d", "is", "DEFAULT", ":", "d", "=", "self", "if", "isinstance", "(", "d", ",", "list", ")", ":", "return", "[", "v", "for", "v", "in", "(", "self", ".", "clean_empty", "(", "v", ")", "for", "v", "in", "d", ")", "if", "v", "or", "v", "==", "0", "]", "elif", "isinstance", "(", "d", ",", "type", "(", "self", ")", ")", ":", "return", "type", "(", "self", ")", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "(", "(", "k", ",", "self", ".", "clean_empty", "(", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", "if", "v", "or", "v", "==", "0", "}", ")", "elif", "isinstance", "(", "d", ",", "dict", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "(", "(", "k", ",", "self", ".", "clean_empty", "(", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", "if", "v", "or", "v", "==", "0", "}", "return", "d" ]
Returns a copy of d without empty leaves. https://stackoverflow.com/questions/27973988/python-how-to-remove-all-empty-fields-in-a-nested-dict/35263074
[ "Returns", "a", "copy", "of", "d", "without", "empty", "leaves", "." ]
python
train
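A small usage sketch of the clean_empty behaviour documented above, rewritten as a standalone function over plain lists and dicts (the real method lives on an AutoVivification subclass, whose special-case branch is omitted here): empty leaves are dropped, explicit zeros are kept.

def clean_empty(d):
    # Drop empty leaves ({}, None, '', []) but keep explicit zeros.
    if isinstance(d, list):
        return [v for v in (clean_empty(v) for v in d) if v or v == 0]
    if isinstance(d, dict):
        return {k: v for k, v in ((k, clean_empty(v)) for k, v in d.items()) if v or v == 0}
    return d

print(clean_empty({'a': {}, 'b': 0, 'c': {'d': None, 'e': 'x'}}))
# -> {'b': 0, 'c': {'e': 'x'}}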
tehmaze/natural
natural/data.py
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/data.py#L78-L97
def printable(sequence):
    '''
    Return a printable string from the input ``sequence``

    :param sequence: byte or string sequence

    >>> print(printable('\\x1b[1;34mtest\\x1b[0m'))
    .[1;34mtest.[0m
    >>> printable('\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x06') == '........'
    True
    >>> print(printable('12345678'))
    12345678
    >>> print(printable('testing\\n'))
    testing.
    '''
    return ''.join(list(
        map(lambda c: c if c in PRINTABLE else '.', sequence)
    ))
[ "def", "printable", "(", "sequence", ")", ":", "return", "''", ".", "join", "(", "list", "(", "map", "(", "lambda", "c", ":", "c", "if", "c", "in", "PRINTABLE", "else", "'.'", ",", "sequence", ")", ")", ")" ]
Return a printable string from the input ``sequence``

:param sequence: byte or string sequence

>>> print(printable('\\x1b[1;34mtest\\x1b[0m'))
.[1;34mtest.[0m
>>> printable('\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x06') == '........'
True
>>> print(printable('12345678'))
12345678
>>> print(printable('testing\\n'))
testing.
[ "Return", "a", "printable", "string", "from", "the", "input", "sequence" ]
python
train
a1ezzz/wasp-general
wasp_general/network/messenger/onion.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/messenger/onion.py#L69-L72
def layers_names(self):
    """ :meth:`.WMessengerOnionProto.layer_names` method implementation.
    """
    return list(self.__class__.__builtin_layers__.keys()) + list(self.__layers.keys())
[ "def", "layers_names", "(", "self", ")", ":", "return", "list", "(", "self", ".", "__class__", ".", "__builtin_layers__", ".", "keys", "(", ")", ")", "+", "list", "(", "self", ".", "__layers", ".", "keys", "(", ")", ")" ]
:meth:`.WMessengerOnionProto.layer_names` method implementation.
[ ":", "meth", ":", ".", "WMessengerOnionProto", ".", "layer_names", "method", "implementation", "." ]
python
train
sys-git/certifiable
certifiable/complex.py
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/complex.py#L84-L160
def certify_dict(
    value,
    schema=None,
    allow_extra=False,
    required=True,
    key_certifier=None,
    value_certifier=None,
    include_collections=False,
):
    """
    Certifies a dictionary, checking it against an optional schema.

    The schema should be a dictionary, with keys corresponding to the expected keys in `value`,
    but with the values replaced by functions which will be called to with the corresponding value
    in the input.

    A simple example:

    >>> certifier = certify_dict(schema={
    ...     'id': certify_key(kind='Model'),
    ...     'count': certify_int(min=0),
    ... })
    >>> certifier({'id': self.key, 'count': self.count})

    :param dict|Mapping|MutableMapping value:
        The value to be certified.
    :param dict schema:
        The schema against which the value should be checked.
    :param bool allow_extra:
        Set to `True` to ignore extra keys.
    :param bool required:
        Whether the value can't be `None`. Defaults to True.
    :param callable key_certifier:
        callable that receives the key to certify (ignoring schema keys).
    :param callable value_certifier:
        callable that receives the value to certify (ignoring schema values).
    :param bool include_collections:
        Include types from collections.
    :return:
        The certified dict.
    :rtype:
        dict|Mapping|MutableMapping
    :raises CertifierTypeError:
        The type is invalid
    :raises CertifierValueError:
        The value is invalid
    """
    cls = dict

    # Certify our kwargs:
    certify_params(
        (certify_bool, 'allow_extra', allow_extra),
        (certify_bool, 'include_collections', include_collections),
    )

    if certify_required(
        value=value,
        required=required,
    ):
        return

    # Check the type(s):
    types = [cls]
    if include_collections:
        types.extend([Mapping, MutableMapping])
    types = tuple(types)

    if not isinstance(value, types):
        raise CertifierTypeError(
            message="Expected {t} but the type is {cls!r}".format(
                cls=cls,
                t=value.__class__.__name__,
            ),
            value=value,
            required=required,
        )

    certify_dict_schema(
        value=value,
        schema=schema,
        key_certifier=key_certifier,
        value_certifier=value_certifier,
        required=required,
        allow_extra=allow_extra,
    )
[ "def", "certify_dict", "(", "value", ",", "schema", "=", "None", ",", "allow_extra", "=", "False", ",", "required", "=", "True", ",", "key_certifier", "=", "None", ",", "value_certifier", "=", "None", ",", "include_collections", "=", "False", ",", ")", ":", "cls", "=", "dict", "# Certify our kwargs:", "certify_params", "(", "(", "certify_bool", ",", "'allow_extra'", ",", "allow_extra", ")", ",", "(", "certify_bool", ",", "'include_collections'", ",", "include_collections", ")", ",", ")", "if", "certify_required", "(", "value", "=", "value", ",", "required", "=", "required", ",", ")", ":", "return", "# Check the type(s):", "types", "=", "[", "cls", "]", "if", "include_collections", ":", "types", ".", "extend", "(", "[", "Mapping", ",", "MutableMapping", "]", ")", "types", "=", "tuple", "(", "types", ")", "if", "not", "isinstance", "(", "value", ",", "types", ")", ":", "raise", "CertifierTypeError", "(", "message", "=", "\"Expected {t} but the type is {cls!r}\"", ".", "format", "(", "cls", "=", "cls", ",", "t", "=", "value", ".", "__class__", ".", "__name__", ",", ")", ",", "value", "=", "value", ",", "required", "=", "required", ",", ")", "certify_dict_schema", "(", "value", "=", "value", ",", "schema", "=", "schema", ",", "key_certifier", "=", "key_certifier", ",", "value_certifier", "=", "value_certifier", ",", "required", "=", "required", ",", "allow_extra", "=", "allow_extra", ",", ")" ]
Certifies a dictionary, checking it against an optional schema. The schema should be a dictionary, with keys corresponding to the expected keys in `value`, but with the values replaced by functions which will be called to with the corresponding value in the input. A simple example: >>> certifier = certify_dict(schema={ ... 'id': certify_key(kind='Model'), ... 'count': certify_int(min=0), ... }) >>> certifier({'id': self.key, 'count': self.count}) :param dict|Mapping|MutableMapping value: The value to be certified. :param dict schema: The schema against which the value should be checked. :param bool allow_extra: Set to `True` to ignore extra keys. :param bool required: Whether the value can't be `None`. Defaults to True. :param callable key_certifier: callable that receives the key to certify (ignoring schema keys). :param callable value_certifier: callable that receives the value to certify (ignoring schema values). :param bool include_collections: Include types from collections. :return: The certified dict. :rtype: dict|Mapping|MutableMapping :raises CertifierTypeError: The type is invalid :raises CertifierValueError: The value is invalid
[ "Certifies", "a", "dictionary", "checking", "it", "against", "an", "optional", "schema", "." ]
python
train
adaptive-learning/proso-apps
proso_models/models.py
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L712-L726
def get_reference_fields(self, exclude_models=None):
    """
    Get all Django model fields which reference the Item model.
    """
    if exclude_models is None:
        exclude_models = []
    result = []
    for django_model in django.apps.apps.get_models():
        if any([issubclass(django_model, m) for m in exclude_models]):
            continue
        for django_field in django_model._meta.fields:
            if isinstance(django_field, models.ForeignKey) and django_field.related.to == Item:
                result = [(m, f) for (m, f) in result if not issubclass(django_model, m)]
                result.append((django_model, django_field))
    return result
[ "def", "get_reference_fields", "(", "self", ",", "exclude_models", "=", "None", ")", ":", "if", "exclude_models", "is", "None", ":", "exclude_models", "=", "[", "]", "result", "=", "[", "]", "for", "django_model", "in", "django", ".", "apps", ".", "apps", ".", "get_models", "(", ")", ":", "if", "any", "(", "[", "issubclass", "(", "django_model", ",", "m", ")", "for", "m", "in", "exclude_models", "]", ")", ":", "continue", "for", "django_field", "in", "django_model", ".", "_meta", ".", "fields", ":", "if", "isinstance", "(", "django_field", ",", "models", ".", "ForeignKey", ")", "and", "django_field", ".", "related", ".", "to", "==", "Item", ":", "result", "=", "[", "(", "m", ",", "f", ")", "for", "(", "m", ",", "f", ")", "in", "result", "if", "not", "issubclass", "(", "django_model", ",", "m", ")", "]", "result", ".", "append", "(", "(", "django_model", ",", "django_field", ")", ")", "return", "result" ]
Get all Django model fields which reference the Item model.
[ "Get", "all", "Django", "model", "fields", "which", "reference", "the", "Item", "model", "." ]
python
train
CalebBell/thermo
thermo/safety.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/safety.py#L329-L365
def STEL(CASRN, AvailableMethods=False, Method=None):  # pragma: no cover
    '''This function handles the retrieval of Short-term Exposure Limit on
    worker exposure to dangerous chemicals.

    This API is considered experimental, and is expected to be removed in a
    future release in favor of a more complete object-oriented interface.

    >>> STEL('67-64-1')
    (750.0, 'ppm')
    >>> STEL('7664-38-2')
    (0.7489774978301237, 'ppm')
    >>> STEL('55720-99-5')
    (2.0, 'mg/m^3')
    >>> STEL('86290-81-5', AvailableMethods=True)
    ['Ontario Limits', 'None']
    '''
    def list_methods():
        methods = []
        if CASRN in _OntarioExposureLimits and (_OntarioExposureLimits[CASRN]["STEL (ppm)"]
                or _OntarioExposureLimits[CASRN]["STEL (mg/m^3)"]):
            methods.append(ONTARIO)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    if Method == ONTARIO:
        if _OntarioExposureLimits[CASRN]["STEL (ppm)"]:
            _STEL = (_OntarioExposureLimits[CASRN]["STEL (ppm)"], 'ppm')
        elif _OntarioExposureLimits[CASRN]["STEL (mg/m^3)"]:
            _STEL = (_OntarioExposureLimits[CASRN]["STEL (mg/m^3)"], 'mg/m^3')
    elif Method == NONE:
        _STEL = None
    else:
        raise Exception('Failure in in function')
    return _STEL
[ "def", "STEL", "(", "CASRN", ",", "AvailableMethods", "=", "False", ",", "Method", "=", "None", ")", ":", "# pragma: no cover", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "if", "CASRN", "in", "_OntarioExposureLimits", "and", "(", "_OntarioExposureLimits", "[", "CASRN", "]", "[", "\"STEL (ppm)\"", "]", "or", "_OntarioExposureLimits", "[", "CASRN", "]", "[", "\"STEL (mg/m^3)\"", "]", ")", ":", "methods", ".", "append", "(", "ONTARIO", ")", "methods", ".", "append", "(", "NONE", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "if", "Method", "==", "ONTARIO", ":", "if", "_OntarioExposureLimits", "[", "CASRN", "]", "[", "\"STEL (ppm)\"", "]", ":", "_STEL", "=", "(", "_OntarioExposureLimits", "[", "CASRN", "]", "[", "\"STEL (ppm)\"", "]", ",", "'ppm'", ")", "elif", "_OntarioExposureLimits", "[", "CASRN", "]", "[", "\"STEL (mg/m^3)\"", "]", ":", "_STEL", "=", "(", "_OntarioExposureLimits", "[", "CASRN", "]", "[", "\"STEL (mg/m^3)\"", "]", ",", "'mg/m^3'", ")", "elif", "Method", "==", "NONE", ":", "_STEL", "=", "None", "else", ":", "raise", "Exception", "(", "'Failure in in function'", ")", "return", "_STEL" ]
This function handles the retrieval of Short-term Exposure Limit on
worker exposure to dangerous chemicals.

This API is considered experimental, and is expected to be removed in a
future release in favor of a more complete object-oriented interface.

>>> STEL('67-64-1')
(750.0, 'ppm')
>>> STEL('7664-38-2')
(0.7489774978301237, 'ppm')
>>> STEL('55720-99-5')
(2.0, 'mg/m^3')
>>> STEL('86290-81-5', AvailableMethods=True)
['Ontario Limits', 'None']
[ "This", "function", "handles", "the", "retrieval", "of", "Short", "-", "term", "Exposure", "Limit", "on", "worker", "exposure", "to", "dangerous", "chemicals", "." ]
python
valid
grangier/python-goose
goose/outputformatters.py
https://github.com/grangier/python-goose/blob/09023ec9f5ef26a628a2365616c0a7c864f0ecea/goose/outputformatters.py#L113-L133
def remove_fewwords_paragraphs(self):
    """\
    remove paragraphs that have less than x number of words,
    would indicate that it's some sort of link
    """
    all_nodes = self.parser.getElementsByTags(self.get_top_node(), ['*'])
    all_nodes.reverse()
    for el in all_nodes:
        tag = self.parser.getTag(el)
        text = self.parser.getText(el)
        stop_words = self.stopwords_class(language=self.get_language()).get_stopword_count(text)
        if (tag != 'br' or text != '\\r') and stop_words.get_stopword_count() < 3 \
                and len(self.parser.getElementsByTag(el, tag='object')) == 0 \
                and len(self.parser.getElementsByTag(el, tag='embed')) == 0:
            self.parser.remove(el)
        # TODO
        # check if it is in the right place
        else:
            trimmed = self.parser.getText(el)
            if trimmed.startswith("(") and trimmed.endswith(")"):
                self.parser.remove(el)
[ "def", "remove_fewwords_paragraphs", "(", "self", ")", ":", "all_nodes", "=", "self", ".", "parser", ".", "getElementsByTags", "(", "self", ".", "get_top_node", "(", ")", ",", "[", "'*'", "]", ")", "all_nodes", ".", "reverse", "(", ")", "for", "el", "in", "all_nodes", ":", "tag", "=", "self", ".", "parser", ".", "getTag", "(", "el", ")", "text", "=", "self", ".", "parser", ".", "getText", "(", "el", ")", "stop_words", "=", "self", ".", "stopwords_class", "(", "language", "=", "self", ".", "get_language", "(", ")", ")", ".", "get_stopword_count", "(", "text", ")", "if", "(", "tag", "!=", "'br'", "or", "text", "!=", "'\\\\r'", ")", "and", "stop_words", ".", "get_stopword_count", "(", ")", "<", "3", "and", "len", "(", "self", ".", "parser", ".", "getElementsByTag", "(", "el", ",", "tag", "=", "'object'", ")", ")", "==", "0", "and", "len", "(", "self", ".", "parser", ".", "getElementsByTag", "(", "el", ",", "tag", "=", "'embed'", ")", ")", "==", "0", ":", "self", ".", "parser", ".", "remove", "(", "el", ")", "# TODO", "# check if it is in the right place", "else", ":", "trimmed", "=", "self", ".", "parser", ".", "getText", "(", "el", ")", "if", "trimmed", ".", "startswith", "(", "\"(\"", ")", "and", "trimmed", ".", "endswith", "(", "\")\"", ")", ":", "self", ".", "parser", ".", "remove", "(", "el", ")" ]
\ remove paragraphs that have less than x number of words, would indicate that it's some sort of link
[ "\\", "remove", "paragraphs", "that", "have", "less", "than", "x", "number", "of", "words", "would", "indicate", "that", "it", "s", "some", "sort", "of", "link" ]
python
train
woolfson-group/isambard
isambard/ampal/specifications/assembly_specs/solenoid.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/specifications/assembly_specs/solenoid.py#L61-L68
def build(self):
    """Builds a `HelixPair` using the defined attributes."""
    for i in range(2):
        self._molecules.append(
            self.make_helix(self.aas[i], self.axis_distances[i], self.z_shifts[i],
                            self.phis[i], self.splays[i], self.off_plane[i]))
    return
[ "def", "build", "(", "self", ")", ":", "for", "i", "in", "range", "(", "2", ")", ":", "self", ".", "_molecules", ".", "append", "(", "self", ".", "make_helix", "(", "self", ".", "aas", "[", "i", "]", ",", "self", ".", "axis_distances", "[", "i", "]", ",", "self", ".", "z_shifts", "[", "i", "]", ",", "self", ".", "phis", "[", "i", "]", ",", "self", ".", "splays", "[", "i", "]", ",", "self", ".", "off_plane", "[", "i", "]", ")", ")", "return" ]
Builds a `HelixPair` using the defined attributes.
[ "Builds", "a", "HelixPair", "using", "the", "defined", "attributes", "." ]
python
train
materialsproject/pymatgen-db
matgendb/query_engine.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/query_engine.py#L574-L586
def _wrapper(self, func):
    """
    This function wraps all callable objects returned by self.__getattr__.
    If the result is a cursor, wrap it into a QueryResults object
    so that you can invoke postprocess functions in self._pproc
    """
    def wrapped(*args, **kwargs):
        ret_val = func(*args, **kwargs)
        if isinstance(ret_val, pymongo.cursor.Cursor):
            ret_val = self.from_cursor(ret_val)
        return ret_val
    return wrapped
[ "def", "_wrapper", "(", "self", ",", "func", ")", ":", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ret_val", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "ret_val", ",", "pymongo", ".", "cursor", ".", "Cursor", ")", ":", "ret_val", "=", "self", ".", "from_cursor", "(", "ret_val", ")", "return", "ret_val", "return", "wrapped" ]
This function wraps all callable objects returned by self.__getattr__. If the result is a cursor, wrap it into a QueryResults object so that you can invoke postprocess functions in self._pproc
[ "This", "function", "wraps", "all", "callable", "objects", "returned", "by", "self", ".", "__getattr__", ".", "If", "the", "result", "is", "a", "cursor", "wrap", "it", "into", "a", "QueryResults", "object", "so", "that", "you", "can", "invoke", "postprocess", "functions", "in", "self", ".", "_pproc" ]
python
train
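The wrapping pattern in the record above (intercept the return value of a delegated call and convert it when it has a certain type) is generic. A minimal standalone sketch, independent of pymongo and of this project's classes:

def wrap_calls(func, should_convert, convert):
    # Call func, then convert the result when the predicate matches.
    def wrapped(*args, **kwargs):
        ret_val = func(*args, **kwargs)
        if should_convert(ret_val):
            ret_val = convert(ret_val)
        return ret_val
    return wrapped

# Example: list results come back as tuples.
wrapped = wrap_calls(lambda n: list(range(n)), lambda r: isinstance(r, list), tuple)
print(wrapped(3))  # (0, 1, 2)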
twilio/twilio-python
twilio/base/serialize.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/base/serialize.py#L35-L55
def prefixed_collapsible_map(m, prefix):
    """
    Return a dict of params corresponding to those in m with the added prefix
    """
    if m == values.unset:
        return {}

    def flatten_dict(d, result={}, prv_keys=[]):
        for k, v in d.items():
            if isinstance(v, dict):
                flatten_dict(v, result, prv_keys + [k])
            else:
                result['.'.join(prv_keys + [k])] = v

        return result

    if isinstance(m, dict):
        flattened = flatten_dict(m)
        return {'{}.{}'.format(prefix, k): v for k, v in flattened.items()}

    return {}
[ "def", "prefixed_collapsible_map", "(", "m", ",", "prefix", ")", ":", "if", "m", "==", "values", ".", "unset", ":", "return", "{", "}", "def", "flatten_dict", "(", "d", ",", "result", "=", "{", "}", ",", "prv_keys", "=", "[", "]", ")", ":", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "flatten_dict", "(", "v", ",", "result", ",", "prv_keys", "+", "[", "k", "]", ")", "else", ":", "result", "[", "'.'", ".", "join", "(", "prv_keys", "+", "[", "k", "]", ")", "]", "=", "v", "return", "result", "if", "isinstance", "(", "m", ",", "dict", ")", ":", "flattened", "=", "flatten_dict", "(", "m", ")", "return", "{", "'{}.{}'", ".", "format", "(", "prefix", ",", "k", ")", ":", "v", "for", "k", ",", "v", "in", "flattened", ".", "items", "(", ")", "}", "return", "{", "}" ]
Return a dict of params corresponding to those in m with the added prefix
[ "Return", "a", "dict", "of", "params", "corresponding", "to", "those", "in", "m", "with", "the", "added", "prefix" ]
python
train
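Behaviour sketch of the flattening logic above, with the Twilio-specific values.unset handling stripped out. This is a hypothetical standalone function, not the library helper itself:

def flatten_with_prefix(m, prefix):
    # Flatten nested dicts into dotted keys and add a prefix, e.g.
    # {'a': {'b': 1}} with prefix 'P' -> {'P.a.b': 1}
    def flatten(d, parents=()):
        out = {}
        for k, v in d.items():
            if isinstance(v, dict):
                out.update(flatten(v, parents + (k,)))
            else:
                out['.'.join(parents + (k,))] = v
        return out

    return {'{}.{}'.format(prefix, k): v for k, v in flatten(m).items()}

print(flatten_with_prefix({'a': {'b': 1}, 'c': 2}, 'Attributes'))
# {'Attributes.a.b': 1, 'Attributes.c': 2}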
rcsb/mmtf-python
mmtf/converters/numpy_converters.py
https://github.com/rcsb/mmtf-python/blob/899bb877ca1b32a9396803d38c5bf38a2520754e/mmtf/converters/numpy_converters.py#L17-L23
def decode_chain_list(in_bytes):
    """Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN

    :param in_bytes: the input bytes
    :return the decoded list of strings"""
    bstrings = numpy.frombuffer(in_bytes, numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN)))
    return [s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE) for s in bstrings]
[ "def", "decode_chain_list", "(", "in_bytes", ")", ":", "bstrings", "=", "numpy", ".", "frombuffer", "(", "in_bytes", ",", "numpy", ".", "dtype", "(", "'S'", "+", "str", "(", "mmtf", ".", "utils", ".", "constants", ".", "CHAIN_LEN", ")", ")", ")", "return", "[", "s", ".", "decode", "(", "\"ascii\"", ")", ".", "strip", "(", "mmtf", ".", "utils", ".", "constants", ".", "NULL_BYTE", ")", "for", "s", "in", "bstrings", "]" ]
Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings
[ "Convert", "a", "list", "of", "bytes", "to", "a", "list", "of", "strings", ".", "Each", "string", "is", "of", "length", "mmtf", ".", "CHAIN_LEN" ]
python
train
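A small round-trip sketch of the decoding above. The chain-id width and padding character are hard-coded assumptions here (4 and '\x00'); in the library they come from mmtf.utils.constants:

import numpy

CHAIN_LEN = 4        # assumed width per chain id
NULL_BYTE = '\x00'   # assumed padding character

def decode_chain_list_sketch(in_bytes):
    bstrings = numpy.frombuffer(in_bytes, numpy.dtype('S' + str(CHAIN_LEN)))
    return [s.decode('ascii').strip(NULL_BYTE) for s in bstrings]

print(decode_chain_list_sketch(b'A\x00\x00\x00BB\x00\x00'))  # ['A', 'BB']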
workforce-data-initiative/skills-utils
skills_utils/es.py
https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L154-L159
def replace(self):
    """Replace index with a new one

    zero_downtime_index for safety and rollback
    """
    with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
        self.index_all(target_index)
[ "def", "replace", "(", "self", ")", ":", "with", "zero_downtime_index", "(", "self", ".", "alias_name", ",", "self", ".", "index_config", "(", ")", ")", "as", "target_index", ":", "self", ".", "index_all", "(", "target_index", ")" ]
Replace index with a new one zero_downtime_index for safety and rollback
[ "Replace", "index", "with", "a", "new", "one", "zero_downtime_index", "for", "safety", "and", "rollback" ]
python
train
iKevinY/EulerPy
EulerPy/euler.py
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L70-L74
def skip(num):
    """Generates Python file for the next problem."""
    click.echo("Current problem is problem %i." % num)
    generate(num + 1, prompt_default=False)
    Problem(num).file.change_suffix('-skipped')
[ "def", "skip", "(", "num", ")", ":", "click", ".", "echo", "(", "\"Current problem is problem %i.\"", "%", "num", ")", "generate", "(", "num", "+", "1", ",", "prompt_default", "=", "False", ")", "Problem", "(", "num", ")", ".", "file", ".", "change_suffix", "(", "'-skipped'", ")" ]
Generates Python file for the next problem.
[ "Generates", "Python", "file", "for", "the", "next", "problem", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L14997-L15015
def vrelg(v1, v2, ndim):
    """
    Return the relative difference between two vectors of general dimension.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrelg_c.html

    :param v1: First vector
    :type v1: Array of floats
    :param v2: Second vector
    :type v2: Array of floats
    :param ndim: Dimension of v1 and v2.
    :type ndim: int
    :return: the relative difference between v1 and v2.
    :rtype: float
    """
    v1 = stypes.toDoubleVector(v1)
    v2 = stypes.toDoubleVector(v2)
    ndim = ctypes.c_int(ndim)
    return libspice.vrelg_c(v1, v2, ndim)
[ "def", "vrelg", "(", "v1", ",", "v2", ",", "ndim", ")", ":", "v1", "=", "stypes", ".", "toDoubleVector", "(", "v1", ")", "v2", "=", "stypes", ".", "toDoubleVector", "(", "v2", ")", "ndim", "=", "ctypes", ".", "c_int", "(", "ndim", ")", "return", "libspice", ".", "vrelg_c", "(", "v1", ",", "v2", ",", "ndim", ")" ]
Return the relative difference between two vectors of general dimension. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrelg_c.html :param v1: First vector :type v1: Array of floats :param v2: Second vector :type v2: Array of floats :param ndim: Dimension of v1 and v2. :type ndim: int :return: the relative difference between v1 and v2. :rtype: float
[ "Return", "the", "relative", "difference", "between", "two", "vectors", "of", "general", "dimension", "." ]
python
train
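Assuming spiceypy is installed, calling the wrapper is straightforward. Per the NAIF documentation linked in the docstring, identical vectors should give 0.0 and orthogonal unit vectors roughly 1.414 (norm of the difference divided by the larger norm), but treat these expected values as an unverified sketch:

import spiceypy as spice

print(spice.vrelg([1.0, 0.0, 0.0], [1.0, 0.0, 0.0], 3))  # 0.0
print(spice.vrelg([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 3))  # ~1.414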
wrobstory/vincent
vincent/visualization.py
https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/visualization.py#L241-L258
def common_axis_properties(self, color=None, title_size=None):
    """Set common axis properties such as color

    Parameters
    ----------
    color: str, default None
        Hex color str, etc
    """
    if self.axes:
        for axis in self.axes:
            self._set_axis_properties(axis)
            self._set_all_axis_color(axis, color)
            if title_size:
                ref = ValueRef(value=title_size)
                axis.properties.title.font_size = ref
    else:
        raise ValueError('This Visualization has no axes!')
    return self
[ "def", "common_axis_properties", "(", "self", ",", "color", "=", "None", ",", "title_size", "=", "None", ")", ":", "if", "self", ".", "axes", ":", "for", "axis", "in", "self", ".", "axes", ":", "self", ".", "_set_axis_properties", "(", "axis", ")", "self", ".", "_set_all_axis_color", "(", "axis", ",", "color", ")", "if", "title_size", ":", "ref", "=", "ValueRef", "(", "value", "=", "title_size", ")", "axis", ".", "properties", ".", "title", ".", "font_size", "=", "ref", "else", ":", "raise", "ValueError", "(", "'This Visualization has no axes!'", ")", "return", "self" ]
Set common axis properties such as color Parameters ---------- color: str, default None Hex color str, etc
[ "Set", "common", "axis", "properties", "such", "as", "color" ]
python
train
aiidalab/aiidalab-widgets-base
aiidalab_widgets_base/computers.py
https://github.com/aiidalab/aiidalab-widgets-base/blob/291a9b159eac902aee655862322670ec1b0cd5b1/aiidalab_widgets_base/computers.py#L247-L308
def on_setup_ssh(self, b):
    """ATTENTION: modifying the order of operations in this function can lead to unexpected problems"""
    with self._setup_ssh_out:
        clear_output()
        self._ssh_keygen()

        #temporary passwords
        password = self.__password
        proxy_password = self.__proxy_password

        # step 1: if hostname is not provided - do not do anything
        if self.hostname is None:  # check hostname
            print("Please specify the computer hostname")
            return

        # step 2: check if password-free access was enabled earlier
        if self.can_login():
            print ("Password-free access is already enabled")
            # it can still happen that password-free access is enabled
            # but host is not present in the config file - fixing this
            if not self.is_in_config():
                self._write_ssh_config()  # we do not use proxy here, because if computer
                # can be accessed without any info in the config - proxy is not needed.
                self.setup_counter += 1  # only if config file has changed - increase setup_counter
            return

        # step 3: if can't login already, check whether all required information is provided
        if self.username is None:  # check username
            print("Please enter your ssh username")
            return
        if len(password.strip()) == 0:  # check password
            print("Please enter your ssh password")
            return

        # step 4: get the right commands to access the proxy server (if provided)
        success, proxycmd = self._configure_proxy(password, proxy_password)
        if not success:
            return

        # step 5: make host known by ssh on the proxy server
        if not self.is_host_known():
            self._make_host_known(self.hostname, ['ssh'] + [proxycmd] if proxycmd else [])

        # step 6: sending public key to the main host
        if not self._send_pubkey(self.hostname, self.username, password, proxycmd):
            print ("Could not send public key to {}".format(self.hostname))
            return

        # step 7: modify the ssh config file if necessary
        if not self.is_in_config():
            self._write_ssh_config(proxycmd=proxycmd)
        # TODO: add a check if new config is different from the current one. If so
        # inform the user about it.

        # step 8: final check
        if self.can_login():
            self.setup_counter += 1
            print("Automatic ssh setup successful :-)")
            return
        else:
            print("Automatic ssh setup failed, sorry :-(")
            return
[ "def", "on_setup_ssh", "(", "self", ",", "b", ")", ":", "with", "self", ".", "_setup_ssh_out", ":", "clear_output", "(", ")", "self", ".", "_ssh_keygen", "(", ")", "#temporary passwords", "password", "=", "self", ".", "__password", "proxy_password", "=", "self", ".", "__proxy_password", "# step 1: if hostname is not provided - do not do anything", "if", "self", ".", "hostname", "is", "None", ":", "# check hostname", "print", "(", "\"Please specify the computer hostname\"", ")", "return", "# step 2: check if password-free access was enabled earlier", "if", "self", ".", "can_login", "(", ")", ":", "print", "(", "\"Password-free access is already enabled\"", ")", "# it can still happen that password-free access is enabled", "# but host is not present in the config file - fixing this", "if", "not", "self", ".", "is_in_config", "(", ")", ":", "self", ".", "_write_ssh_config", "(", ")", "# we do not use proxy here, because if computer", "# can be accessed without any info in the config - proxy is not needed.", "self", ".", "setup_counter", "+=", "1", "# only if config file has changed - increase setup_counter", "return", "# step 3: if can't login already, chek whether all required information is provided", "if", "self", ".", "username", "is", "None", ":", "# check username", "print", "(", "\"Please enter your ssh username\"", ")", "return", "if", "len", "(", "password", ".", "strip", "(", ")", ")", "==", "0", ":", "# check password", "print", "(", "\"Please enter your ssh password\"", ")", "return", "# step 4: get the right commands to access the proxy server (if provided)", "success", ",", "proxycmd", "=", "self", ".", "_configure_proxy", "(", "password", ",", "proxy_password", ")", "if", "not", "success", ":", "return", "# step 5: make host known by ssh on the proxy server", "if", "not", "self", ".", "is_host_known", "(", ")", ":", "self", ".", "_make_host_known", "(", "self", ".", "hostname", ",", "[", "'ssh'", "]", "+", "[", "proxycmd", "]", "if", "proxycmd", "else", "[", "]", ")", "# step 6: sending public key to the main host", "if", "not", "self", ".", "_send_pubkey", "(", "self", ".", "hostname", ",", "self", ".", "username", ",", "password", ",", "proxycmd", ")", ":", "print", "(", "\"Could not send public key to {}\"", ".", "format", "(", "self", ".", "hostname", ")", ")", "return", "# step 7: modify the ssh config file if necessary", "if", "not", "self", ".", "is_in_config", "(", ")", ":", "self", ".", "_write_ssh_config", "(", "proxycmd", "=", "proxycmd", ")", "# TODO: add a check if new config is different from the current one. If so", "# infrom the user about it.", "# step 8: final check", "if", "self", ".", "can_login", "(", ")", ":", "self", ".", "setup_counter", "+=", "1", "print", "(", "\"Automatic ssh setup successful :-)\"", ")", "return", "else", ":", "print", "(", "\"Automatic ssh setup failed, sorry :-(\"", ")", "return" ]
ATTENTION: modifying the order of operations in this function can lead to unexpected problems
[ "ATTENTION", ":", "modifying", "the", "order", "of", "operations", "in", "this", "function", "can", "lead", "to", "unexpected", "problems" ]
python
train
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L1298-L1318
async def stepper_step(self, motor_speed, number_of_steps):
    """
    Move a stepper motor for the number of steps at the specified speed
    This is a FirmataPlus feature.

    :param motor_speed: 21 bits of data to set motor speed

    :param number_of_steps: 14 bits for number of steps & direction
                            positive is forward, negative is reverse

    :returns: No return value.
    """
    if number_of_steps > 0:
        direction = 1
    else:
        direction = 0
    abs_number_of_steps = abs(number_of_steps)
    data = [PrivateConstants.STEPPER_STEP, motor_speed & 0x7f,
            (motor_speed >> 7) & 0x7f, (motor_speed >> 14) & 0x7f,
            abs_number_of_steps & 0x7f, (abs_number_of_steps >> 7) & 0x7f,
            direction]
    await self._send_sysex(PrivateConstants.STEPPER_DATA, data)
[ "async", "def", "stepper_step", "(", "self", ",", "motor_speed", ",", "number_of_steps", ")", ":", "if", "number_of_steps", ">", "0", ":", "direction", "=", "1", "else", ":", "direction", "=", "0", "abs_number_of_steps", "=", "abs", "(", "number_of_steps", ")", "data", "=", "[", "PrivateConstants", ".", "STEPPER_STEP", ",", "motor_speed", "&", "0x7f", ",", "(", "motor_speed", ">>", "7", ")", "&", "0x7f", ",", "(", "motor_speed", ">>", "14", ")", "&", "0x7f", ",", "abs_number_of_steps", "&", "0x7f", ",", "(", "abs_number_of_steps", ">>", "7", ")", "&", "0x7f", ",", "direction", "]", "await", "self", ".", "_send_sysex", "(", "PrivateConstants", ".", "STEPPER_DATA", ",", "data", ")" ]
Move a stepper motor for the number of steps at the specified speed This is a FirmataPlus feature. :param motor_speed: 21 bits of data to set motor speed :param number_of_steps: 14 bits for number of steps & direction positive is forward, negative is reverse :returns: No return value.
[ "Move", "a", "stepper", "motor", "for", "the", "number", "of", "steps", "at", "the", "specified", "speed", "This", "is", "a", "FirmataPlus", "feature", "." ]
python
train
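The interesting detail in the record above is how the 21-bit speed and 14-bit step count are split into 7-bit chunks for Firmata sysex transport. A standalone sketch of that packing, not pymata-aio code:

def pack_7bit(value, num_chunks):
    # Split a non-negative integer into little-endian 7-bit chunks,
    # the framing Firmata sysex messages require.
    return [(value >> (7 * i)) & 0x7F for i in range(num_chunks)]

print(pack_7bit(1000, 3))  # speed, 21 bits -> 3 chunks -> [104, 7, 0]
print(pack_7bit(512, 2))   # steps, 14 bits -> 2 chunks -> [0, 4]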
MostAwesomeDude/gentleman
gentleman/base.py
https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/base.py#L545-L588
def ReplaceInstanceDisks(r, instance, disks=None, mode=REPLACE_DISK_AUTO,
                         remote_node=None, iallocator=None, dry_run=False):
    """
    Replaces disks on an instance.

    @type instance: str
    @param instance: instance whose disks to replace
    @type disks: list of ints
    @param disks: Indexes of disks to replace
    @type mode: str
    @param mode: replacement mode to use (defaults to replace_auto)
    @type remote_node: str or None
    @param remote_node: new secondary node to use (for use with
        replace_new_secondary mode)
    @type iallocator: str or None
    @param iallocator: instance allocator plugin to use (for use with
        replace_auto mode)
    @type dry_run: bool
    @param dry_run: whether to perform a dry run

    @rtype: int
    @return: job id
    """
    if mode not in REPLACE_DISK:
        raise GanetiApiError("Invalid mode %r not one of %r" % (mode,
                                                                REPLACE_DISK))

    query = {
        "mode": mode,
        "dry-run": dry_run,
    }

    if disks:
        query["disks"] = ",".join(str(idx) for idx in disks)

    if remote_node:
        query["remote_node"] = remote_node

    if iallocator:
        query["iallocator"] = iallocator

    return r.request("post", "/2/instances/%s/replace-disks" % instance,
                     query=query)
[ "def", "ReplaceInstanceDisks", "(", "r", ",", "instance", ",", "disks", "=", "None", ",", "mode", "=", "REPLACE_DISK_AUTO", ",", "remote_node", "=", "None", ",", "iallocator", "=", "None", ",", "dry_run", "=", "False", ")", ":", "if", "mode", "not", "in", "REPLACE_DISK", ":", "raise", "GanetiApiError", "(", "\"Invalid mode %r not one of %r\"", "%", "(", "mode", ",", "REPLACE_DISK", ")", ")", "query", "=", "{", "\"mode\"", ":", "mode", ",", "\"dry-run\"", ":", "dry_run", ",", "}", "if", "disks", ":", "query", "[", "\"disks\"", "]", "=", "\",\"", ".", "join", "(", "str", "(", "idx", ")", "for", "idx", "in", "disks", ")", "if", "remote_node", ":", "query", "[", "\"remote_node\"", "]", "=", "remote_node", "if", "iallocator", ":", "query", "[", "\"iallocator\"", "]", "=", "iallocator", "return", "r", ".", "request", "(", "\"post\"", ",", "\"/2/instances/%s/replace-disks\"", "%", "instance", ",", "query", "=", "query", ")" ]
Replaces disks on an instance. @type instance: str @param instance: instance whose disks to replace @type disks: list of ints @param disks: Indexes of disks to replace @type mode: str @param mode: replacement mode to use (defaults to replace_auto) @type remote_node: str or None @param remote_node: new secondary node to use (for use with replace_new_secondary mode) @type iallocator: str or None @param iallocator: instance allocator plugin to use (for use with replace_auto mode) @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id
[ "Replaces", "disks", "on", "an", "instance", "." ]
python
train
kushaldas/retask
retask/queue.py
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L130-L162
def wait(self, wait_time=0):
    """
    Returns a :class:`~retask.task.Task` object from the queue. Returns ``False`` if it timeouts.

    :arg wait_time: Time in seconds to wait, default is infinite.

    :return: :class:`~retask.task.Task` object from the queue or False if it timeouts.

    .. doctest::

       >>> from retask import Queue
       >>> q = Queue('test')
       >>> q.connect()
       True
       >>> task = q.wait()
       >>> print task.data
       {u'name': u'kushal'}

    .. note::

       This is a blocking call, you can specity wait_time argument for timeout.

    """
    if not self.connected:
        raise ConnectionError('Queue is not connected')

    data = self.rdb.brpop(self._name, wait_time)
    if data:
        task = Task()
        task.__dict__ = json.loads(data[1])
        return task
    else:
        return False
[ "def", "wait", "(", "self", ",", "wait_time", "=", "0", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "data", "=", "self", ".", "rdb", ".", "brpop", "(", "self", ".", "_name", ",", "wait_time", ")", "if", "data", ":", "task", "=", "Task", "(", ")", "task", ".", "__dict__", "=", "json", ".", "loads", "(", "data", "[", "1", "]", ")", "return", "task", "else", ":", "return", "False" ]
Returns a :class:`~retask.task.Task` object from the queue. Returns ``False`` if it timeouts. :arg wait_time: Time in seconds to wait, default is infinite. :return: :class:`~retask.task.Task` object from the queue or False if it timeouts. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> task = q.wait() >>> print task.data {u'name': u'kushal'} .. note:: This is a blocking call, you can specity wait_time argument for timeout.
[ "Returns", "a", ":", "class", ":", "~retask", ".", "task", ".", "Task", "object", "from", "the", "queue", ".", "Returns", "False", "if", "it", "timeouts", "." ]
python
train
inasafe/inasafe
safe/gui/widgets/dock.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L1660-L1699
def remove_provenance_project_variables():
    """Removing variables from provenance data."""
    project_context_scope = QgsExpressionContextUtils.projectScope(
        QgsProject.instance())
    existing_variable_names = project_context_scope.variableNames()

    # Save the existing variables that's not provenance variable.
    existing_variables = {}
    for existing_variable_name in existing_variable_names:
        existing_variables[existing_variable_name] = \
            project_context_scope.variable(existing_variable_name)

    for the_provenance in provenance_list:
        if the_provenance['provenance_key'] in existing_variables:
            existing_variables.pop(the_provenance['provenance_key'])

    # Removing generated key from dictionary (e.g.
    # action_checklist__0__item_list__0)
    will_be_removed = []
    for existing_variable in existing_variables:
        for the_provenance in provenance_list:
            if existing_variable.startswith(
                    the_provenance['provenance_key']):
                will_be_removed.append(existing_variable)
                continue
    for variable in will_be_removed:
        existing_variables.pop(variable)

    # Need to change to None, to be able to store it back.
    non_null_existing_variables = {}
    for k, v in list(existing_variables.items()):
        if v is None or (hasattr(v, 'isNull') and v.isNull()):
            non_null_existing_variables[k] = None
        else:
            non_null_existing_variables[k] = v

    # This method will set non_null_existing_variables, and remove the
    # other variable
    QgsExpressionContextUtils.setProjectVariables(
        QgsProject.instance(), non_null_existing_variables)
[ "def", "remove_provenance_project_variables", "(", ")", ":", "project_context_scope", "=", "QgsExpressionContextUtils", ".", "projectScope", "(", "QgsProject", ".", "instance", "(", ")", ")", "existing_variable_names", "=", "project_context_scope", ".", "variableNames", "(", ")", "# Save the existing variables that's not provenance variable.", "existing_variables", "=", "{", "}", "for", "existing_variable_name", "in", "existing_variable_names", ":", "existing_variables", "[", "existing_variable_name", "]", "=", "project_context_scope", ".", "variable", "(", "existing_variable_name", ")", "for", "the_provenance", "in", "provenance_list", ":", "if", "the_provenance", "[", "'provenance_key'", "]", "in", "existing_variables", ":", "existing_variables", ".", "pop", "(", "the_provenance", "[", "'provenance_key'", "]", ")", "# Removing generated key from dictionary (e.g.", "# action_checklist__0__item_list__0)", "will_be_removed", "=", "[", "]", "for", "existing_variable", "in", "existing_variables", ":", "for", "the_provenance", "in", "provenance_list", ":", "if", "existing_variable", ".", "startswith", "(", "the_provenance", "[", "'provenance_key'", "]", ")", ":", "will_be_removed", ".", "append", "(", "existing_variable", ")", "continue", "for", "variable", "in", "will_be_removed", ":", "existing_variables", ".", "pop", "(", "variable", ")", "# Need to change to None, to be able to store it back.", "non_null_existing_variables", "=", "{", "}", "for", "k", ",", "v", "in", "list", "(", "existing_variables", ".", "items", "(", ")", ")", ":", "if", "v", "is", "None", "or", "(", "hasattr", "(", "v", ",", "'isNull'", ")", "and", "v", ".", "isNull", "(", ")", ")", ":", "non_null_existing_variables", "[", "k", "]", "=", "None", "else", ":", "non_null_existing_variables", "[", "k", "]", "=", "v", "# This method will set non_null_existing_variables, and remove the", "# other variable", "QgsExpressionContextUtils", ".", "setProjectVariables", "(", "QgsProject", ".", "instance", "(", ")", ",", "non_null_existing_variables", ")" ]
Removing variables from provenance data.
[ "Removing", "variables", "from", "provenance", "data", "." ]
python
train
skorch-dev/skorch
examples/cli/train.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/examples/cli/train.py#L137-L144
def save_model(model, output_file):
    """Save model to output_file, if given"""
    if not output_file:
        return

    with open(output_file, 'wb') as f:
        pickle.dump(model, f)
    print("Saved model to file '{}'.".format(output_file))
[ "def", "save_model", "(", "model", ",", "output_file", ")", ":", "if", "not", "output_file", ":", "return", "with", "open", "(", "output_file", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "model", ",", "f", ")", "print", "(", "\"Saved model to file '{}'.\"", ".", "format", "(", "output_file", ")", ")" ]
Save model to output_file, if given
[ "Save", "model", "to", "output_file", "if", "given" ]
python
train
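Usage is as simple as the docstring suggests. A hedged example with a throwaway picklable object standing in for a trained model (the file name is arbitrary):

save_model({'weights': [1, 2, 3]}, 'model.pkl')  # writes the pickle and prints a confirmation
save_model({'weights': [1, 2, 3]}, '')           # falsy output_file: silently does nothing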
pyannote/pyannote-metrics
pyannote/metrics/base.py
https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/base.py#L144-L219
def report(self, display=False):
    """Evaluation report

    Parameters
    ----------
    display : bool, optional
        Set to True to print the report to stdout.

    Returns
    -------
    report : pandas.DataFrame
        Dataframe with one column per metric component, one row per
        evaluated item, and one final row for accumulated results.
    """

    report = []
    uris = []

    percent = 'total' in self.metric_components()

    for uri, components in self.results_:
        row = {}
        if percent:
            total = components['total']
        for key, value in components.items():
            if key == self.name:
                row[key, '%'] = 100 * value
            elif key == 'total':
                row[key, ''] = value
            else:
                row[key, ''] = value
                if percent:
                    if total > 0:
                        row[key, '%'] = 100 * value / total
                    else:
                        row[key, '%'] = np.NaN

        report.append(row)
        uris.append(uri)

    row = {}
    components = self.accumulated_

    if percent:
        total = components['total']

    for key, value in components.items():
        if key == self.name:
            row[key, '%'] = 100 * value
        elif key == 'total':
            row[key, ''] = value
        else:
            row[key, ''] = value
            if percent:
                if total > 0:
                    row[key, '%'] = 100 * value / total
                else:
                    row[key, '%'] = np.NaN

    row[self.name, '%'] = 100 * abs(self)

    report.append(row)
    uris.append('TOTAL')

    df = pd.DataFrame(report)
    df['item'] = uris
    df = df.set_index('item')
    df.columns = pd.MultiIndex.from_tuples(df.columns)
    df = df[[self.name] + self.metric_components()]

    if display:
        print(df.to_string(index=True, sparsify=False, justify='right',
                           float_format=lambda f: '{0:.2f}'.format(f)))

    return df
[ "def", "report", "(", "self", ",", "display", "=", "False", ")", ":", "report", "=", "[", "]", "uris", "=", "[", "]", "percent", "=", "'total'", "in", "self", ".", "metric_components", "(", ")", "for", "uri", ",", "components", "in", "self", ".", "results_", ":", "row", "=", "{", "}", "if", "percent", ":", "total", "=", "components", "[", "'total'", "]", "for", "key", ",", "value", "in", "components", ".", "items", "(", ")", ":", "if", "key", "==", "self", ".", "name", ":", "row", "[", "key", ",", "'%'", "]", "=", "100", "*", "value", "elif", "key", "==", "'total'", ":", "row", "[", "key", ",", "''", "]", "=", "value", "else", ":", "row", "[", "key", ",", "''", "]", "=", "value", "if", "percent", ":", "if", "total", ">", "0", ":", "row", "[", "key", ",", "'%'", "]", "=", "100", "*", "value", "/", "total", "else", ":", "row", "[", "key", ",", "'%'", "]", "=", "np", ".", "NaN", "report", ".", "append", "(", "row", ")", "uris", ".", "append", "(", "uri", ")", "row", "=", "{", "}", "components", "=", "self", ".", "accumulated_", "if", "percent", ":", "total", "=", "components", "[", "'total'", "]", "for", "key", ",", "value", "in", "components", ".", "items", "(", ")", ":", "if", "key", "==", "self", ".", "name", ":", "row", "[", "key", ",", "'%'", "]", "=", "100", "*", "value", "elif", "key", "==", "'total'", ":", "row", "[", "key", ",", "''", "]", "=", "value", "else", ":", "row", "[", "key", ",", "''", "]", "=", "value", "if", "percent", ":", "if", "total", ">", "0", ":", "row", "[", "key", ",", "'%'", "]", "=", "100", "*", "value", "/", "total", "else", ":", "row", "[", "key", ",", "'%'", "]", "=", "np", ".", "NaN", "row", "[", "self", ".", "name", ",", "'%'", "]", "=", "100", "*", "abs", "(", "self", ")", "report", ".", "append", "(", "row", ")", "uris", ".", "append", "(", "'TOTAL'", ")", "df", "=", "pd", ".", "DataFrame", "(", "report", ")", "df", "[", "'item'", "]", "=", "uris", "df", "=", "df", ".", "set_index", "(", "'item'", ")", "df", ".", "columns", "=", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "df", ".", "columns", ")", "df", "=", "df", "[", "[", "self", ".", "name", "]", "+", "self", ".", "metric_components", "(", ")", "]", "if", "display", ":", "print", "(", "df", ".", "to_string", "(", "index", "=", "True", ",", "sparsify", "=", "False", ",", "justify", "=", "'right'", ",", "float_format", "=", "lambda", "f", ":", "'{0:.2f}'", ".", "format", "(", "f", ")", ")", ")", "return", "df" ]
Evaluation report Parameters ---------- display : bool, optional Set to True to print the report to stdout. Returns ------- report : pandas.DataFrame Dataframe with one column per metric component, one row per evaluated item, and one final row for accumulated results.
[ "Evaluation", "report" ]
python
train
lpantano/seqcluster
seqcluster/db/__init__.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/db/__init__.py#L78-L96
def _insert_data(con, data):
    """ insert line for each cluster """
    with con:
        cur = con.cursor()
        cur.execute("DROP TABLE IF EXISTS clusters;")
        cur.execute("CREATE TABLE clusters(Id INT, Description TEXT, Locus TEXT, Annotation TEXT, Sequences TEXT, Profile TXT, Precursor TXT)")
        for c in data[0]:
            locus = json.dumps(data[0][c]['loci'])
            annotation = json.dumps(data[0][c]['ann'])
            description = _get_description(data[0][c]['ann'])
            sequences = json.dumps(_get_sequences(data[0][c]))
            keys = data[0][c]['freq'][0].values()[0].keys()
            profile = "Not available."
            if 'profile' in data[0][c]:
                profile = json.dumps(_set_format(data[0][c]['profile']))
            precursor = json.dumps(data[0][c].get('precursor'))
            cur.execute("INSERT INTO clusters VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s')"
                        % (c, description, locus, annotation, sequences, profile, precursor))
[ "def", "_insert_data", "(", "con", ",", "data", ")", ":", "with", "con", ":", "cur", "=", "con", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"DROP TABLE IF EXISTS clusters;\"", ")", "cur", ".", "execute", "(", "\"CREATE TABLE clusters(Id INT, Description TEXT, Locus TEXT, Annotation TEXT, Sequences TEXT, Profile TXT, Precursor TXT)\"", ")", "for", "c", "in", "data", "[", "0", "]", ":", "locus", "=", "json", ".", "dumps", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'loci'", "]", ")", "annotation", "=", "json", ".", "dumps", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'ann'", "]", ")", "description", "=", "_get_description", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'ann'", "]", ")", "sequences", "=", "json", ".", "dumps", "(", "_get_sequences", "(", "data", "[", "0", "]", "[", "c", "]", ")", ")", "keys", "=", "data", "[", "0", "]", "[", "c", "]", "[", "'freq'", "]", "[", "0", "]", ".", "values", "(", ")", "[", "0", "]", ".", "keys", "(", ")", "profile", "=", "\"Not available.\"", "if", "'profile'", "in", "data", "[", "0", "]", "[", "c", "]", ":", "profile", "=", "json", ".", "dumps", "(", "_set_format", "(", "data", "[", "0", "]", "[", "c", "]", "[", "'profile'", "]", ")", ")", "precursor", "=", "json", ".", "dumps", "(", "data", "[", "0", "]", "[", "c", "]", ".", "get", "(", "'precursor'", ")", ")", "cur", ".", "execute", "(", "\"INSERT INTO clusters VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s')\"", "%", "(", "c", ",", "description", ",", "locus", ",", "annotation", ",", "sequences", ",", "profile", ",", "precursor", ")", ")" ]
insert line for each cluster
[ "insert", "line", "for", "each", "cluster" ]
python
train
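The INSERT in the record above builds SQL with % string formatting, which breaks as soon as a description contains a quote. A hedged sketch of the same insert using parameterized placeholders (sqlite3), shown only as an alternative pattern, not as the project's code:

import json
import sqlite3

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE clusters(Id INT, Description TEXT, Locus TEXT, "
            "Annotation TEXT, Sequences TEXT, Profile TEXT, Precursor TEXT)")
row = (1, "it's a cluster", json.dumps(['chr1:100-200']), json.dumps({}),
       json.dumps({}), "Not available.", json.dumps(None))
con.execute("INSERT INTO clusters VALUES(?, ?, ?, ?, ?, ?, ?)", row)  # placeholders escape safely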
jbm950/pygame_toolbox
pygame_toolbox/graphics/__init__.py
https://github.com/jbm950/pygame_toolbox/blob/3fe32145fc149e4dd0963c30a2b6a4dddd4fac0e/pygame_toolbox/graphics/__init__.py#L396-L439
def set_offset(self, offset, mid=None):
    """This method will allow the menu to be placed anywhere in the open
    window instead of just the upper left corner.
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Inputs:
        offset - This is the x,y tuple of the position that you want to
            move the screen to.

        mid - The offset will be treated as the value passed in instead of
            the top left pixel.
            'x' (the x point in offset will be treated as the middle of the
                menu image)
            'y' (the y point in offset will be treated as the middle of the
                menu image)
            'c' (the offset will be treated as the center of the menu
                image)

    (doc string updated ver 0.1)
    """
    if mid:
        imagesize = self.image.get_size()
        imagemidp = (int(imagesize[0] * 0.5), int(imagesize[1] * 0.5))
        if mid == 'x':
            offset = (offset[0] - imagemidp[0], offset[1])
        if mid == 'y':
            offset = (offset[0], offset[1] - imagemidp[1])
        if mid == 'c':
            offset = (offset[0] - imagemidp[0], offset[1] - imagemidp[1])

    self.pos = offset
    for i in self.buttonlist:
        i.rect[0] += offset[0]
        i.rect[1] += offset[1]
    try:
        for i in self.widgetlist:
            i.rect[0] += offset[0]
            i.rect[1] += offset[1]
    except AttributeError:
        pass
[ "def", "set_offset", "(", "self", ",", "offset", ",", "mid", "=", "None", ")", ":", "if", "mid", ":", "imagesize", "=", "self", ".", "image", ".", "get_size", "(", ")", "imagemidp", "=", "(", "int", "(", "imagesize", "[", "0", "]", "*", "0.5", ")", ",", "int", "(", "imagesize", "[", "1", "]", "*", "0.5", ")", ")", "if", "mid", "==", "'x'", ":", "offset", "=", "(", "offset", "[", "0", "]", "-", "imagemidp", "[", "0", "]", ",", "offset", "[", "1", "]", ")", "if", "mid", "==", "'y'", ":", "offset", "=", "(", "offset", "[", "0", "]", ",", "offset", "[", "1", "]", "-", "imagemidp", "[", "1", "]", ")", "if", "mid", "==", "'c'", ":", "offset", "=", "(", "offset", "[", "0", "]", "-", "imagemidp", "[", "0", "]", ",", "offset", "[", "1", "]", "-", "imagemidp", "[", "1", "]", ")", "self", ".", "pos", "=", "offset", "for", "i", "in", "self", ".", "buttonlist", ":", "i", ".", "rect", "[", "0", "]", "+=", "offset", "[", "0", "]", "i", ".", "rect", "[", "1", "]", "+=", "offset", "[", "1", "]", "try", ":", "for", "i", "in", "self", ".", "widgetlist", ":", "i", ".", "rect", "[", "0", "]", "+=", "offset", "[", "0", "]", "i", ".", "rect", "[", "1", "]", "+=", "offset", "[", "1", "]", "except", "AttributeError", ":", "pass" ]
This method will allow the menu to be placed anywhere in the open window instead of just the upper left corner. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: offset - This is the x,y tuple of the position that you want to move the screen to. mid - The offset will be treated as the value passed in instead of the top left pixel. 'x' (the x point in offset will be treated as the middle of the menu image) 'y' (the y point in offset will be treated as the middle of the menu image) 'c' (the offset will be treated as the center of the menu image) (doc string updated ver 0.1)
[ "This", "method", "will", "allow", "the", "menu", "to", "be", "placed", "anywhere", "in", "the", "open", "window", "instead", "of", "just", "the", "upper", "left", "corner", ".", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", "Inputs", ":", "offset", "-", "This", "is", "the", "x", "y", "tuple", "of", "the", "position", "that", "you", "want", "to", "move", "the", "screen", "to", "." ]
python
train
PyCQA/pylint-django
pylint_django/augmentations/__init__.py
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L483-L498
def _attribute_is_magic(node, attrs, parents):
    """Checks that node is an attribute used inside one of allowed parents"""
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        pass
    return False
[ "def", "_attribute_is_magic", "(", "node", ",", "attrs", ",", "parents", ")", ":", "if", "node", ".", "attrname", "not", "in", "attrs", ":", "return", "False", "if", "not", "node", ".", "last_child", "(", ")", ":", "return", "False", "try", ":", "for", "cls", "in", "node", ".", "last_child", "(", ")", ".", "inferred", "(", ")", ":", "if", "isinstance", "(", "cls", ",", "Super", ")", ":", "cls", "=", "cls", ".", "_self_class", "# pylint: disable=protected-access", "if", "node_is_subclass", "(", "cls", ",", "*", "parents", ")", "or", "cls", ".", "qname", "(", ")", "in", "parents", ":", "return", "True", "except", "InferenceError", ":", "pass", "return", "False" ]
Checks that node is an attribute used inside one of allowed parents
[ "Checks", "that", "node", "is", "an", "attribute", "used", "inside", "one", "of", "allowed", "parents" ]
python
train
odlgroup/odl
odl/contrib/mrc/mrc.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L477-L589
def read_extended_header(self, groupby='field', force_type=''): """Read the extended header according to `extended_header_type`. Currently, only the FEI extended header format is supported. See `print_fei_ext_header_spec` or `this homepage`_ for the format specification. The extended header usually has one header section per image (slice), in case of the FEI header 128 bytes each, with a total of 1024 sections. Parameters ---------- groupby : {'field', 'section'}, optional How to group the values in the extended header sections. ``'field'`` : make an array per section field, e.g.:: 'defocus': [dval1, dval2, ..., dval1024], 'exp_time': [tval1, tval2, ..., tval1024], ... ``'section'`` : make a dictionary for each section, e.g.:: {'defocus': dval1, 'exp_time': tval1}, {'defocus': dval2, 'exp_time': tval2}, ... If the number of images is smaller than 1024, the last values are all set to zero. force_type : string, optional If given, this value overrides the `extended_header_type` from `header`. Currently supported: ``'FEI1'`` Returns ------- ext_header: `OrderedDict` or tuple For ``groupby == 'field'``, a dictionary with the field names as keys, like in the example. For ``groupby == 'section'``, a tuple of dictionaries as shown above. The returned data structures store no offsets, in contrast to the regular header. See Also -------- References ---------- .. _this homepage: http://www.2dx.unibas.ch/documentation/mrc-software/fei-\ extended-mrc-format-not-used-by-2dx """ ext_header_type = str(force_type).upper() or self.extended_header_type if ext_header_type != 'FEI1': raise ValueError("extended header type '{}' not supported" "".format(self.extended_header_type)) groupby, groupby_in = str(groupby).lower(), groupby ext_header_len = int(self.header['nsymbt']['value']) if ext_header_len % MRC_FEI_SECTION_SIZE: raise ValueError('extended header length {} from header is ' 'not divisible by extended header section size ' '{}'.format(ext_header_len, MRC_FEI_SECTION_SIZE)) num_sections = ext_header_len // MRC_FEI_SECTION_SIZE if num_sections != MRC_FEI_NUM_SECTIONS: raise ValueError('calculated number of sections ({}) not equal to ' 'expected number of sections ({})' ''.format(num_sections, MRC_FEI_NUM_SECTIONS)) section_fields = header_fields_from_table( MRC_FEI_EXT_HEADER_SECTION, keys=MRC_SPEC_KEYS, dtype_map=MRC_DTYPE_TO_NPY_DTYPE) # Make a list for each field and append the values for that # field. Then create an array from that list and store it # under the field name. ext_header = OrderedDict() for field in section_fields: value_list = [] field_offset = field['offset'] field_dtype = field['dtype'] field_dshape = field['dshape'] # Compute some parameters num_items = int(np.prod(field_dshape)) size_bytes = num_items * field_dtype.itemsize fmt = '{}{}'.format(num_items, field_dtype.char) for section in range(num_sections): # Get the bytestring from the right position in the file, # unpack it and append the value to the list. start = section * MRC_FEI_SECTION_SIZE + field_offset self.file.seek(start) packed_value = self.file.read(size_bytes) value_list.append(struct.unpack(fmt, packed_value)) ext_header[field['name']] = np.array(value_list, dtype=field_dtype) if groupby == 'field': return ext_header elif groupby == 'section': # Transpose the data and return as tuple. return tuple({key: ext_header[key][i] for key in ext_header} for i in range(num_sections)) else: raise ValueError("`groupby` '{}' not understood" "".format(groupby_in))
[ "def", "read_extended_header", "(", "self", ",", "groupby", "=", "'field'", ",", "force_type", "=", "''", ")", ":", "ext_header_type", "=", "str", "(", "force_type", ")", ".", "upper", "(", ")", "or", "self", ".", "extended_header_type", "if", "ext_header_type", "!=", "'FEI1'", ":", "raise", "ValueError", "(", "\"extended header type '{}' not supported\"", "\"\"", ".", "format", "(", "self", ".", "extended_header_type", ")", ")", "groupby", ",", "groupby_in", "=", "str", "(", "groupby", ")", ".", "lower", "(", ")", ",", "groupby", "ext_header_len", "=", "int", "(", "self", ".", "header", "[", "'nsymbt'", "]", "[", "'value'", "]", ")", "if", "ext_header_len", "%", "MRC_FEI_SECTION_SIZE", ":", "raise", "ValueError", "(", "'extended header length {} from header is '", "'not divisible by extended header section size '", "'{}'", ".", "format", "(", "ext_header_len", ",", "MRC_FEI_SECTION_SIZE", ")", ")", "num_sections", "=", "ext_header_len", "//", "MRC_FEI_SECTION_SIZE", "if", "num_sections", "!=", "MRC_FEI_NUM_SECTIONS", ":", "raise", "ValueError", "(", "'calculated number of sections ({}) not equal to '", "'expected number of sections ({})'", "''", ".", "format", "(", "num_sections", ",", "MRC_FEI_NUM_SECTIONS", ")", ")", "section_fields", "=", "header_fields_from_table", "(", "MRC_FEI_EXT_HEADER_SECTION", ",", "keys", "=", "MRC_SPEC_KEYS", ",", "dtype_map", "=", "MRC_DTYPE_TO_NPY_DTYPE", ")", "# Make a list for each field and append the values for that", "# field. Then create an array from that list and store it", "# under the field name.", "ext_header", "=", "OrderedDict", "(", ")", "for", "field", "in", "section_fields", ":", "value_list", "=", "[", "]", "field_offset", "=", "field", "[", "'offset'", "]", "field_dtype", "=", "field", "[", "'dtype'", "]", "field_dshape", "=", "field", "[", "'dshape'", "]", "# Compute some parameters", "num_items", "=", "int", "(", "np", ".", "prod", "(", "field_dshape", ")", ")", "size_bytes", "=", "num_items", "*", "field_dtype", ".", "itemsize", "fmt", "=", "'{}{}'", ".", "format", "(", "num_items", ",", "field_dtype", ".", "char", ")", "for", "section", "in", "range", "(", "num_sections", ")", ":", "# Get the bytestring from the right position in the file,", "# unpack it and append the value to the list.", "start", "=", "section", "*", "MRC_FEI_SECTION_SIZE", "+", "field_offset", "self", ".", "file", ".", "seek", "(", "start", ")", "packed_value", "=", "self", ".", "file", ".", "read", "(", "size_bytes", ")", "value_list", ".", "append", "(", "struct", ".", "unpack", "(", "fmt", ",", "packed_value", ")", ")", "ext_header", "[", "field", "[", "'name'", "]", "]", "=", "np", ".", "array", "(", "value_list", ",", "dtype", "=", "field_dtype", ")", "if", "groupby", "==", "'field'", ":", "return", "ext_header", "elif", "groupby", "==", "'section'", ":", "# Transpose the data and return as tuple.", "return", "tuple", "(", "{", "key", ":", "ext_header", "[", "key", "]", "[", "i", "]", "for", "key", "in", "ext_header", "}", "for", "i", "in", "range", "(", "num_sections", ")", ")", "else", ":", "raise", "ValueError", "(", "\"`groupby` '{}' not understood\"", "\"\"", ".", "format", "(", "groupby_in", ")", ")" ]
Read the extended header according to `extended_header_type`. Currently, only the FEI extended header format is supported. See `print_fei_ext_header_spec` or `this homepage`_ for the format specification. The extended header usually has one header section per image (slice), in case of the FEI header 128 bytes each, with a total of 1024 sections. Parameters ---------- groupby : {'field', 'section'}, optional How to group the values in the extended header sections. ``'field'`` : make an array per section field, e.g.:: 'defocus': [dval1, dval2, ..., dval1024], 'exp_time': [tval1, tval2, ..., tval1024], ... ``'section'`` : make a dictionary for each section, e.g.:: {'defocus': dval1, 'exp_time': tval1}, {'defocus': dval2, 'exp_time': tval2}, ... If the number of images is smaller than 1024, the last values are all set to zero. force_type : string, optional If given, this value overrides the `extended_header_type` from `header`. Currently supported: ``'FEI1'`` Returns ------- ext_header: `OrderedDict` or tuple For ``groupby == 'field'``, a dictionary with the field names as keys, like in the example. For ``groupby == 'section'``, a tuple of dictionaries as shown above. The returned data structures store no offsets, in contrast to the regular header. See Also -------- References ---------- .. _this homepage: http://www.2dx.unibas.ch/documentation/mrc-software/fei-\ extended-mrc-format-not-used-by-2dx
[ "Read", "the", "extended", "header", "according", "to", "extended_header_type", "." ]
python
train
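The inner loop of `read_extended_header` above is a generic pattern: seek to `section * SECTION_SIZE + field_offset`, read a fixed number of bytes and unpack them with `struct`. A self-contained sketch of that pattern on a fabricated two-field section layout (the 8-byte layout here is an assumption for illustration, not the FEI 128-byte spec):

import io
import struct

SECTION_SIZE = 8     # hypothetical: int32 counter followed by float32 value
FIELD_OFFSET = 4     # offset of the float32 field inside each section
NUM_SECTIONS = 3

# Build a fake extended header in memory.
buf = io.BytesIO()
for i in range(NUM_SECTIONS):
    buf.write(struct.pack('<i', i))
    buf.write(struct.pack('<f', 0.5 * i))

# Read one field across all sections, exactly like the loop in the record.
values = []
for section in range(NUM_SECTIONS):
    buf.seek(section * SECTION_SIZE + FIELD_OFFSET)
    values.append(struct.unpack('<f', buf.read(4))[0])

print(values)  # [0.0, 0.5, 1.0]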
openearth/mmi-python
mmi/runner.py
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/runner.py#L123-L146
def create_bmi_model(self, engine, bmi_class=None, wrapper_kwargs=None): """initialize a bmi mode using an optional class""" if wrapper_kwargs is None: wrapper_kwargs = {} if bmi_class is None: wrapper_class = bmi.wrapper.BMIWrapper else: wrapper_class = self.import_from_string(bmi_class) try: """most models use engine as a first argument""" model = wrapper_class( engine, **wrapper_kwargs ) except TypeError as e: """but old python engines are engines, so they don't, but they should """ logger.warn( 'Model wrapper %s does not accept engine as a first argument', wrapper_class ) model = wrapper_class( **wrapper_kwargs ) return model
[ "def", "create_bmi_model", "(", "self", ",", "engine", ",", "bmi_class", "=", "None", ",", "wrapper_kwargs", "=", "None", ")", ":", "if", "wrapper_kwargs", "is", "None", ":", "wrapper_kwargs", "=", "{", "}", "if", "bmi_class", "is", "None", ":", "wrapper_class", "=", "bmi", ".", "wrapper", ".", "BMIWrapper", "else", ":", "wrapper_class", "=", "self", ".", "import_from_string", "(", "bmi_class", ")", "try", ":", "\"\"\"most models use engine as a first argument\"\"\"", "model", "=", "wrapper_class", "(", "engine", ",", "*", "*", "wrapper_kwargs", ")", "except", "TypeError", "as", "e", ":", "\"\"\"but old python engines are engines, so they don't, but they should \"\"\"", "logger", ".", "warn", "(", "'Model wrapper %s does not accept engine as a first argument'", ",", "wrapper_class", ")", "model", "=", "wrapper_class", "(", "*", "*", "wrapper_kwargs", ")", "return", "model" ]
initialize a bmi model using an optional class
[ "initialize", "a", "bmi", "model", "using", "an", "optional", "class" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datamodel/datasets.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/datasets.py#L171-L178
def getVariantSetByName(self, name): """ Returns a VariantSet with the specified name, or raises a VariantSetNameNotFoundException if it does not exist. """ if name not in self._variantSetNameMap: raise exceptions.VariantSetNameNotFoundException(name) return self._variantSetNameMap[name]
[ "def", "getVariantSetByName", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "_variantSetNameMap", ":", "raise", "exceptions", ".", "VariantSetNameNotFoundException", "(", "name", ")", "return", "self", ".", "_variantSetNameMap", "[", "name", "]" ]
Returns a VariantSet with the specified name, or raises a VariantSetNameNotFoundException if it does not exist.
[ "Returns", "a", "VariantSet", "with", "the", "specified", "name", "or", "raises", "a", "VariantSetNameNotFoundException", "if", "it", "does", "not", "exist", "." ]
python
train
lago-project/lago
lago/prefix.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/prefix.py#L949-L1046
def _ova_to_spec(self, filename): """ Retrieve the given ova and makes a template of it. Creates a disk from network provided ova. Calculates the needed memory from the ovf. The disk will be cached in the template repo Args: filename(str): the url to retrive the data from TODO: * Add hash checking against the server for faster download and latest version * Add config script running on host - other place * Add cloud init support - by using cdroms in other place * Handle cpu in some way - some other place need to pick it up * Handle the memory units properly - we just assume MegaBytes Returns: list of dict: list with the disk specification int: VM memory, None if none defined int: Number of virtual cpus, None if none defined Raises: RuntimeError: If the ova format is not supported TypeError: If the memory units in the ova are noot supported (currently only 'MegaBytes') """ # extract if needed ova_extracted_dir = os.path.splitext(filename)[0] if not os.path.exists(ova_extracted_dir): os.makedirs(ova_extracted_dir) subprocess.check_output( ["tar", "-xvf", filename, "-C", ova_extracted_dir], stderr=subprocess.STDOUT ) # lets find the ovf file # we expect only one to be ovf = glob.glob(ova_extracted_dir + "/master/vms/*/*.ovf") if len(ovf) != 1: raise RuntimeError("We support only one vm in ova") image_file = None memory = None vcpus = None # we found our ovf # lets extract the resources with open(ovf[0]) as fd: # lets extract the items obj = xmltodict.parse(fd.read()) hardware_items = [ section for section in obj["ovf:Envelope"]["Content"]["Section"] if section["@xsi:type"] == "ovf:VirtualHardwareSection_Type" ] if len(hardware_items) != 1: raise RuntimeError("We support only one machine desc in ova") hardware_items = hardware_items[0] for item in hardware_items["Item"]: # lets test resource types CPU_RESOURCE = 3 MEMORY_RESOURCE = 4 DISK_RESOURCE = 17 resource_type = int(item["rasd:ResourceType"]) if resource_type == CPU_RESOURCE: vcpus = int(item["rasd:cpu_per_socket"]) * \ int(item["rasd:num_of_sockets"]) elif resource_type == MEMORY_RESOURCE: memory = int(item["rasd:VirtualQuantity"]) if item["rasd:AllocationUnits"] != "MegaBytes": raise TypeError( "Fix me : we need to suport other units too" ) elif resource_type == DISK_RESOURCE: image_file = item["rasd:HostResource"] if image_file is not None: disk_meta = {"root-partition": "/dev/sda1"} disk_spec = [ { "type": "template", "template_type": "qcow2", "format": "qcow2", "dev": "vda", "name": os.path.basename(image_file), "path": ova_extracted_dir + "/images/" + image_file, "metadata": disk_meta } ] return disk_spec, memory, vcpus
[ "def", "_ova_to_spec", "(", "self", ",", "filename", ")", ":", "# extract if needed", "ova_extracted_dir", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "ova_extracted_dir", ")", ":", "os", ".", "makedirs", "(", "ova_extracted_dir", ")", "subprocess", ".", "check_output", "(", "[", "\"tar\"", ",", "\"-xvf\"", ",", "filename", ",", "\"-C\"", ",", "ova_extracted_dir", "]", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "# lets find the ovf file", "# we expect only one to be", "ovf", "=", "glob", ".", "glob", "(", "ova_extracted_dir", "+", "\"/master/vms/*/*.ovf\"", ")", "if", "len", "(", "ovf", ")", "!=", "1", ":", "raise", "RuntimeError", "(", "\"We support only one vm in ova\"", ")", "image_file", "=", "None", "memory", "=", "None", "vcpus", "=", "None", "# we found our ovf", "# lets extract the resources", "with", "open", "(", "ovf", "[", "0", "]", ")", "as", "fd", ":", "# lets extract the items", "obj", "=", "xmltodict", ".", "parse", "(", "fd", ".", "read", "(", ")", ")", "hardware_items", "=", "[", "section", "for", "section", "in", "obj", "[", "\"ovf:Envelope\"", "]", "[", "\"Content\"", "]", "[", "\"Section\"", "]", "if", "section", "[", "\"@xsi:type\"", "]", "==", "\"ovf:VirtualHardwareSection_Type\"", "]", "if", "len", "(", "hardware_items", ")", "!=", "1", ":", "raise", "RuntimeError", "(", "\"We support only one machine desc in ova\"", ")", "hardware_items", "=", "hardware_items", "[", "0", "]", "for", "item", "in", "hardware_items", "[", "\"Item\"", "]", ":", "# lets test resource types", "CPU_RESOURCE", "=", "3", "MEMORY_RESOURCE", "=", "4", "DISK_RESOURCE", "=", "17", "resource_type", "=", "int", "(", "item", "[", "\"rasd:ResourceType\"", "]", ")", "if", "resource_type", "==", "CPU_RESOURCE", ":", "vcpus", "=", "int", "(", "item", "[", "\"rasd:cpu_per_socket\"", "]", ")", "*", "int", "(", "item", "[", "\"rasd:num_of_sockets\"", "]", ")", "elif", "resource_type", "==", "MEMORY_RESOURCE", ":", "memory", "=", "int", "(", "item", "[", "\"rasd:VirtualQuantity\"", "]", ")", "if", "item", "[", "\"rasd:AllocationUnits\"", "]", "!=", "\"MegaBytes\"", ":", "raise", "TypeError", "(", "\"Fix me : we need to suport other units too\"", ")", "elif", "resource_type", "==", "DISK_RESOURCE", ":", "image_file", "=", "item", "[", "\"rasd:HostResource\"", "]", "if", "image_file", "is", "not", "None", ":", "disk_meta", "=", "{", "\"root-partition\"", ":", "\"/dev/sda1\"", "}", "disk_spec", "=", "[", "{", "\"type\"", ":", "\"template\"", ",", "\"template_type\"", ":", "\"qcow2\"", ",", "\"format\"", ":", "\"qcow2\"", ",", "\"dev\"", ":", "\"vda\"", ",", "\"name\"", ":", "os", ".", "path", ".", "basename", "(", "image_file", ")", ",", "\"path\"", ":", "ova_extracted_dir", "+", "\"/images/\"", "+", "image_file", ",", "\"metadata\"", ":", "disk_meta", "}", "]", "return", "disk_spec", ",", "memory", ",", "vcpus" ]
Retrieve the given ova and makes a template of it. Creates a disk from network provided ova. Calculates the needed memory from the ovf. The disk will be cached in the template repo Args: filename(str): the url to retrieve the data from TODO: * Add hash checking against the server for faster download and latest version * Add config script running on host - other place * Add cloud init support - by using cdroms in other place * Handle cpu in some way - some other place need to pick it up * Handle the memory units properly - we just assume MegaBytes Returns: list of dict: list with the disk specification int: VM memory, None if none defined int: Number of virtual cpus, None if none defined Raises: RuntimeError: If the ova format is not supported TypeError: If the memory units in the ova are not supported (currently only 'MegaBytes')
[ "Retrieve", "the", "given", "ova", "and", "makes", "a", "template", "of", "it", ".", "Creates", "a", "disk", "from", "network", "provided", "ova", ".", "Calculates", "the", "needed", "memory", "from", "the", "ovf", ".", "The", "disk", "will", "be", "cached", "in", "the", "template", "repo" ]
python
train
idlesign/uwsgiconf
uwsgiconf/options/routing.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/routing.py#L386-L394
def header_remove(self, value): """Automatically remove specified HTTP header from the response. :param str|unicode value: """ self._set('del-header', value, multi=True) return self._section
[ "def", "header_remove", "(", "self", ",", "value", ")", ":", "self", ".", "_set", "(", "'del-header'", ",", "value", ",", "multi", "=", "True", ")", "return", "self", ".", "_section" ]
Automatically remove specified HTTP header from the response. :param str|unicode value:
[ "Automatically", "remove", "specified", "HTTP", "header", "from", "the", "response", "." ]
python
train
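A hedged usage sketch for `header_remove`: each call appends one `del-header` option to the section. The import path and the `routing` attribute on `Section` are assumptions based on how uwsgiconf usually exposes its option groups; check the installed version:

from uwsgiconf.config import Section

section = Section()
# Strip headers the app should not leak; each call maps to one `del-header` line.
section.routing.header_remove('X-Powered-By')
section.routing.header_remove('Server')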
MacHu-GWU/constant2-project
constant2/_constant2.py
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L105-L139
def items(self): """non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.items() [("a", 1), ("b", 2)] .. versionchanged:: 0.0.5 """ l = list() # 为什么这里是 get_all_attributes(self.__class__) 而不是 # get_all_attributes(self) ? 因为有些实例不支持 # get_all_attributes(instance) 方法, 会报错。 # 所以我们从类里得到所有的属性信息, 然后获得这些属性在实例中 # 对应的值。 for attr, value in get_all_attributes(self.__class__): value = getattr(self, attr) # if it is not a instance of class(Constant) if not isinstance(value, Constant): l.append((attr, value)) return list(sorted(l, key=lambda x: x[0]))
[ "def", "items", "(", "self", ")", ":", "l", "=", "list", "(", ")", "# 为什么这里是 get_all_attributes(self.__class__) 而不是", "# get_all_attributes(self) ? 因为有些实例不支持", "# get_all_attributes(instance) 方法, 会报错。", "# 所以我们从类里得到所有的属性信息, 然后获得这些属性在实例中", "# 对应的值。", "for", "attr", ",", "value", "in", "get_all_attributes", "(", "self", ".", "__class__", ")", ":", "value", "=", "getattr", "(", "self", ",", "attr", ")", "# if it is not a instance of class(Constant)", "if", "not", "isinstance", "(", "value", ",", "Constant", ")", ":", "l", ".", "append", "(", "(", "attr", ",", "value", ")", ")", "return", "list", "(", "sorted", "(", "l", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ")" ]
non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attribute ... b = 2 # non-class attribute ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.items() [("a", 1), ("b", 2)] .. versionchanged:: 0.0.5
[ "non", "-", "class", "attributes", "ordered", "by", "alphabetical", "order", "." ]
python
train
ssalentin/plip
plip/modules/preparation.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/preparation.py#L906-L945
def find_charged(self, mol): """Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid.""" data = namedtuple('pcharge', 'atoms atoms_orig_idx type center restype resnr reschain') a_set = [] # Iterate through all residue, exclude those in chains defined as peptides for res in [r for r in pybel.ob.OBResidueIter(mol.OBMol) if not r.GetChain() in config.PEPTIDES]: if config.INTRA is not None: if res.GetChain() != config.INTRA: continue a_contributing = [] a_contributing_orig_idx = [] if res.GetName() in ('ARG', 'HIS', 'LYS'): # Arginine, Histidine or Lysine have charged sidechains for a in pybel.ob.OBResidueAtomIter(res): if a.GetType().startswith('N') and res.GetAtomProperty(a, 8) \ and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf: a_contributing.append(pybel.Atom(a)) a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) if not len(a_contributing) == 0: a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='positive', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) if res.GetName() in ('GLU', 'ASP'): # Aspartic or Glutamic Acid for a in pybel.ob.OBResidueAtomIter(res): if a.GetType().startswith('O') and res.GetAtomProperty(a, 8) \ and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf: a_contributing.append(pybel.Atom(a)) a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) if not len(a_contributing) == 0: a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='negative', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) return a_set
[ "def", "find_charged", "(", "self", ",", "mol", ")", ":", "data", "=", "namedtuple", "(", "'pcharge'", ",", "'atoms atoms_orig_idx type center restype resnr reschain'", ")", "a_set", "=", "[", "]", "# Iterate through all residue, exclude those in chains defined as peptides", "for", "res", "in", "[", "r", "for", "r", "in", "pybel", ".", "ob", ".", "OBResidueIter", "(", "mol", ".", "OBMol", ")", "if", "not", "r", ".", "GetChain", "(", ")", "in", "config", ".", "PEPTIDES", "]", ":", "if", "config", ".", "INTRA", "is", "not", "None", ":", "if", "res", ".", "GetChain", "(", ")", "!=", "config", ".", "INTRA", ":", "continue", "a_contributing", "=", "[", "]", "a_contributing_orig_idx", "=", "[", "]", "if", "res", ".", "GetName", "(", ")", "in", "(", "'ARG'", ",", "'HIS'", ",", "'LYS'", ")", ":", "# Arginine, Histidine or Lysine have charged sidechains", "for", "a", "in", "pybel", ".", "ob", ".", "OBResidueAtomIter", "(", "res", ")", ":", "if", "a", ".", "GetType", "(", ")", ".", "startswith", "(", "'N'", ")", "and", "res", ".", "GetAtomProperty", "(", "a", ",", "8", ")", "and", "not", "self", ".", "Mapper", ".", "mapid", "(", "a", ".", "GetIdx", "(", ")", ",", "mtype", "=", "'protein'", ")", "in", "self", ".", "altconf", ":", "a_contributing", ".", "append", "(", "pybel", ".", "Atom", "(", "a", ")", ")", "a_contributing_orig_idx", ".", "append", "(", "self", ".", "Mapper", ".", "mapid", "(", "a", ".", "GetIdx", "(", ")", ",", "mtype", "=", "'protein'", ")", ")", "if", "not", "len", "(", "a_contributing", ")", "==", "0", ":", "a_set", ".", "append", "(", "data", "(", "atoms", "=", "a_contributing", ",", "atoms_orig_idx", "=", "a_contributing_orig_idx", ",", "type", "=", "'positive'", ",", "center", "=", "centroid", "(", "[", "ac", ".", "coords", "for", "ac", "in", "a_contributing", "]", ")", ",", "restype", "=", "res", ".", "GetName", "(", ")", ",", "resnr", "=", "res", ".", "GetNum", "(", ")", ",", "reschain", "=", "res", ".", "GetChain", "(", ")", ")", ")", "if", "res", ".", "GetName", "(", ")", "in", "(", "'GLU'", ",", "'ASP'", ")", ":", "# Aspartic or Glutamic Acid", "for", "a", "in", "pybel", ".", "ob", ".", "OBResidueAtomIter", "(", "res", ")", ":", "if", "a", ".", "GetType", "(", ")", ".", "startswith", "(", "'O'", ")", "and", "res", ".", "GetAtomProperty", "(", "a", ",", "8", ")", "and", "not", "self", ".", "Mapper", ".", "mapid", "(", "a", ".", "GetIdx", "(", ")", ",", "mtype", "=", "'protein'", ")", "in", "self", ".", "altconf", ":", "a_contributing", ".", "append", "(", "pybel", ".", "Atom", "(", "a", ")", ")", "a_contributing_orig_idx", ".", "append", "(", "self", ".", "Mapper", ".", "mapid", "(", "a", ".", "GetIdx", "(", ")", ",", "mtype", "=", "'protein'", ")", ")", "if", "not", "len", "(", "a_contributing", ")", "==", "0", ":", "a_set", ".", "append", "(", "data", "(", "atoms", "=", "a_contributing", ",", "atoms_orig_idx", "=", "a_contributing_orig_idx", ",", "type", "=", "'negative'", ",", "center", "=", "centroid", "(", "[", "ac", ".", "coords", "for", "ac", "in", "a_contributing", "]", ")", ",", "restype", "=", "res", ".", "GetName", "(", ")", ",", "resnr", "=", "res", ".", "GetNum", "(", ")", ",", "reschain", "=", "res", ".", "GetChain", "(", ")", ")", ")", "return", "a_set" ]
Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid.
[ "Looks", "for", "positive", "charges", "in", "arginine", "histidine", "or", "lysine", "for", "negative", "in", "aspartic", "and", "glutamic", "acid", "." ]
python
train
fabiobatalha/crossrefapi
crossref/restful.py
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L901-L959
def doi(self, doi, only_message=True): """ This method retrieve the DOI metadata related to a given DOI number. args: Crossref DOI id (String) return: JSON Example: >>> from crossref.restful import Works >>> works = Works() >>> works.doi('10.1590/S0004-28032013005000001') {'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref', 'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [], 'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article', 'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001', 'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []}, 'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}], 'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'], 'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530', 'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]}, 'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'], 'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'], 'abstract': '<jats:p>Significant abstract data..... .</jats:p>', 'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [ {'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'}, {'affiliation': [ {'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [ {'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'}, {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [ {'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'} """ request_url = build_url_endpoint( '/'.join([self.ENDPOINT, doi]) ) request_params = {} result = self.do_http_request( 'get', request_url, data=request_params, custom_header=str(self.etiquette) ) if result.status_code == 404: return result = result.json() return result['message'] if only_message is True else result
[ "def", "doi", "(", "self", ",", "doi", ",", "only_message", "=", "True", ")", ":", "request_url", "=", "build_url_endpoint", "(", "'/'", ".", "join", "(", "[", "self", ".", "ENDPOINT", ",", "doi", "]", ")", ")", "request_params", "=", "{", "}", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url", ",", "data", "=", "request_params", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", "if", "result", ".", "status_code", "==", "404", ":", "return", "result", "=", "result", ".", "json", "(", ")", "return", "result", "[", "'message'", "]", "if", "only_message", "is", "True", "else", "result" ]
This method retrieve the DOI metadata related to a given DOI number. args: Crossref DOI id (String) return: JSON Example: >>> from crossref.restful import Works >>> works = Works() >>> works.doi('10.1590/S0004-28032013005000001') {'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref', 'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [], 'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article', 'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001', 'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []}, 'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}], 'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'], 'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530', 'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]}, 'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'], 'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'], 'abstract': '<jats:p>Significant abstract data..... .</jats:p>', 'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [ {'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'}, {'affiliation': [ {'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [ {'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'}, {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [ {'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
[ "This", "method", "retrieve", "the", "DOI", "metadata", "related", "to", "a", "given", "DOI", "number", "." ]
python
train
tensorforce/tensorforce
tensorforce/contrib/game_2048.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/contrib/game_2048.py#L127-L137
def do_action(self, action): """Execute action, add a new tile, update the score & return the reward.""" temp_state = np.rot90(self._state, action) reward = self._do_action_left(temp_state) self._state = np.rot90(temp_state, -action) self._score += reward self.add_random_tile() return reward
[ "def", "do_action", "(", "self", ",", "action", ")", ":", "temp_state", "=", "np", ".", "rot90", "(", "self", ".", "_state", ",", "action", ")", "reward", "=", "self", ".", "_do_action_left", "(", "temp_state", ")", "self", ".", "_state", "=", "np", ".", "rot90", "(", "temp_state", ",", "-", "action", ")", "self", ".", "_score", "+=", "reward", "self", ".", "add_random_tile", "(", ")", "return", "reward" ]
Execute action, add a new tile, update the score & return the reward.
[ "Execute", "action", "add", "a", "new", "tile", "update", "the", "score", "&", "return", "the", "reward", "." ]
python
valid
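The `do_action` record above relies on a standard 2048 trick: rotate the board so every move becomes 'move left', apply one left-slide routine, then rotate back. A self-contained sketch of that idea; `slide_row_left` is a simplified stand-in for the project's `_do_action_left`, not the original implementation:

import numpy as np

def slide_row_left(row):
    # Compact non-zero tiles to the left and merge equal neighbours once.
    tiles = [t for t in row if t]
    out, reward, i = [], 0, 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            out.append(tiles[i] * 2)
            reward += tiles[i] * 2
            i += 2
        else:
            out.append(tiles[i])
            i += 1
    return out + [0] * (len(row) - len(out)), reward

def do_action(board, action):
    # action 0..3: rotate so the chosen move becomes 'left', slide, rotate back.
    rotated = np.rot90(board, action).copy()
    reward = 0
    for r in range(rotated.shape[0]):
        new_row, gained = slide_row_left(list(rotated[r]))
        rotated[r] = new_row
        reward += gained
    return np.rot90(rotated, -action), reward

board = np.array([[2, 2, 0, 0], [0, 0, 0, 0], [4, 0, 4, 0], [0, 0, 0, 2]])
new_board, reward = do_action(board, 0)
print(reward)  # 12: 2+2 -> 4 and 4+4 -> 8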
yamcs/yamcs-python
yamcs-client/yamcs/client.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/client.py#L242-L250
def list_instance_templates(self): """ List the available instance templates. """ response = self.get_proto(path='/instance-templates') message = rest_pb2.ListInstanceTemplatesResponse() message.ParseFromString(response.content) templates = getattr(message, 'template') return iter([InstanceTemplate(template) for template in templates])
[ "def", "list_instance_templates", "(", "self", ")", ":", "response", "=", "self", ".", "get_proto", "(", "path", "=", "'/instance-templates'", ")", "message", "=", "rest_pb2", ".", "ListInstanceTemplatesResponse", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "templates", "=", "getattr", "(", "message", ",", "'template'", ")", "return", "iter", "(", "[", "InstanceTemplate", "(", "template", ")", "for", "template", "in", "templates", "]", ")" ]
List the available instance templates.
[ "List", "the", "available", "instance", "templates", "." ]
python
train
python-thumbnails/python-thumbnails
thumbnails/engines/base.py
https://github.com/python-thumbnails/python-thumbnails/blob/d8dc0ff5410f730de2a0e5759e8a818b19de35b9/thumbnails/engines/base.py#L174-L186
def parse_size(size): """ Parses size string into a tuple :param size: String on the form '100', 'x100 or '100x200' :return: Tuple of two integers for width and height :rtype: tuple """ if size.startswith('x'): return None, int(size.replace('x', '')) if 'x' in size: return int(size.split('x')[0]), int(size.split('x')[1]) return int(size), None
[ "def", "parse_size", "(", "size", ")", ":", "if", "size", ".", "startswith", "(", "'x'", ")", ":", "return", "None", ",", "int", "(", "size", ".", "replace", "(", "'x'", ",", "''", ")", ")", "if", "'x'", "in", "size", ":", "return", "int", "(", "size", ".", "split", "(", "'x'", ")", "[", "0", "]", ")", ",", "int", "(", "size", ".", "split", "(", "'x'", ")", "[", "1", "]", ")", "return", "int", "(", "size", ")", ",", "None" ]
Parses size string into a tuple :param size: String of the form '100', 'x100' or '100x200' :return: Tuple of two integers for width and height :rtype: tuple
[ "Parses", "size", "string", "into", "a", "tuple" ]
python
train
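The three accepted size formats are easiest to see by example. The function below is copied from the record so the snippet runs standalone, without installing the package:

def parse_size(size):
    # '100' -> width only, 'x100' -> height only, '100x200' -> both.
    if size.startswith('x'):
        return None, int(size.replace('x', ''))
    if 'x' in size:
        return int(size.split('x')[0]), int(size.split('x')[1])
    return int(size), None

print(parse_size('100'))      # (100, None)
print(parse_size('x100'))     # (None, 100)
print(parse_size('100x200'))  # (100, 200)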
bluedazzle/wechat_sender
wechat_sender/utils.py
https://github.com/bluedazzle/wechat_sender/blob/21d861735509153d6b34408157911c25a5d7018b/wechat_sender/utils.py#L22-L28
def _read_config_list(): """ 配置列表读取 """ with codecs.open('conf.ini', 'w+', encoding='utf-8') as f1: conf_list = [conf for conf in f1.read().split('\n') if conf != ''] return conf_list
[ "def", "_read_config_list", "(", ")", ":", "with", "codecs", ".", "open", "(", "'conf.ini'", ",", "'w+'", ",", "encoding", "=", "'utf-8'", ")", "as", "f1", ":", "conf_list", "=", "[", "conf", "for", "conf", "in", "f1", ".", "read", "(", ")", ".", "split", "(", "'\\n'", ")", "if", "conf", "!=", "''", "]", "return", "conf_list" ]
Read the configuration list
[ "Read", "the", "configuration", "list" ]
python
train
openpaperwork/paperwork-backend
paperwork_backend/docimport.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/docimport.py#L407-L417
def can_import(self, file_uris, current_doc=None): """ Check that the specified file looks like an image supported by PIL """ if len(file_uris) <= 0: return False for file_uri in file_uris: file_uri = self.fs.safe(file_uri) if not self.check_file_type(file_uri): return False return True
[ "def", "can_import", "(", "self", ",", "file_uris", ",", "current_doc", "=", "None", ")", ":", "if", "len", "(", "file_uris", ")", "<=", "0", ":", "return", "False", "for", "file_uri", "in", "file_uris", ":", "file_uri", "=", "self", ".", "fs", ".", "safe", "(", "file_uri", ")", "if", "not", "self", ".", "check_file_type", "(", "file_uri", ")", ":", "return", "False", "return", "True" ]
Check that the specified file looks like an image supported by PIL
[ "Check", "that", "the", "specified", "file", "looks", "like", "an", "image", "supported", "by", "PIL" ]
python
train
adamziel/python_translate
python_translate/loaders.py
https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/loaders.py#L271-L286
def _load_contents(self, polib, resource): """ Parses machine object (MO) format using polib @type resource: str @param resource: resource @rtype: list """ import struct try: return polib.mofile(resource) except (ValueError, AttributeError, struct.error) as e: self.rethrow( "Invalid resource {0}".format(resource), InvalidResourceException)
[ "def", "_load_contents", "(", "self", ",", "polib", ",", "resource", ")", ":", "import", "struct", "try", ":", "return", "polib", ".", "mofile", "(", "resource", ")", "except", "(", "ValueError", ",", "AttributeError", ",", "struct", ".", "error", ")", "as", "e", ":", "self", ".", "rethrow", "(", "\"Invalid resource {0}\"", ".", "format", "(", "resource", ")", ",", "InvalidResourceException", ")" ]
Parses machine object (MO) format using polib @type resource: str @param resource: resource @rtype: list
[ "Parses", "machine", "object", "(", "MO", ")", "format", "using", "polib" ]
python
train
brutus/boozelib
boozelib/boozelib.py
https://github.com/brutus/boozelib/blob/02a4aa03ac54b411ef5ac2be85eee0a9af1e60df/boozelib/boozelib.py#L163-L168
def gramm_to_promille(gramm, age, weight, height, sex): """Return the blood alcohol content (per mill) for a person with the given body stats and amount of alcohol (in gramm) in blood """ bw = calculate_bw(age, weight, height, sex) return (gramm * W) / (PB * bw)
[ "def", "gramm_to_promille", "(", "gramm", ",", "age", ",", "weight", ",", "height", ",", "sex", ")", ":", "bw", "=", "calculate_bw", "(", "age", ",", "weight", ",", "height", ",", "sex", ")", "return", "(", "gramm", "*", "W", ")", "/", "(", "PB", "*", "bw", ")" ]
Return the blood alcohol content (per mill) for a person with the given body stats and amount of alcohol (in gramm) in blood
[ "Return", "the", "blood", "alcohol", "content", "(", "per", "mill", ")", "for", "a", "person", "with", "the", "given", "body", "stats", "and", "amount", "of", "alcohol", "(", "in", "gramm", ")", "in", "blood" ]
python
train
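A hedged usage sketch for `gramm_to_promille`. The import path follows the module layout shown in the record (boozelib/boozelib.py); newer releases may expose a different top-level API, and the encoding of the `sex` flag is an assumption since it is only passed through to `calculate_bw`:

from boozelib.boozelib import gramm_to_promille

# Roughly one standard drink (14 g of pure alcohol) for a 30-year-old,
# 70 kg, 175 cm person; sex=False is assumed to mean male in this sketch.
bac = gramm_to_promille(14, age=30, weight=70, height=175, sex=False)
print(round(bac, 3))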
zblz/naima
naima/radiative.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1739-L1765
def _calc_specpp_hiE(self, Egamma): """ Spectrum computed as in Eq. 42 for Egamma >= 0.1 TeV """ # Fixed quad with n=40 is about 15 times faster and is always within # 0.5% of the result of adaptive quad for Egamma>0.1 # WARNING: It also produces artifacts for steep distributions (e.g. # Maxwellian) at ~500 GeV. Reverting to adaptative quadrature # from scipy.integrate import fixed_quad # result=c*fixed_quad(self._photon_integrand, 0., 1., args = [Egamma, # ], n = 40)[0] from scipy.integrate import quad Egamma = Egamma.to("TeV").value specpp = ( c.cgs.value * quad( self._photon_integrand, 0.0, 1.0, args=Egamma, epsrel=1e-3, epsabs=0, )[0] ) return specpp * u.Unit("1/(s TeV)")
[ "def", "_calc_specpp_hiE", "(", "self", ",", "Egamma", ")", ":", "# Fixed quad with n=40 is about 15 times faster and is always within", "# 0.5% of the result of adaptive quad for Egamma>0.1", "# WARNING: It also produces artifacts for steep distributions (e.g.", "# Maxwellian) at ~500 GeV. Reverting to adaptative quadrature", "# from scipy.integrate import fixed_quad", "# result=c*fixed_quad(self._photon_integrand, 0., 1., args = [Egamma,", "# ], n = 40)[0]", "from", "scipy", ".", "integrate", "import", "quad", "Egamma", "=", "Egamma", ".", "to", "(", "\"TeV\"", ")", ".", "value", "specpp", "=", "(", "c", ".", "cgs", ".", "value", "*", "quad", "(", "self", ".", "_photon_integrand", ",", "0.0", ",", "1.0", ",", "args", "=", "Egamma", ",", "epsrel", "=", "1e-3", ",", "epsabs", "=", "0", ",", ")", "[", "0", "]", ")", "return", "specpp", "*", "u", ".", "Unit", "(", "\"1/(s TeV)\"", ")" ]
Spectrum computed as in Eq. 42 for Egamma >= 0.1 TeV
[ "Spectrum", "computed", "as", "in", "Eq", ".", "42", "for", "Egamma", ">", "=", "0", ".", "1", "TeV" ]
python
train
intelligenia/modeltranslation
modeltranslation/models.py
https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/models.py#L222-L235
def delete_orphan_translations(condition=None): """ Delete orphan translations. This method needs refactoring to be improve its performance. """ if condition is None: condition = {} # TODO: optimize using one SQL sentence translations = FieldTranslation.objects.all() for translation in translations: translation._load_source_model() condition["id"] = translation.object_id if not translation.source_model.objects.filter(**condition).exists(): translation.delete()
[ "def", "delete_orphan_translations", "(", "condition", "=", "None", ")", ":", "if", "condition", "is", "None", ":", "condition", "=", "{", "}", "# TODO: optimize using one SQL sentence", "translations", "=", "FieldTranslation", ".", "objects", ".", "all", "(", ")", "for", "translation", "in", "translations", ":", "translation", ".", "_load_source_model", "(", ")", "condition", "[", "\"id\"", "]", "=", "translation", ".", "object_id", "if", "not", "translation", ".", "source_model", ".", "objects", ".", "filter", "(", "*", "*", "condition", ")", ".", "exists", "(", ")", ":", "translation", ".", "delete", "(", ")" ]
Delete orphan translations. This method needs refactoring to improve its performance.
[ "Delete", "orphan", "translations", ".", "This", "method", "needs", "refactoring", "to", "improve", "its", "performance", "." ]
python
train
vpelletier/pprofile
pprofile.py
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L207-L230
def call(self, code, line, callee_file_timing, callee, duration, frame): """ A call originating from this file returned. code (code) caller's code object line (int) caller's line number callee_file_timing (FileTiming) callee's FileTiming callee (code) callee's code object duration (float) duration of the call, in seconds frame (frame) calle's entire frame as of its return """ try: entry = self.call_dict[(code, line, callee)] except KeyError: self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration] else: entry[1] += 1 entry[2] += duration
[ "def", "call", "(", "self", ",", "code", ",", "line", ",", "callee_file_timing", ",", "callee", ",", "duration", ",", "frame", ")", ":", "try", ":", "entry", "=", "self", ".", "call_dict", "[", "(", "code", ",", "line", ",", "callee", ")", "]", "except", "KeyError", ":", "self", ".", "call_dict", "[", "(", "code", ",", "line", ",", "callee", ")", "]", "=", "[", "callee_file_timing", ",", "1", ",", "duration", "]", "else", ":", "entry", "[", "1", "]", "+=", "1", "entry", "[", "2", "]", "+=", "duration" ]
A call originating from this file returned. code (code) caller's code object line (int) caller's line number callee_file_timing (FileTiming) callee's FileTiming callee (code) callee's code object duration (float) duration of the call, in seconds frame (frame) callee's entire frame as of its return
[ "A", "call", "originating", "from", "this", "file", "returned", "." ]
python
train
oasis-open/cti-stix-validator
stix2validator/v21/shoulds.py
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v21/shoulds.py#L81-L97
def custom_property_prefix_strict(instance): """Ensure custom properties follow strict naming style conventions. Does not check property names in custom objects. """ for prop_name in instance.keys(): if (instance['type'] in enums.PROPERTIES and prop_name not in enums.PROPERTIES[instance['type']] and prop_name not in enums.RESERVED_PROPERTIES and not CUSTOM_PROPERTY_PREFIX_RE.match(prop_name)): yield JSONError("Custom property '%s' should have a type that " "starts with 'x_' followed by a source unique " "identifier (like a domain name with dots " "replaced by hyphen), a hyphen and then the name." % prop_name, instance['id'], 'custom-prefix')
[ "def", "custom_property_prefix_strict", "(", "instance", ")", ":", "for", "prop_name", "in", "instance", ".", "keys", "(", ")", ":", "if", "(", "instance", "[", "'type'", "]", "in", "enums", ".", "PROPERTIES", "and", "prop_name", "not", "in", "enums", ".", "PROPERTIES", "[", "instance", "[", "'type'", "]", "]", "and", "prop_name", "not", "in", "enums", ".", "RESERVED_PROPERTIES", "and", "not", "CUSTOM_PROPERTY_PREFIX_RE", ".", "match", "(", "prop_name", ")", ")", ":", "yield", "JSONError", "(", "\"Custom property '%s' should have a type that \"", "\"starts with 'x_' followed by a source unique \"", "\"identifier (like a domain name with dots \"", "\"replaced by hyphen), a hyphen and then the name.\"", "%", "prop_name", ",", "instance", "[", "'id'", "]", ",", "'custom-prefix'", ")" ]
Ensure custom properties follow strict naming style conventions. Does not check property names in custom objects.
[ "Ensure", "custom", "properties", "follow", "strict", "naming", "style", "conventions", "." ]
python
train
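The naming rule enforced above can be exercised in isolation. The regex below is a stand-in written from the error message (x_ + a source identifier with dots replaced by hyphens + a hyphen + the property name); it is an assumption, not the validator's actual CUSTOM_PROPERTY_PREFIX_RE:

import re

CUSTOM_PREFIX = re.compile(r"^x_[a-z0-9]+(?:-[a-z0-9]+)*-[a-z0-9_]+$")

for prop in ("x_acme-com-rating", "x_rating", "rating"):
    print(prop, bool(CUSTOM_PREFIX.match(prop)))
# x_acme-com-rating True, x_rating False, rating False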
numenta/nupic
examples/network/hierarchy_network_demo.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/network/hierarchy_network_demo.py#L191-L266
def createNetwork(dataSource): """Creates and returns a new Network with a sensor region reading data from 'dataSource'. There are two hierarchical levels, each with one SP and one TM. @param dataSource - A RecordStream containing the input data @returns a Network ready to run """ network = Network() # Create and add a record sensor and a SP region sensor = createRecordSensor(network, name=_RECORD_SENSOR, dataSource=dataSource) createSpatialPooler(network, name=_L1_SPATIAL_POOLER, inputWidth=sensor.encoder.getWidth()) # Link the SP region to the sensor input linkType = "UniformLink" linkParams = "" network.link(_RECORD_SENSOR, _L1_SPATIAL_POOLER, linkType, linkParams) # Create and add a TM region l1temporalMemory = createTemporalMemory(network, _L1_TEMPORAL_MEMORY) # Link SP region to TM region in the feedforward direction network.link(_L1_SPATIAL_POOLER, _L1_TEMPORAL_MEMORY, linkType, linkParams) # Add a classifier classifierParams = { # Learning rate. Higher values make it adapt faster. 'alpha': 0.005, # A comma separated list of the number of steps the # classifier predicts in the future. The classifier will # learn predictions of each order specified. 'steps': '1', # The specific implementation of the classifier to use # See SDRClassifierFactory#create for options 'implementation': 'py', # Diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity 'verbosity': 0} l1Classifier = network.addRegion(_L1_CLASSIFIER, "py.SDRClassifierRegion", json.dumps(classifierParams)) l1Classifier.setParameter('inferenceMode', True) l1Classifier.setParameter('learningMode', True) network.link(_L1_TEMPORAL_MEMORY, _L1_CLASSIFIER, linkType, linkParams, srcOutput="bottomUpOut", destInput="bottomUpIn") network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams, srcOutput="categoryOut", destInput="categoryIn") network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams, srcOutput="bucketIdxOut", destInput="bucketIdxIn") network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams, srcOutput="actValueOut", destInput="actValueIn") # Second Level l2inputWidth = l1temporalMemory.getSelf().getOutputElementCount("bottomUpOut") createSpatialPooler(network, name=_L2_SPATIAL_POOLER, inputWidth=l2inputWidth) network.link(_L1_TEMPORAL_MEMORY, _L2_SPATIAL_POOLER, linkType, linkParams) createTemporalMemory(network, _L2_TEMPORAL_MEMORY) network.link(_L2_SPATIAL_POOLER, _L2_TEMPORAL_MEMORY, linkType, linkParams) l2Classifier = network.addRegion(_L2_CLASSIFIER, "py.SDRClassifierRegion", json.dumps(classifierParams)) l2Classifier.setParameter('inferenceMode', True) l2Classifier.setParameter('learningMode', True) network.link(_L2_TEMPORAL_MEMORY, _L2_CLASSIFIER, linkType, linkParams, srcOutput="bottomUpOut", destInput="bottomUpIn") network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams, srcOutput="categoryOut", destInput="categoryIn") network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams, srcOutput="bucketIdxOut", destInput="bucketIdxIn") network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams, srcOutput="actValueOut", destInput="actValueIn") return network
[ "def", "createNetwork", "(", "dataSource", ")", ":", "network", "=", "Network", "(", ")", "# Create and add a record sensor and a SP region", "sensor", "=", "createRecordSensor", "(", "network", ",", "name", "=", "_RECORD_SENSOR", ",", "dataSource", "=", "dataSource", ")", "createSpatialPooler", "(", "network", ",", "name", "=", "_L1_SPATIAL_POOLER", ",", "inputWidth", "=", "sensor", ".", "encoder", ".", "getWidth", "(", ")", ")", "# Link the SP region to the sensor input", "linkType", "=", "\"UniformLink\"", "linkParams", "=", "\"\"", "network", ".", "link", "(", "_RECORD_SENSOR", ",", "_L1_SPATIAL_POOLER", ",", "linkType", ",", "linkParams", ")", "# Create and add a TM region", "l1temporalMemory", "=", "createTemporalMemory", "(", "network", ",", "_L1_TEMPORAL_MEMORY", ")", "# Link SP region to TM region in the feedforward direction", "network", ".", "link", "(", "_L1_SPATIAL_POOLER", ",", "_L1_TEMPORAL_MEMORY", ",", "linkType", ",", "linkParams", ")", "# Add a classifier", "classifierParams", "=", "{", "# Learning rate. Higher values make it adapt faster.", "'alpha'", ":", "0.005", ",", "# A comma separated list of the number of steps the", "# classifier predicts in the future. The classifier will", "# learn predictions of each order specified.", "'steps'", ":", "'1'", ",", "# The specific implementation of the classifier to use", "# See SDRClassifierFactory#create for options", "'implementation'", ":", "'py'", ",", "# Diagnostic output verbosity control;", "# 0: silent; [1..6]: increasing levels of verbosity", "'verbosity'", ":", "0", "}", "l1Classifier", "=", "network", ".", "addRegion", "(", "_L1_CLASSIFIER", ",", "\"py.SDRClassifierRegion\"", ",", "json", ".", "dumps", "(", "classifierParams", ")", ")", "l1Classifier", ".", "setParameter", "(", "'inferenceMode'", ",", "True", ")", "l1Classifier", ".", "setParameter", "(", "'learningMode'", ",", "True", ")", "network", ".", "link", "(", "_L1_TEMPORAL_MEMORY", ",", "_L1_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"bottomUpOut\"", ",", "destInput", "=", "\"bottomUpIn\"", ")", "network", ".", "link", "(", "_RECORD_SENSOR", ",", "_L1_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"categoryOut\"", ",", "destInput", "=", "\"categoryIn\"", ")", "network", ".", "link", "(", "_RECORD_SENSOR", ",", "_L1_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"bucketIdxOut\"", ",", "destInput", "=", "\"bucketIdxIn\"", ")", "network", ".", "link", "(", "_RECORD_SENSOR", ",", "_L1_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"actValueOut\"", ",", "destInput", "=", "\"actValueIn\"", ")", "# Second Level", "l2inputWidth", "=", "l1temporalMemory", ".", "getSelf", "(", ")", ".", "getOutputElementCount", "(", "\"bottomUpOut\"", ")", "createSpatialPooler", "(", "network", ",", "name", "=", "_L2_SPATIAL_POOLER", ",", "inputWidth", "=", "l2inputWidth", ")", "network", ".", "link", "(", "_L1_TEMPORAL_MEMORY", ",", "_L2_SPATIAL_POOLER", ",", "linkType", ",", "linkParams", ")", "createTemporalMemory", "(", "network", ",", "_L2_TEMPORAL_MEMORY", ")", "network", ".", "link", "(", "_L2_SPATIAL_POOLER", ",", "_L2_TEMPORAL_MEMORY", ",", "linkType", ",", "linkParams", ")", "l2Classifier", "=", "network", ".", "addRegion", "(", "_L2_CLASSIFIER", ",", "\"py.SDRClassifierRegion\"", ",", "json", ".", "dumps", "(", "classifierParams", ")", ")", "l2Classifier", ".", "setParameter", "(", "'inferenceMode'", ",", "True", ")", "l2Classifier", ".", "setParameter", "(", 
"'learningMode'", ",", "True", ")", "network", ".", "link", "(", "_L2_TEMPORAL_MEMORY", ",", "_L2_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"bottomUpOut\"", ",", "destInput", "=", "\"bottomUpIn\"", ")", "network", ".", "link", "(", "_RECORD_SENSOR", ",", "_L2_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"categoryOut\"", ",", "destInput", "=", "\"categoryIn\"", ")", "network", ".", "link", "(", "_RECORD_SENSOR", ",", "_L2_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"bucketIdxOut\"", ",", "destInput", "=", "\"bucketIdxIn\"", ")", "network", ".", "link", "(", "_RECORD_SENSOR", ",", "_L2_CLASSIFIER", ",", "linkType", ",", "linkParams", ",", "srcOutput", "=", "\"actValueOut\"", ",", "destInput", "=", "\"actValueIn\"", ")", "return", "network" ]
Creates and returns a new Network with a sensor region reading data from 'dataSource'. There are two hierarchical levels, each with one SP and one TM. @param dataSource - A RecordStream containing the input data @returns a Network ready to run
[ "Creates", "and", "returns", "a", "new", "Network", "with", "a", "sensor", "region", "reading", "data", "from", "dataSource", ".", "There", "are", "two", "hierarchical", "levels", "each", "with", "one", "SP", "and", "one", "TM", "." ]
python
valid
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L812-L818
def IntegerSum(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Performs a sum across all dimensions :param input_vertex: the vertex to have its values summed """ return Integer(context.jvm_view().IntegerSumVertex, label, cast_to_integer_vertex(input_vertex))
[ "def", "IntegerSum", "(", "input_vertex", ":", "vertex_constructor_param_types", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Vertex", ":", "return", "Integer", "(", "context", ".", "jvm_view", "(", ")", ".", "IntegerSumVertex", ",", "label", ",", "cast_to_integer_vertex", "(", "input_vertex", ")", ")" ]
Performs a sum across all dimensions :param input_vertex: the vertex to have its values summed
[ "Performs", "a", "sum", "across", "all", "dimensions", ":", "param", "input_vertex", ":", "the", "vertex", "to", "have", "its", "values", "summed" ]
python
train
saltstack/salt
salt/returners/couchdb_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/couchdb_return.py#L96-L136
def _get_options(ret=None): ''' Get the couchdb options from salt. ''' attrs = {'url': 'url', 'db': 'db', 'user': 'user', 'passwd': 'passwd', 'redact_pws': 'redact_pws', 'minimum_return': 'minimum_return'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__) if 'url' not in _options: log.debug("Using default url.") _options['url'] = "http://salt:5984/" if 'db' not in _options: log.debug("Using default database.") _options['db'] = "salt" if 'user' not in _options: log.debug("Not athenticating with a user.") _options['user'] = None if 'passwd' not in _options: log.debug("Not athenticating with a password.") _options['passwd'] = None if 'redact_pws' not in _options: log.debug("Not redacting passwords.") _options['redact_pws'] = None if 'minimum_return' not in _options: log.debug("Not minimizing the return object.") _options['minimum_return'] = None return _options
[ "def", "_get_options", "(", "ret", "=", "None", ")", ":", "attrs", "=", "{", "'url'", ":", "'url'", ",", "'db'", ":", "'db'", ",", "'user'", ":", "'user'", ",", "'passwd'", ":", "'passwd'", ",", "'redact_pws'", ":", "'redact_pws'", ",", "'minimum_return'", ":", "'minimum_return'", "}", "_options", "=", "salt", ".", "returners", ".", "get_returner_options", "(", "__virtualname__", ",", "ret", ",", "attrs", ",", "__salt__", "=", "__salt__", ",", "__opts__", "=", "__opts__", ")", "if", "'url'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Using default url.\"", ")", "_options", "[", "'url'", "]", "=", "\"http://salt:5984/\"", "if", "'db'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Using default database.\"", ")", "_options", "[", "'db'", "]", "=", "\"salt\"", "if", "'user'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not athenticating with a user.\"", ")", "_options", "[", "'user'", "]", "=", "None", "if", "'passwd'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not athenticating with a password.\"", ")", "_options", "[", "'passwd'", "]", "=", "None", "if", "'redact_pws'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not redacting passwords.\"", ")", "_options", "[", "'redact_pws'", "]", "=", "None", "if", "'minimum_return'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not minimizing the return object.\"", ")", "_options", "[", "'minimum_return'", "]", "=", "None", "return", "_options" ]
Get the couchdb options from salt.
[ "Get", "the", "couchdb", "options", "from", "salt", "." ]
python
train
numenta/htmresearch
projects/sequence_prediction/continuous_sequence/errorMetrics.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_prediction/continuous_sequence/errorMetrics.py#L31-L47
def NRMSE_sliding(data, pred, windowSize): """ Computing NRMSE in a sliding window :param data: :param pred: :param windowSize: :return: (window_center, NRMSE) """ halfWindowSize = int(round(float(windowSize)/2)) window_center = range(halfWindowSize, len(data)-halfWindowSize, int(round(float(halfWindowSize)/5.0))) nrmse = [] for wc in window_center: nrmse.append(NRMSE(data[wc-halfWindowSize:wc+halfWindowSize], pred[wc-halfWindowSize:wc+halfWindowSize])) return (window_center, nrmse)
[ "def", "NRMSE_sliding", "(", "data", ",", "pred", ",", "windowSize", ")", ":", "halfWindowSize", "=", "int", "(", "round", "(", "float", "(", "windowSize", ")", "/", "2", ")", ")", "window_center", "=", "range", "(", "halfWindowSize", ",", "len", "(", "data", ")", "-", "halfWindowSize", ",", "int", "(", "round", "(", "float", "(", "halfWindowSize", ")", "/", "5.0", ")", ")", ")", "nrmse", "=", "[", "]", "for", "wc", "in", "window_center", ":", "nrmse", ".", "append", "(", "NRMSE", "(", "data", "[", "wc", "-", "halfWindowSize", ":", "wc", "+", "halfWindowSize", "]", ",", "pred", "[", "wc", "-", "halfWindowSize", ":", "wc", "+", "halfWindowSize", "]", ")", ")", "return", "(", "window_center", ",", "nrmse", ")" ]
Computing NRMSE in a sliding window :param data: :param pred: :param windowSize: :return: (window_center, NRMSE)
[ "Computing", "NRMSE", "in", "a", "sliding", "window", ":", "param", "data", ":", ":", "param", "pred", ":", ":", "param", "windowSize", ":", ":", "return", ":", "(", "window_center", "NRMSE", ")" ]
python
train
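A self-contained sketch of the sliding-window computation in the last record. The NRMSE normalisation used here (window RMSE divided by the window's data range) is an assumption, since the record does not show the project's NRMSE helper:

import numpy as np

def nrmse(data, pred):
    # Assumed definition: RMSE normalised by the range of the observed data.
    data, pred = np.asarray(data, float), np.asarray(pred, float)
    rmse = np.sqrt(np.mean((data - pred) ** 2))
    span = np.ptp(data)
    return rmse / span if span else rmse

def nrmse_sliding(data, pred, window_size):
    half = int(round(window_size / 2.0))
    step = max(1, int(round(half / 5.0)))
    centers = list(range(half, len(data) - half, step))
    errors = [nrmse(data[c - half:c + half], pred[c - half:c + half]) for c in centers]
    return centers, errors

t = np.linspace(0, 10, 200)
data = np.sin(t)
pred = data + 0.05 * np.random.randn(200)
centers, errors = nrmse_sliding(data, pred, window_size=40)
print(len(centers), round(float(np.mean(errors)), 3))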