Column summary for this dump (type and observed range or cardinality per column):

  repo              stringlengths   7-55
  path              stringlengths   4-223
  url               stringlengths   87-315
  code              stringlengths   75-104k
  code_tokens       list
  docstring         stringlengths   1-46.9k
  docstring_tokens  list
  language          stringclasses   1 value
  partition         stringclasses   3 values
  avg_line_len      float64         7.91-980
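Each record below lists those fields in that order: repo, path, url, code, code_tokens, docstring, docstring_tokens, language, partition, avg_line_len. The dump does not state how avg_line_len is derived; the sketch below is a minimal guess, assuming it is the mean character length of the lines in the code field. The helper name and the sample snippet are hypothetical and not part of the dataset.

def avg_line_len(code):
    # Mean character length of the lines in a code snippet (assumed meaning of
    # the avg_line_len column; not confirmed by the dump itself).
    lines = code.splitlines() or [code]
    return sum(len(line) for line in lines) / len(lines)

# Hypothetical usage: a record whose code field held this two-line snippet
# would carry an avg_line_len of (14 + 16) / 2 = 15.0.
print(avg_line_len("def add(a, b):\n    return a + b"))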
gear11/pypelogs
pypein/flickr.py
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L149-L155
def _load_rsp(rsp):
    """
    Converts raw Flickr string response to Python dict
    """
    first = rsp.find('(') + 1
    last = rsp.rfind(')')
    return json.loads(rsp[first:last])
[ "def", "_load_rsp", "(", "rsp", ")", ":", "first", "=", "rsp", ".", "find", "(", "'('", ")", "+", "1", "last", "=", "rsp", ".", "rfind", "(", "')'", ")", "return", "json", ".", "loads", "(", "rsp", "[", "first", ":", "last", "]", ")" ]
Converts raw Flickr string response to Python dict
[ "Converts", "raw", "Flickr", "string", "response", "to", "Python", "dict" ]
python
train
29
mandiant/ioc_writer
ioc_writer/utils/xmlutils.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/utils/xmlutils.py#L30-L57
def read_xml(filename):
    """
    Use et to read in a xml file, or string, into a Element object.

    :param filename: File to parse.
    :return: lxml._elementTree object or None
    """
    parser = et.XMLParser(remove_blank_text=True)
    isfile=False
    try:
        isfile = os.path.exists(filename)
    except ValueError as e:
        if 'path too long for Windows' in str(e):
            pass
        else:
            raise
    try:
        if isfile:
            return et.parse(filename, parser)
        else:
            r = et.fromstring(filename, parser)
            return r.getroottree()
    except IOError:
        log.exception('unable to open file [[}]'.format(filename))
    except et.XMLSyntaxError:
        log.exception('unable to parse XML [{}]'.format(filename))
        return None
    return None
[ "def", "read_xml", "(", "filename", ")", ":", "parser", "=", "et", ".", "XMLParser", "(", "remove_blank_text", "=", "True", ")", "isfile", "=", "False", "try", ":", "isfile", "=", "os", ".", "path", ".", "exists", "(", "filename", ")", "except", "ValueError", "as", "e", ":", "if", "'path too long for Windows'", "in", "str", "(", "e", ")", ":", "pass", "else", ":", "raise", "try", ":", "if", "isfile", ":", "return", "et", ".", "parse", "(", "filename", ",", "parser", ")", "else", ":", "r", "=", "et", ".", "fromstring", "(", "filename", ",", "parser", ")", "return", "r", ".", "getroottree", "(", ")", "except", "IOError", ":", "log", ".", "exception", "(", "'unable to open file [[}]'", ".", "format", "(", "filename", ")", ")", "except", "et", ".", "XMLSyntaxError", ":", "log", ".", "exception", "(", "'unable to parse XML [{}]'", ".", "format", "(", "filename", ")", ")", "return", "None", "return", "None" ]
Use et to read in a xml file, or string, into a Element object. :param filename: File to parse. :return: lxml._elementTree object or None
[ "Use", "et", "to", "read", "in", "a", "xml", "file", "or", "string", "into", "a", "Element", "object", "." ]
python
train
28.535714
fastai/fastai
fastai/data_block.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L71-L76
def process(self, processor:PreProcessors=None):
    "Apply `processor` or `self.processor` to `self`."
    if processor is not None: self.processor = processor
    self.processor = listify(self.processor)
    for p in self.processor: p.process(self)
    return self
[ "def", "process", "(", "self", ",", "processor", ":", "PreProcessors", "=", "None", ")", ":", "if", "processor", "is", "not", "None", ":", "self", ".", "processor", "=", "processor", "self", ".", "processor", "=", "listify", "(", "self", ".", "processor", ")", "for", "p", "in", "self", ".", "processor", ":", "p", ".", "process", "(", "self", ")", "return", "self" ]
Apply `processor` or `self.processor` to `self`.
[ "Apply", "processor", "or", "self", ".", "processor", "to", "self", "." ]
python
train
46.833333
inasafe/inasafe
safe/gui/tools/wizard/step_kw30_field.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw30_field.py#L185-L197
def selected_fields(self):
    """Obtain the fields selected by user.

    :returns: Keyword of the selected field.
    :rtype: list, str
    """
    items = self.lstFields.selectedItems()
    if items and self.mode == MULTI_MODE:
        return [item.text() for item in items]
    elif items and self.mode == SINGLE_MODE:
        return items[0].text()
    else:
        return []
[ "def", "selected_fields", "(", "self", ")", ":", "items", "=", "self", ".", "lstFields", ".", "selectedItems", "(", ")", "if", "items", "and", "self", ".", "mode", "==", "MULTI_MODE", ":", "return", "[", "item", ".", "text", "(", ")", "for", "item", "in", "items", "]", "elif", "items", "and", "self", ".", "mode", "==", "SINGLE_MODE", ":", "return", "items", "[", "0", "]", ".", "text", "(", ")", "else", ":", "return", "[", "]" ]
Obtain the fields selected by user. :returns: Keyword of the selected field. :rtype: list, str
[ "Obtain", "the", "fields", "selected", "by", "user", "." ]
python
train
31.769231
osrg/ryu
ryu/ofproto/ofproto_v1_5_parser.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/ofproto/ofproto_v1_5_parser.py#L937-L961
def serialize(self, buf, offset):
    """
    Outputs the expression of the wire protocol of the flow stats into
    the buf.
    Returns the output length.
    """
    fields = [ofproto.oxs_from_user(k, uv) for (k, uv) in self.fields]

    hdr_pack_str = '!HH'
    field_offset = offset + struct.calcsize(hdr_pack_str)
    for (n, value, _) in fields:  # No mask
        field_offset += ofproto.oxs_serialize(n, value, None, buf,
                                              field_offset)

    reserved = 0
    length = field_offset - offset
    msg_pack_into(hdr_pack_str, buf, offset, reserved, length)
    self.length = length

    pad_len = utils.round_up(length, 8) - length
    msg_pack_into("%dx" % pad_len, buf, field_offset)

    return length + pad_len
[ "def", "serialize", "(", "self", ",", "buf", ",", "offset", ")", ":", "fields", "=", "[", "ofproto", ".", "oxs_from_user", "(", "k", ",", "uv", ")", "for", "(", "k", ",", "uv", ")", "in", "self", ".", "fields", "]", "hdr_pack_str", "=", "'!HH'", "field_offset", "=", "offset", "+", "struct", ".", "calcsize", "(", "hdr_pack_str", ")", "for", "(", "n", ",", "value", ",", "_", ")", "in", "fields", ":", "# No mask", "field_offset", "+=", "ofproto", ".", "oxs_serialize", "(", "n", ",", "value", ",", "None", ",", "buf", ",", "field_offset", ")", "reserved", "=", "0", "length", "=", "field_offset", "-", "offset", "msg_pack_into", "(", "hdr_pack_str", ",", "buf", ",", "offset", ",", "reserved", ",", "length", ")", "self", ".", "length", "=", "length", "pad_len", "=", "utils", ".", "round_up", "(", "length", ",", "8", ")", "-", "length", "msg_pack_into", "(", "\"%dx\"", "%", "pad_len", ",", "buf", ",", "field_offset", ")", "return", "length", "+", "pad_len" ]
Outputs the expression of the wire protocol of the flow stats into the buf. Returns the output length.
[ "Outputs", "the", "expression", "of", "the", "wire", "protocol", "of", "the", "flow", "stats", "into", "the", "buf", ".", "Returns", "the", "output", "length", "." ]
python
train
33.64
littlemo/mohand
source/mohand/load_file.py
https://github.com/littlemo/mohand/blob/9bd4591e457d594f2ce3a0c089ef28d3b4e027e8/source/mohand/load_file.py#L99-L112
def extract_commands(imported_vars):
    """
    从传入的变量列表中提取命令( ``click.core.Command`` )对象

    :param dict_items imported_vars: 字典的键值条目列表
    :return: 判定为终端命令的对象字典
    :rtype: dict(str, object)
    """
    commands = dict()
    for tup in imported_vars:
        name, obj = tup
        if is_command_object(obj):
            commands.setdefault(name, obj)
    return commands
[ "def", "extract_commands", "(", "imported_vars", ")", ":", "commands", "=", "dict", "(", ")", "for", "tup", "in", "imported_vars", ":", "name", ",", "obj", "=", "tup", "if", "is_command_object", "(", "obj", ")", ":", "commands", ".", "setdefault", "(", "name", ",", "obj", ")", "return", "commands" ]
从传入的变量列表中提取命令( ``click.core.Command`` )对象 :param dict_items imported_vars: 字典的键值条目列表 :return: 判定为终端命令的对象字典 :rtype: dict(str, object)
[ "从传入的变量列表中提取命令", "(", "click", ".", "core", ".", "Command", ")", "对象" ]
python
test
25.928571
lappis-unb/salic-ml
src/salicml/metrics/finance/common_items_ratio.py
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/metrics/finance/common_items_ratio.py#L83-L106
def common_items_metrics(all_items, common_items):
    """
    Calculates the percentage of common items for each
    project in each segment and calculates the mean and std of
    this percentage for each segment.
    """
    segments = common_items.index.unique()
    metrics = {}

    for seg in segments:
        seg_common_items = segment_common_items(seg)
        projects = get_segment_projects(seg)

        metric_values = []

        for proj in projects:
            pronac = proj[0]
            percentage = common_items_percentage(pronac, seg_common_items)
            metric_values.append(percentage)

        metrics[seg] = {
            'mean': np.mean(metric_values),
            'std': np.std(metric_values)
        }

    return pd.DataFrame.from_dict(metrics, orient='index')
[ "def", "common_items_metrics", "(", "all_items", ",", "common_items", ")", ":", "segments", "=", "common_items", ".", "index", ".", "unique", "(", ")", "metrics", "=", "{", "}", "for", "seg", "in", "segments", ":", "seg_common_items", "=", "segment_common_items", "(", "seg", ")", "projects", "=", "get_segment_projects", "(", "seg", ")", "metric_values", "=", "[", "]", "for", "proj", "in", "projects", ":", "pronac", "=", "proj", "[", "0", "]", "percentage", "=", "common_items_percentage", "(", "pronac", ",", "seg_common_items", ")", "metric_values", ".", "append", "(", "percentage", ")", "metrics", "[", "seg", "]", "=", "{", "'mean'", ":", "np", ".", "mean", "(", "metric_values", ")", ",", "'std'", ":", "np", ".", "std", "(", "metric_values", ")", "}", "return", "pd", ".", "DataFrame", ".", "from_dict", "(", "metrics", ",", "orient", "=", "'index'", ")" ]
Calculates the percentage of common items for each project in each segment and calculates the mean and std of this percentage for each segment.
[ "Calculates", "the", "percentage", "of", "common", "items", "for", "each", "project", "in", "each", "segment", "and", "calculates", "the", "mean", "and", "std", "of", "this", "percentage", "for", "each", "segment", "." ]
python
train
32.083333
petl-developers/petl
petl/io/csv.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/io/csv.py#L65-L74
def fromtsv(source=None, encoding=None, errors='strict', header=None,
            **csvargs):
    """
    Convenience function, as :func:`petl.io.csv.fromcsv` but with different
    default dialect (tab delimited).
    """
    csvargs.setdefault('dialect', 'excel-tab')
    return fromcsv(source, encoding=encoding, errors=errors, **csvargs)
[ "def", "fromtsv", "(", "source", "=", "None", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ",", "header", "=", "None", ",", "*", "*", "csvargs", ")", ":", "csvargs", ".", "setdefault", "(", "'dialect'", ",", "'excel-tab'", ")", "return", "fromcsv", "(", "source", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "*", "*", "csvargs", ")" ]
Convenience function, as :func:`petl.io.csv.fromcsv` but with different default dialect (tab delimited).
[ "Convenience", "function", "as", ":", "func", ":", "petl", ".", "io", ".", "csv", ".", "fromcsv", "but", "with", "different", "default", "dialect", "(", "tab", "delimited", ")", "." ]
python
train
33.5
bihealth/vcfpy
vcfpy/header.py
https://github.com/bihealth/vcfpy/blob/99e2165df30f11e0c95f3170f31bc5191d9e9e15/vcfpy/header.py#L277-L295
def _build_indices(self):
    """Build indices for the different field types"""
    result = {key: OrderedDict() for key in LINES_WITH_ID}
    for line in self.lines:
        if line.key in LINES_WITH_ID:
            result.setdefault(line.key, OrderedDict())
            if line.mapping["ID"] in result[line.key]:
                warnings.warn(
                    ("Seen {} header more than once: {}, using first"
                     "occurence").format(line.key, line.mapping["ID"]),
                    DuplicateHeaderLineWarning,
                )
            else:
                result[line.key][line.mapping["ID"]] = line
        else:
            result.setdefault(line.key, [])
            result[line.key].append(line)
    return result
[ "def", "_build_indices", "(", "self", ")", ":", "result", "=", "{", "key", ":", "OrderedDict", "(", ")", "for", "key", "in", "LINES_WITH_ID", "}", "for", "line", "in", "self", ".", "lines", ":", "if", "line", ".", "key", "in", "LINES_WITH_ID", ":", "result", ".", "setdefault", "(", "line", ".", "key", ",", "OrderedDict", "(", ")", ")", "if", "line", ".", "mapping", "[", "\"ID\"", "]", "in", "result", "[", "line", ".", "key", "]", ":", "warnings", ".", "warn", "(", "(", "\"Seen {} header more than once: {}, using first\"", "\"occurence\"", ")", ".", "format", "(", "line", ".", "key", ",", "line", ".", "mapping", "[", "\"ID\"", "]", ")", ",", "DuplicateHeaderLineWarning", ",", ")", "else", ":", "result", "[", "line", ".", "key", "]", "[", "line", ".", "mapping", "[", "\"ID\"", "]", "]", "=", "line", "else", ":", "result", ".", "setdefault", "(", "line", ".", "key", ",", "[", "]", ")", "result", "[", "line", ".", "key", "]", ".", "append", "(", "line", ")", "return", "result" ]
Build indices for the different field types
[ "Build", "indices", "for", "the", "different", "field", "types" ]
python
train
43.578947
openfisca/openfisca-core
openfisca_core/populations.py
https://github.com/openfisca/openfisca-core/blob/92ce9396e29ae5d9bac5ea604cfce88517c6b35c/openfisca_core/populations.py#L401-L414
def nb_persons(self, role = None):
    """
    Returns the number of persons contained in the entity.

    If ``role`` is provided, only the entity member with the given role are taken into account.
    """
    if role:
        if role.subroles:
            role_condition = np.logical_or.reduce([self.members_role == subrole for subrole in role.subroles])
        else:
            role_condition = self.members_role == role
        return self.sum(role_condition)
    else:
        return np.bincount(self.members_entity_id)
[ "def", "nb_persons", "(", "self", ",", "role", "=", "None", ")", ":", "if", "role", ":", "if", "role", ".", "subroles", ":", "role_condition", "=", "np", ".", "logical_or", ".", "reduce", "(", "[", "self", ".", "members_role", "==", "subrole", "for", "subrole", "in", "role", ".", "subroles", "]", ")", "else", ":", "role_condition", "=", "self", ".", "members_role", "==", "role", "return", "self", ".", "sum", "(", "role_condition", ")", "else", ":", "return", "np", ".", "bincount", "(", "self", ".", "members_entity_id", ")" ]
Returns the number of persons contained in the entity. If ``role`` is provided, only the entity member with the given role are taken into account.
[ "Returns", "the", "number", "of", "persons", "contained", "in", "the", "entity", "." ]
python
train
40.642857
icgood/pymap
pymap/selected.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/selected.py#L170-L188
def get_all(self, seq_set: SequenceSet) \
        -> Sequence[Tuple[int, CachedMessage]]:
    """Return the cached messages, and their sequence numbers, for the
    given sequence set.

    Args:
        seq_set: The message sequence set.

    """
    if seq_set.uid:
        all_uids = seq_set.flatten(self.max_uid) & self._uids
        return [(seq, self._cache[uid])
                for seq, uid in enumerate(self._sorted, 1)
                if uid in all_uids]
    else:
        all_seqs = seq_set.flatten(self.exists)
        return [(seq, self._cache[uid])
                for seq, uid in enumerate(self._sorted, 1)
                if seq in all_seqs]
[ "def", "get_all", "(", "self", ",", "seq_set", ":", "SequenceSet", ")", "->", "Sequence", "[", "Tuple", "[", "int", ",", "CachedMessage", "]", "]", ":", "if", "seq_set", ".", "uid", ":", "all_uids", "=", "seq_set", ".", "flatten", "(", "self", ".", "max_uid", ")", "&", "self", ".", "_uids", "return", "[", "(", "seq", ",", "self", ".", "_cache", "[", "uid", "]", ")", "for", "seq", ",", "uid", "in", "enumerate", "(", "self", ".", "_sorted", ",", "1", ")", "if", "uid", "in", "all_uids", "]", "else", ":", "all_seqs", "=", "seq_set", ".", "flatten", "(", "self", ".", "exists", ")", "return", "[", "(", "seq", ",", "self", ".", "_cache", "[", "uid", "]", ")", "for", "seq", ",", "uid", "in", "enumerate", "(", "self", ".", "_sorted", ",", "1", ")", "if", "seq", "in", "all_seqs", "]" ]
Return the cached messages, and their sequence numbers, for the given sequence set. Args: seq_set: The message sequence set.
[ "Return", "the", "cached", "messages", "and", "their", "sequence", "numbers", "for", "the", "given", "sequence", "set", "." ]
python
train
37
Divirad/PythonTranslate
pythontranslate/__init__.py
https://github.com/Divirad/PythonTranslate/blob/625e97563bb586692f4a128b284a2aa79f82c279/pythontranslate/__init__.py#L49-L70
def detect_language(self, text: str, hint: str = None):
    """
    Detects the language of a text
    :param text: Text to analyze
    :param hint: A list which are hints for the API
                 in which language the text is written in
                 example: "de, en"
    :return: detected language code. example: "en"
    """
    encodedtext = urllib.parse.quote(text)
    args = "&text=" + encodedtext
    if hint is not None:
        args += "&hint=" + hint
    r = self.yandex_translate_request("detect", args)
    self.handle_errors(r)
    return r.json()["lang"]
[ "def", "detect_language", "(", "self", ",", "text", ":", "str", ",", "hint", ":", "str", "=", "None", ")", ":", "encodedtext", "=", "urllib", ".", "parse", ".", "quote", "(", "text", ")", "args", "=", "\"&text=\"", "+", "encodedtext", "if", "hint", "is", "not", "None", ":", "args", "+=", "\"&hint=\"", "+", "hint", "r", "=", "self", ".", "yandex_translate_request", "(", "\"detect\"", ",", "args", ")", "self", ".", "handle_errors", "(", "r", ")", "return", "r", ".", "json", "(", ")", "[", "\"lang\"", "]" ]
Detects the language of a text :param text: Text to analyze :param hint: A list which are hints for the API in which language the text is written in example: "de, en" :return: detected language code. example: "en"
[ "Detects", "the", "language", "of", "a", "text", ":", "param", "text", ":", "Text", "to", "analyze", ":", "param", "hint", ":", "A", "list", "which", "are", "hints", "for", "the", "API", "in", "which", "language", "the", "text", "is", "written", "in", "example", ":", "de", "en", ":", "return", ":", "detected", "language", "code", ".", "example", ":", "en" ]
python
train
29.727273
cloudendpoints/endpoints-python
endpoints/resource_container.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/resource_container.py#L103-L122
def add_to_cache(cls, remote_info, container):  # pylint: disable=g-bad-name
  """Adds a ResourceContainer to a cache tying it to a protorpc method.

  Args:
    remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
        to a method.
    container: An instance of ResourceContainer.

  Raises:
    TypeError: if the container is not an instance of cls.
    KeyError: if the remote method has been reference by a container before.
        This created remote method should never occur because a remote method
        is created once.
  """
  if not isinstance(container, cls):
    raise TypeError('%r not an instance of %r, could not be added to cache.' %
                    (container, cls))
  if remote_info in cls.__remote_info_cache:
    raise KeyError('Cache has collision but should not.')
  cls.__remote_info_cache[remote_info] = container
[ "def", "add_to_cache", "(", "cls", ",", "remote_info", ",", "container", ")", ":", "# pylint: disable=g-bad-name", "if", "not", "isinstance", "(", "container", ",", "cls", ")", ":", "raise", "TypeError", "(", "'%r not an instance of %r, could not be added to cache.'", "%", "(", "container", ",", "cls", ")", ")", "if", "remote_info", "in", "cls", ".", "__remote_info_cache", ":", "raise", "KeyError", "(", "'Cache has collision but should not.'", ")", "cls", ".", "__remote_info_cache", "[", "remote_info", "]", "=", "container" ]
Adds a ResourceContainer to a cache tying it to a protorpc method. Args: remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding to a method. container: An instance of ResourceContainer. Raises: TypeError: if the container is not an instance of cls. KeyError: if the remote method has been reference by a container before. This created remote method should never occur because a remote method is created once.
[ "Adds", "a", "ResourceContainer", "to", "a", "cache", "tying", "it", "to", "a", "protorpc", "method", "." ]
python
train
44.15
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L481-L497
def __read(self):
    """
    Reads packets from the socket
    """
    # Set the socket as non-blocking
    self._socket.setblocking(0)

    while not self._stop_event.is_set():
        # Watch for content
        ready = select.select([self._socket], [], [], 1)
        if ready[0]:
            # Socket is ready
            data, sender = self._socket.recvfrom(1024)
            try:
                self._handle_heartbeat(sender, data)
            except Exception as ex:
                _logger.exception("Error handling the heart beat: %s", ex)
[ "def", "__read", "(", "self", ")", ":", "# Set the socket as non-blocking", "self", ".", "_socket", ".", "setblocking", "(", "0", ")", "while", "not", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "# Watch for content", "ready", "=", "select", ".", "select", "(", "[", "self", ".", "_socket", "]", ",", "[", "]", ",", "[", "]", ",", "1", ")", "if", "ready", "[", "0", "]", ":", "# Socket is ready", "data", ",", "sender", "=", "self", ".", "_socket", ".", "recvfrom", "(", "1024", ")", "try", ":", "self", ".", "_handle_heartbeat", "(", "sender", ",", "data", ")", "except", "Exception", "as", "ex", ":", "_logger", ".", "exception", "(", "\"Error handling the heart beat: %s\"", ",", "ex", ")" ]
Reads packets from the socket
[ "Reads", "packets", "from", "the", "socket" ]
python
train
34.941176
Kensuke-Mitsuzawa/JapaneseTokenizers
JapaneseTokenizer/mecab_wrapper/mecab_wrapper.py
https://github.com/Kensuke-Mitsuzawa/JapaneseTokenizers/blob/3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c/JapaneseTokenizer/mecab_wrapper/mecab_wrapper.py#L92-L111
def __check_mecab_dict_path(self):
    """check path to dict of Mecab in system environment
    """
    mecab_dic_cmd = "echo `{} --dicdir`".format(os.path.join(self._path_mecab_config, 'mecab-config'))

    try:
        if six.PY2:
            path_mecab_dict = subprocess.check_output(
                mecab_dic_cmd,
                shell=True
            ).strip('\n')
        else:
            path_mecab_dict = subprocess.check_output(mecab_dic_cmd, shell=True).decode(self.string_encoding).strip('\n')
    except subprocess.CalledProcessError:
        logger.error("{}".format(mecab_dic_cmd))
        raise subprocess.CalledProcessError(returncode=-1, cmd="Failed to execute mecab-config command")

    if path_mecab_dict == '':
        raise SystemError("""mecab dictionary path is not found with following command: {}
        You are not able to use additional dictionary.
        Still you are able to call mecab default dictionary""".format(mecab_dic_cmd))

    return path_mecab_dict
[ "def", "__check_mecab_dict_path", "(", "self", ")", ":", "mecab_dic_cmd", "=", "\"echo `{} --dicdir`\"", ".", "format", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_path_mecab_config", ",", "'mecab-config'", ")", ")", "try", ":", "if", "six", ".", "PY2", ":", "path_mecab_dict", "=", "subprocess", ".", "check_output", "(", "mecab_dic_cmd", ",", "shell", "=", "True", ")", ".", "strip", "(", "'\\n'", ")", "else", ":", "path_mecab_dict", "=", "subprocess", ".", "check_output", "(", "mecab_dic_cmd", ",", "shell", "=", "True", ")", ".", "decode", "(", "self", ".", "string_encoding", ")", ".", "strip", "(", "'\\n'", ")", "except", "subprocess", ".", "CalledProcessError", ":", "logger", ".", "error", "(", "\"{}\"", ".", "format", "(", "mecab_dic_cmd", ")", ")", "raise", "subprocess", ".", "CalledProcessError", "(", "returncode", "=", "-", "1", ",", "cmd", "=", "\"Failed to execute mecab-config command\"", ")", "if", "path_mecab_dict", "==", "''", ":", "raise", "SystemError", "(", "\"\"\"mecab dictionary path is not found with following command: {} \n You are not able to use additional dictionary. \n Still you are able to call mecab default dictionary\"\"\"", ".", "format", "(", "mecab_dic_cmd", ")", ")", "return", "path_mecab_dict" ]
check path to dict of Mecab in system environment
[ "check", "path", "to", "dict", "of", "Mecab", "in", "system", "environment" ]
python
train
49.9
gtaylor/python-route53
route53/transport.py
https://github.com/gtaylor/python-route53/blob/b9fc7e258a79551c9ed61e4a71668b7f06f9e774/route53/transport.py#L67-L88
def get_request_headers(self):
    """
    Determine the headers to send along with the request. These are
    pretty much the same for every request, with Route53.
    """
    date_header = time.asctime(time.gmtime())
    # We sign the time string above with the user's AWS secret access key
    # in order to authenticate our request.
    signing_key = self._hmac_sign_string(date_header)

    # Amazon's super fun auth token.
    auth_header = "AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s" % (
        self.connection._aws_access_key_id,
        signing_key,
    )

    return {
        'X-Amzn-Authorization': auth_header,
        'x-amz-date': date_header,
        'Host': 'route53.amazonaws.com',
    }
[ "def", "get_request_headers", "(", "self", ")", ":", "date_header", "=", "time", ".", "asctime", "(", "time", ".", "gmtime", "(", ")", ")", "# We sign the time string above with the user's AWS secret access key", "# in order to authenticate our request.", "signing_key", "=", "self", ".", "_hmac_sign_string", "(", "date_header", ")", "# Amazon's super fun auth token.", "auth_header", "=", "\"AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s\"", "%", "(", "self", ".", "connection", ".", "_aws_access_key_id", ",", "signing_key", ",", ")", "return", "{", "'X-Amzn-Authorization'", ":", "auth_header", ",", "'x-amz-date'", ":", "date_header", ",", "'Host'", ":", "'route53.amazonaws.com'", ",", "}" ]
Determine the headers to send along with the request. These are pretty much the same for every request, with Route53.
[ "Determine", "the", "headers", "to", "send", "along", "with", "the", "request", ".", "These", "are", "pretty", "much", "the", "same", "for", "every", "request", "with", "Route53", "." ]
python
test
35.409091
rflamary/POT
ot/datasets.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/datasets.py#L46-L79
def make_2D_samples_gauss(n, m, sigma, random_state=None):
    """return n samples drawn from 2D gaussian N(m,sigma)

    Parameters
    ----------
    n : int
        number of samples to make
    m : np.array (2,)
        mean value of the gaussian distribution
    sigma : np.array (2,2)
        covariance matrix of the gaussian distribution
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : np.array (n,2)
        n samples drawn from N(m,sigma)
    """
    generator = check_random_state(random_state)
    if np.isscalar(sigma):
        sigma = np.array([sigma, ])
    if len(sigma) > 1:
        P = sp.linalg.sqrtm(sigma)
        res = generator.randn(n, 2).dot(P) + m
    else:
        res = generator.randn(n, 2) * np.sqrt(sigma) + m
    return res
[ "def", "make_2D_samples_gauss", "(", "n", ",", "m", ",", "sigma", ",", "random_state", "=", "None", ")", ":", "generator", "=", "check_random_state", "(", "random_state", ")", "if", "np", ".", "isscalar", "(", "sigma", ")", ":", "sigma", "=", "np", ".", "array", "(", "[", "sigma", ",", "]", ")", "if", "len", "(", "sigma", ")", ">", "1", ":", "P", "=", "sp", ".", "linalg", ".", "sqrtm", "(", "sigma", ")", "res", "=", "generator", ".", "randn", "(", "n", ",", "2", ")", ".", "dot", "(", "P", ")", "+", "m", "else", ":", "res", "=", "generator", ".", "randn", "(", "n", ",", "2", ")", "*", "np", ".", "sqrt", "(", "sigma", ")", "+", "m", "return", "res" ]
return n samples drawn from 2D gaussian N(m,sigma) Parameters ---------- n : int number of samples to make m : np.array (2,) mean value of the gaussian distribution sigma : np.array (2,2) covariance matrix of the gaussian distribution random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : np.array (n,2) n samples drawn from N(m,sigma)
[ "return", "n", "samples", "drawn", "from", "2D", "gaussian", "N", "(", "m", "sigma", ")" ]
python
train
30.852941
noxdafox/clipspy
clips/environment.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/environment.py#L188-L202
def define_function(self, function, name=None):
    """Define the Python function within the CLIPS environment.

    If a name is given, it will be the function name within CLIPS.
    Otherwise, the name of the Python function will be used.

    The Python function will be accessible within CLIPS
    via its name as if it was defined via the `deffunction` construct.

    """
    name = name if name is not None else function.__name__

    ENVIRONMENT_DATA[self._env].user_functions[name] = function

    self.build(DEFFUNCTION.format(name))
[ "def", "define_function", "(", "self", ",", "function", ",", "name", "=", "None", ")", ":", "name", "=", "name", "if", "name", "is", "not", "None", "else", "function", ".", "__name__", "ENVIRONMENT_DATA", "[", "self", ".", "_env", "]", ".", "user_functions", "[", "name", "]", "=", "function", "self", ".", "build", "(", "DEFFUNCTION", ".", "format", "(", "name", ")", ")" ]
Define the Python function within the CLIPS environment. If a name is given, it will be the function name within CLIPS. Otherwise, the name of the Python function will be used. The Python function will be accessible within CLIPS via its name as if it was defined via the `deffunction` construct.
[ "Define", "the", "Python", "function", "within", "the", "CLIPS", "environment", "." ]
python
train
37.666667
zhmcclient/python-zhmcclient
zhmcclient/_cpc.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_cpc.py#L377-L412
def maximum_active_partitions(self):
    """
    Integer: The maximum number of active logical partitions or partitions
    of this CPC.

    The following table shows the maximum number of active logical
    partitions or partitions by machine generations supported at the
    HMC API:

    =========================  ==================
    Machine generation         Maximum partitions
    =========================  ==================
    z196                       60
    z114                       30
    zEC12                      60
    zBC12                      30
    z13 / Emperor              85
    z13s / Rockhopper          40
    z14 / Emperor II           85
    z14-ZR1 / Rockhopper II    40
    =========================  ==================

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
      :exc:`ValueError`: Unknown machine type
    """
    machine_type = self.get_property('machine-type')
    try:
        max_parts = self._MAX_PARTITIONS_BY_MACHINE_TYPE[machine_type]
    except KeyError:
        raise ValueError("Unknown machine type: {!r}".format(machine_type))
    return max_parts
[ "def", "maximum_active_partitions", "(", "self", ")", ":", "machine_type", "=", "self", ".", "get_property", "(", "'machine-type'", ")", "try", ":", "max_parts", "=", "self", ".", "_MAX_PARTITIONS_BY_MACHINE_TYPE", "[", "machine_type", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Unknown machine type: {!r}\"", ".", "format", "(", "machine_type", ")", ")", "return", "max_parts" ]
Integer: The maximum number of active logical partitions or partitions of this CPC. The following table shows the maximum number of active logical partitions or partitions by machine generations supported at the HMC API: ========================= ================== Machine generation Maximum partitions ========================= ================== z196 60 z114 30 zEC12 60 zBC12 30 z13 / Emperor 85 z13s / Rockhopper 40 z14 / Emperor II 85 z14-ZR1 / Rockhopper II 40 ========================= ================== Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`ValueError`: Unknown machine type
[ "Integer", ":", "The", "maximum", "number", "of", "active", "logical", "partitions", "or", "partitions", "of", "this", "CPC", "." ]
python
train
39.833333
project-ncl/pnc-cli
pnc_cli/tools/scm_utils.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/tools/scm_utils.py#L23-L47
def get_scm_status(config, read_modules=False, repo_url=None, mvn_repo_local=None, additional_params=None):
    """
    Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is
    read by default, although it can be requested to read the whole available module structure.

    :param config: artifact config (ArtifactConfig instance)
    :param read_modules: if True all modules are read, otherwise only top-level artifact
    :param repo_url: the URL of the repository to use
    :param mvn_repo_local: local repository path
    :param additional_params: additional params to add on command-line when running maven
    """
    global scm_status_cache
    if config.artifact in scm_status_cache.keys():
        result = scm_status_cache[config.artifact]
    elif not read_modules and (("%s|False" % config.artifact) in scm_status_cache.keys()):
        result = scm_status_cache["%s|False" % config.artifact]
    else:
        result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params)
        if read_modules:
            scm_status_cache[config.artifact] = result
            if ("%s|False" % config.artifact) in scm_status_cache.keys():
                del(scm_status_cache["%s|False" % config.artifact])
        else:
            scm_status_cache["%s|False" % config.artifact] = result
    return result
[ "def", "get_scm_status", "(", "config", ",", "read_modules", "=", "False", ",", "repo_url", "=", "None", ",", "mvn_repo_local", "=", "None", ",", "additional_params", "=", "None", ")", ":", "global", "scm_status_cache", "if", "config", ".", "artifact", "in", "scm_status_cache", ".", "keys", "(", ")", ":", "result", "=", "scm_status_cache", "[", "config", ".", "artifact", "]", "elif", "not", "read_modules", "and", "(", "(", "\"%s|False\"", "%", "config", ".", "artifact", ")", "in", "scm_status_cache", ".", "keys", "(", ")", ")", ":", "result", "=", "scm_status_cache", "[", "\"%s|False\"", "%", "config", ".", "artifact", "]", "else", ":", "result", "=", "_get_scm_status", "(", "config", ",", "read_modules", ",", "repo_url", ",", "mvn_repo_local", ",", "additional_params", ")", "if", "read_modules", ":", "scm_status_cache", "[", "config", ".", "artifact", "]", "=", "result", "if", "(", "\"%s|False\"", "%", "config", ".", "artifact", ")", "in", "scm_status_cache", ".", "keys", "(", ")", ":", "del", "(", "scm_status_cache", "[", "\"%s|False\"", "%", "config", ".", "artifact", "]", ")", "else", ":", "scm_status_cache", "[", "\"%s|False\"", "%", "config", ".", "artifact", "]", "=", "result", "return", "result" ]
Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by default, although it can be requested to read the whole available module structure. :param config: artifact config (ArtifactConfig instance) :param read_modules: if True all modules are read, otherwise only top-level artifact :param repo_url: the URL of the repository to use :param mvn_repo_local: local repository path :param additional_params: additional params to add on command-line when running maven
[ "Gets", "the", "artifact", "status", "(", "MavenArtifact", "instance", ")", "from", "SCM", "defined", "by", "config", ".", "Only", "the", "top", "-", "level", "artifact", "is", "read", "by", "default", "although", "it", "can", "be", "requested", "to", "read", "the", "whole", "available", "module", "structure", "." ]
python
train
54.76
lsst-sqre/ltd-conveyor
ltdconveyor/keeper/login.py
https://github.com/lsst-sqre/ltd-conveyor/blob/c492937c4c1e050ccc4a0b9dcc38f9980d57e305/ltdconveyor/keeper/login.py#L12-L39
def get_keeper_token(host, username, password):
    """Get a temporary auth token from LTD Keeper.

    Parameters
    ----------
    host : `str`
        Hostname of the LTD Keeper API
        (e.g., ``'https://keeper.lsst.codes'``).
    username : `str`
        Username.
    password : `str`
        Password.

    Returns
    -------
    token : `str`
        LTD Keeper API token.

    Raises
    ------
    KeeperError
        Raised if the LTD Keeper API cannot return a token.
    """
    token_endpoint = urljoin(host, '/token')
    r = requests.get(token_endpoint, auth=(username, password))
    if r.status_code != 200:
        raise KeeperError('Could not authenticate to {0}: error {1:d}\n{2}'.
                          format(host, r.status_code, r.json()))
    return r.json()['token']
[ "def", "get_keeper_token", "(", "host", ",", "username", ",", "password", ")", ":", "token_endpoint", "=", "urljoin", "(", "host", ",", "'/token'", ")", "r", "=", "requests", ".", "get", "(", "token_endpoint", ",", "auth", "=", "(", "username", ",", "password", ")", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "KeeperError", "(", "'Could not authenticate to {0}: error {1:d}\\n{2}'", ".", "format", "(", "host", ",", "r", ".", "status_code", ",", "r", ".", "json", "(", ")", ")", ")", "return", "r", ".", "json", "(", ")", "[", "'token'", "]" ]
Get a temporary auth token from LTD Keeper. Parameters ---------- host : `str` Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``). username : `str` Username. password : `str` Password. Returns ------- token : `str` LTD Keeper API token. Raises ------ KeeperError Raised if the LTD Keeper API cannot return a token.
[ "Get", "a", "temporary", "auth", "token", "from", "LTD", "Keeper", "." ]
python
test
27.357143
cloud-custodian/cloud-custodian
c7n/log.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/log.py#L143-L148
def format_message(self, msg):
    """format message."""
    return {'timestamp': int(msg.created * 1000),
            'message': self.format(msg),
            'stream': self.log_stream or msg.name,
            'group': self.log_group}
[ "def", "format_message", "(", "self", ",", "msg", ")", ":", "return", "{", "'timestamp'", ":", "int", "(", "msg", ".", "created", "*", "1000", ")", ",", "'message'", ":", "self", ".", "format", "(", "msg", ")", ",", "'stream'", ":", "self", ".", "log_stream", "or", "msg", ".", "name", ",", "'group'", ":", "self", ".", "log_group", "}" ]
format message.
[ "format", "message", "." ]
python
train
41.666667
Unidata/MetPy
metpy/interpolate/grid.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/interpolate/grid.py#L177-L179
def natural_neighbor(xp, yp, variable, grid_x, grid_y):
    """Wrap natural_neighbor_to_grid for deprecated natural_neighbor function."""
    return natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y)
[ "def", "natural_neighbor", "(", "xp", ",", "yp", ",", "variable", ",", "grid_x", ",", "grid_y", ")", ":", "return", "natural_neighbor_to_grid", "(", "xp", ",", "yp", ",", "variable", ",", "grid_x", ",", "grid_y", ")" ]
Wrap natural_neighbor_to_grid for deprecated natural_neighbor function.
[ "Wrap", "natural_neighbor_to_grid", "for", "deprecated", "natural_neighbor", "function", "." ]
python
train
68.333333
singularityhub/sregistry-cli
sregistry/utils/recipes.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/utils/recipes.py#L104-L148
def find_folder_recipes(base_folder, pattern="Singularity", manifest=None, base=None):
    '''find folder recipes will find recipes based on a particular pattern.

       Parameters
       ==========
       base_folder: the base folder to recursively walk
       pattern: a default pattern to search for
       manifest: an already started manifest
       base: if defined, consider folders under this level recursively.
    '''

    # The user is not appending to an existing manifest
    if manifest is None:
        manifest = dict()

    for root, dirnames, filenames in os.walk(base_folder):
        for filename in fnmatch.filter(filenames, pattern):
            container_path = os.path.join(root, filename)

            if base is not None:
                container_base = container_path.replace(base,'').strip('/')
                collection = container_base.split('/')[0]
                recipe = os.path.basename(container_base)
                container_uri = "%s/%s" %(collection,recipe)
            else:
                container_uri = '/'.join(container_path.strip('/').split('/')[-2:])

            add_container = True

            # Add the most recently updated container
            if container_uri in manifest:
                if manifest[container_uri]['modified'] > os.path.getmtime(container_path):
                    add_container = False

            if add_container:
                manifest[container_uri] = {'path': os.path.abspath(container_path),
                                           'modified':os.path.getmtime(container_path)}

    return manifest
[ "def", "find_folder_recipes", "(", "base_folder", ",", "pattern", "=", "\"Singularity\"", ",", "manifest", "=", "None", ",", "base", "=", "None", ")", ":", "# The user is not appending to an existing manifest", "if", "manifest", "is", "None", ":", "manifest", "=", "dict", "(", ")", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "base_folder", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "pattern", ")", ":", "container_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "if", "base", "is", "not", "None", ":", "container_base", "=", "container_path", ".", "replace", "(", "base", ",", "''", ")", ".", "strip", "(", "'/'", ")", "collection", "=", "container_base", ".", "split", "(", "'/'", ")", "[", "0", "]", "recipe", "=", "os", ".", "path", ".", "basename", "(", "container_base", ")", "container_uri", "=", "\"%s/%s\"", "%", "(", "collection", ",", "recipe", ")", "else", ":", "container_uri", "=", "'/'", ".", "join", "(", "container_path", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "-", "2", ":", "]", ")", "add_container", "=", "True", "# Add the most recently updated container", "if", "container_uri", "in", "manifest", ":", "if", "manifest", "[", "container_uri", "]", "[", "'modified'", "]", ">", "os", ".", "path", ".", "getmtime", "(", "container_path", ")", ":", "add_container", "=", "False", "if", "add_container", ":", "manifest", "[", "container_uri", "]", "=", "{", "'path'", ":", "os", ".", "path", ".", "abspath", "(", "container_path", ")", ",", "'modified'", ":", "os", ".", "path", ".", "getmtime", "(", "container_path", ")", "}", "return", "manifest" ]
find folder recipes will find recipes based on a particular pattern. Parameters ========== base_folder: the base folder to recursively walk pattern: a default pattern to search for manifest: an already started manifest base: if defined, consider folders under this level recursively.
[ "find", "folder", "recipes", "will", "find", "recipes", "based", "on", "a", "particular", "pattern", ".", "Parameters", "==========", "base_folder", ":", "the", "base", "folder", "to", "recursively", "walk", "pattern", ":", "a", "default", "pattern", "to", "search", "for", "manifest", ":", "an", "already", "started", "manifest", "base", ":", "if", "defined", "consider", "folders", "under", "this", "level", "recursively", "." ]
python
test
36.355556
saltstack/salt
salt/cloud/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L912-L926
def provider_list(self, lookup='all'):
    '''
    Return a mapping of all image data for available providers
    '''
    data = {}

    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data

    for alias, driver in lookups:
        if alias not in data:
            data[alias] = {}
        if driver not in data[alias]:
            data[alias][driver] = {}
    return data
[ "def", "provider_list", "(", "self", ",", "lookup", "=", "'all'", ")", ":", "data", "=", "{", "}", "lookups", "=", "self", ".", "lookup_providers", "(", "lookup", ")", "if", "not", "lookups", ":", "return", "data", "for", "alias", ",", "driver", "in", "lookups", ":", "if", "alias", "not", "in", "data", ":", "data", "[", "alias", "]", "=", "{", "}", "if", "driver", "not", "in", "data", "[", "alias", "]", ":", "data", "[", "alias", "]", "[", "driver", "]", "=", "{", "}", "return", "data" ]
Return a mapping of all image data for available providers
[ "Return", "a", "mapping", "of", "all", "image", "data", "for", "available", "providers" ]
python
train
29.2
GNS3/gns3-server
gns3server/controller/compute.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/compute.py#L640-L646
def list_files(self, project):
    """
    List files in the project on computes
    """
    path = "/projects/{}/files".format(project.id)
    res = yield from self.http_query("GET", path, timeout=120)
    return res.json
[ "def", "list_files", "(", "self", ",", "project", ")", ":", "path", "=", "\"/projects/{}/files\"", ".", "format", "(", "project", ".", "id", ")", "res", "=", "yield", "from", "self", ".", "http_query", "(", "\"GET\"", ",", "path", ",", "timeout", "=", "120", ")", "return", "res", ".", "json" ]
List files in the project on computes
[ "List", "files", "in", "the", "project", "on", "computes" ]
python
train
34.285714
wandb/client
wandb/vendor/prompt_toolkit/document.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/document.py#L986-L1001
def insert_before(self, text):
    """
    Create a new document, with this text inserted before the buffer.
    It keeps selection ranges and cursor position in sync.
    """
    selection_state = self.selection

    if selection_state:
        selection_state = SelectionState(
            original_cursor_position=selection_state.original_cursor_position + len(text),
            type=selection_state.type)

    return Document(
        text=text + self.text,
        cursor_position=self.cursor_position + len(text),
        selection=selection_state)
[ "def", "insert_before", "(", "self", ",", "text", ")", ":", "selection_state", "=", "self", ".", "selection", "if", "selection_state", ":", "selection_state", "=", "SelectionState", "(", "original_cursor_position", "=", "selection_state", ".", "original_cursor_position", "+", "len", "(", "text", ")", ",", "type", "=", "selection_state", ".", "type", ")", "return", "Document", "(", "text", "=", "text", "+", "self", ".", "text", ",", "cursor_position", "=", "self", ".", "cursor_position", "+", "len", "(", "text", ")", ",", "selection", "=", "selection_state", ")" ]
Create a new document, with this text inserted before the buffer. It keeps selection ranges and cursor position in sync.
[ "Create", "a", "new", "document", "with", "this", "text", "inserted", "before", "the", "buffer", ".", "It", "keeps", "selection", "ranges", "and", "cursor", "position", "in", "sync", "." ]
python
train
37.75
wandb/client
wandb/vendor/prompt_toolkit/buffer.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L592-L606
def auto_up(self, count=1, go_to_start_of_line_if_history_changes=False):
    """
    If we're not on the first line (of a multiline input) go a line up,
    otherwise go back in history. (If nothing is selected.)
    """
    if self.complete_state:
        self.complete_previous(count=count)
    elif self.document.cursor_position_row > 0:
        self.cursor_up(count=count)
    elif not self.selection_state:
        self.history_backward(count=count)

        # Go to the start of the line?
        if go_to_start_of_line_if_history_changes:
            self.cursor_position += self.document.get_start_of_line_position()
[ "def", "auto_up", "(", "self", ",", "count", "=", "1", ",", "go_to_start_of_line_if_history_changes", "=", "False", ")", ":", "if", "self", ".", "complete_state", ":", "self", ".", "complete_previous", "(", "count", "=", "count", ")", "elif", "self", ".", "document", ".", "cursor_position_row", ">", "0", ":", "self", ".", "cursor_up", "(", "count", "=", "count", ")", "elif", "not", "self", ".", "selection_state", ":", "self", ".", "history_backward", "(", "count", "=", "count", ")", "# Go to the start of the line?", "if", "go_to_start_of_line_if_history_changes", ":", "self", ".", "cursor_position", "+=", "self", ".", "document", ".", "get_start_of_line_position", "(", ")" ]
If we're not on the first line (of a multiline input) go a line up, otherwise go back in history. (If nothing is selected.)
[ "If", "we", "re", "not", "on", "the", "first", "line", "(", "of", "a", "multiline", "input", ")", "go", "a", "line", "up", "otherwise", "go", "back", "in", "history", ".", "(", "If", "nothing", "is", "selected", ".", ")" ]
python
train
44.2
tensorforce/tensorforce
tensorforce/execution/threaded_runner.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/execution/threaded_runner.py#L88-L186
def run(
    self,
    num_episodes=-1,
    max_episode_timesteps=-1,
    episode_finished=None,
    summary_report=None,
    summary_interval=0,
    num_timesteps=None,
    deterministic=False,
    episodes=None,
    max_timesteps=None,
    testing=False,
    sleep=None
):
    """
    Executes this runner by starting all Agents in parallel (each one in one thread).

    Args:
        episodes (int): Deprecated; see num_episodes.
        max_timesteps (int): Deprecated; see max_episode_timesteps.
    """
    # Renamed episodes into num_episodes to match BaseRunner's signature (fully backw. compatible).
    if episodes is not None:
        num_episodes = episodes
        warnings.warn("WARNING: `episodes` parameter is deprecated, use `num_episodes` instead.",
                      category=DeprecationWarning)
    assert isinstance(num_episodes, int)
    # Renamed max_timesteps into max_episode_timesteps to match single Runner's signature (fully backw. compatible).
    if max_timesteps is not None:
        max_episode_timesteps = max_timesteps
        warnings.warn("WARNING: `max_timesteps` parameter is deprecated, use `max_episode_timesteps` instead.",
                      category=DeprecationWarning)
    assert isinstance(max_episode_timesteps, int)

    if summary_report is not None:
        warnings.warn("WARNING: `summary_report` parameter is deprecated, use `episode_finished` callback "
                      "instead to generate summaries every n episodes.",
                      category=DeprecationWarning)

    self.reset()  # Reset counts/stop-condition for this run.
    self.global_episode = 0
    self.global_timestep = 0
    self.should_stop = False

    # Create threads.
    threads = [threading.Thread(target=self._run_single, args=(t, self.agent[t], self.environment[t],),
                                kwargs={"deterministic": deterministic,
                                        "max_episode_timesteps": max_episode_timesteps,
                                        "episode_finished": episode_finished,
                                        "testing": testing,
                                        "sleep": sleep})
               for t in range(len(self.agent))]

    # Start threads.
    self.start_time = time.time()
    [t.start() for t in threads]

    # Stay idle until killed by SIGINT or a global stop condition is met.
    try:
        next_summary = 0
        next_save = 0 if self.save_frequency_unit != "s" else time.time()
        while any([t.is_alive() for t in threads]) and self.global_episode < num_episodes or num_episodes == -1:
            self.time = time.time()

            # This is deprecated (but still supported) and should be covered by the `episode_finished` callable.
            if summary_report is not None and self.global_episode > next_summary:
                summary_report(self)
                next_summary += summary_interval

            if self.save_path and self.save_frequency is not None:
                do_save = True
                current = None
                if self.save_frequency_unit == "e" and self.global_episode > next_save:
                    current = self.global_episode
                elif self.save_frequency_unit == "s" and self.time > next_save:
                    current = self.time
                elif self.save_frequency_unit == "t" and self.global_timestep > next_save:
                    current = self.global_timestep
                else:
                    do_save = False

                if do_save:
                    self.agent[0].save_model(self.save_path)
                    # Make sure next save is later than right now.
                    while next_save < current:
                        next_save += self.save_frequency

            time.sleep(1)
    except KeyboardInterrupt:
        print('Keyboard interrupt, sending stop command to threads')
        self.should_stop = True

    # Join threads.
    [t.join() for t in threads]

    print('All threads stopped')
[ "def", "run", "(", "self", ",", "num_episodes", "=", "-", "1", ",", "max_episode_timesteps", "=", "-", "1", ",", "episode_finished", "=", "None", ",", "summary_report", "=", "None", ",", "summary_interval", "=", "0", ",", "num_timesteps", "=", "None", ",", "deterministic", "=", "False", ",", "episodes", "=", "None", ",", "max_timesteps", "=", "None", ",", "testing", "=", "False", ",", "sleep", "=", "None", ")", ":", "# Renamed episodes into num_episodes to match BaseRunner's signature (fully backw. compatible).", "if", "episodes", "is", "not", "None", ":", "num_episodes", "=", "episodes", "warnings", ".", "warn", "(", "\"WARNING: `episodes` parameter is deprecated, use `num_episodes` instead.\"", ",", "category", "=", "DeprecationWarning", ")", "assert", "isinstance", "(", "num_episodes", ",", "int", ")", "# Renamed max_timesteps into max_episode_timesteps to match single Runner's signature (fully backw. compatible).", "if", "max_timesteps", "is", "not", "None", ":", "max_episode_timesteps", "=", "max_timesteps", "warnings", ".", "warn", "(", "\"WARNING: `max_timesteps` parameter is deprecated, use `max_episode_timesteps` instead.\"", ",", "category", "=", "DeprecationWarning", ")", "assert", "isinstance", "(", "max_episode_timesteps", ",", "int", ")", "if", "summary_report", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"WARNING: `summary_report` parameter is deprecated, use `episode_finished` callback \"", "\"instead to generate summaries every n episodes.\"", ",", "category", "=", "DeprecationWarning", ")", "self", ".", "reset", "(", ")", "# Reset counts/stop-condition for this run.", "self", ".", "global_episode", "=", "0", "self", ".", "global_timestep", "=", "0", "self", ".", "should_stop", "=", "False", "# Create threads.", "threads", "=", "[", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run_single", ",", "args", "=", "(", "t", ",", "self", ".", "agent", "[", "t", "]", ",", "self", ".", "environment", "[", "t", "]", ",", ")", ",", "kwargs", "=", "{", "\"deterministic\"", ":", "deterministic", ",", "\"max_episode_timesteps\"", ":", "max_episode_timesteps", ",", "\"episode_finished\"", ":", "episode_finished", ",", "\"testing\"", ":", "testing", ",", "\"sleep\"", ":", "sleep", "}", ")", "for", "t", "in", "range", "(", "len", "(", "self", ".", "agent", ")", ")", "]", "# Start threads.", "self", ".", "start_time", "=", "time", ".", "time", "(", ")", "[", "t", ".", "start", "(", ")", "for", "t", "in", "threads", "]", "# Stay idle until killed by SIGINT or a global stop condition is met.", "try", ":", "next_summary", "=", "0", "next_save", "=", "0", "if", "self", ".", "save_frequency_unit", "!=", "\"s\"", "else", "time", ".", "time", "(", ")", "while", "any", "(", "[", "t", ".", "is_alive", "(", ")", "for", "t", "in", "threads", "]", ")", "and", "self", ".", "global_episode", "<", "num_episodes", "or", "num_episodes", "==", "-", "1", ":", "self", ".", "time", "=", "time", ".", "time", "(", ")", "# This is deprecated (but still supported) and should be covered by the `episode_finished` callable.", "if", "summary_report", "is", "not", "None", "and", "self", ".", "global_episode", ">", "next_summary", ":", "summary_report", "(", "self", ")", "next_summary", "+=", "summary_interval", "if", "self", ".", "save_path", "and", "self", ".", "save_frequency", "is", "not", "None", ":", "do_save", "=", "True", "current", "=", "None", "if", "self", ".", "save_frequency_unit", "==", "\"e\"", "and", "self", ".", "global_episode", ">", "next_save", ":", "current", "=", 
"self", ".", "global_episode", "elif", "self", ".", "save_frequency_unit", "==", "\"s\"", "and", "self", ".", "time", ">", "next_save", ":", "current", "=", "self", ".", "time", "elif", "self", ".", "save_frequency_unit", "==", "\"t\"", "and", "self", ".", "global_timestep", ">", "next_save", ":", "current", "=", "self", ".", "global_timestep", "else", ":", "do_save", "=", "False", "if", "do_save", ":", "self", ".", "agent", "[", "0", "]", ".", "save_model", "(", "self", ".", "save_path", ")", "# Make sure next save is later than right now.", "while", "next_save", "<", "current", ":", "next_save", "+=", "self", ".", "save_frequency", "time", ".", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "print", "(", "'Keyboard interrupt, sending stop command to threads'", ")", "self", ".", "should_stop", "=", "True", "# Join threads.", "[", "t", ".", "join", "(", ")", "for", "t", "in", "threads", "]", "print", "(", "'All threads stopped'", ")" ]
Executes this runner by starting all Agents in parallel (each one in one thread). Args: episodes (int): Deprecated; see num_episodes. max_timesteps (int): Deprecated; see max_episode_timesteps.
[ "Executes", "this", "runner", "by", "starting", "all", "Agents", "in", "parallel", "(", "each", "one", "in", "one", "thread", ")", "." ]
python
valid
42.959596
hardbyte/python-can
can/interfaces/systec/ucanbus.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/systec/ucanbus.py#L206-L214
def flush_tx_buffer(self):
    """
    Flushes the transmit buffer.

    :raises can.CanError:
        If flushing of the transmit buffer failed.
    """
    log.info('Flushing transmit buffer')
    self._ucan.reset_can(self.channel, ResetFlags.RESET_ONLY_TX_BUFF)
[ "def", "flush_tx_buffer", "(", "self", ")", ":", "log", ".", "info", "(", "'Flushing transmit buffer'", ")", "self", ".", "_ucan", ".", "reset_can", "(", "self", ".", "channel", ",", "ResetFlags", ".", "RESET_ONLY_TX_BUFF", ")" ]
Flushes the transmit buffer. :raises can.CanError: If flushing of the transmit buffer failed.
[ "Flushes", "the", "transmit", "buffer", "." ]
python
train
31.555556
Unity-Technologies/ml-agents
ml-agents-envs/mlagents/envs/brain.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents-envs/mlagents/envs/brain.py#L85-L138
def from_agent_proto(agent_info_list, brain_params): """ Converts list of agent infos to BrainInfo. """ vis_obs = [] for i in range(brain_params.number_visual_observations): obs = [BrainInfo.process_pixels(x.visual_observations[i], brain_params.camera_resolutions[i]['blackAndWhite']) for x in agent_info_list] vis_obs += [obs] if len(agent_info_list) == 0: memory_size = 0 else: memory_size = max([len(x.memories) for x in agent_info_list]) if memory_size == 0: memory = np.zeros((0, 0)) else: [x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list] memory = np.array([list(x.memories) for x in agent_info_list]) total_num_actions = sum(brain_params.vector_action_space_size) mask_actions = np.ones((len(agent_info_list), total_num_actions)) for agent_index, agent_info in enumerate(agent_info_list): if agent_info.action_mask is not None: if len(agent_info.action_mask) == total_num_actions: mask_actions[agent_index, :] = [ 0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)] if any([np.isnan(x.reward) for x in agent_info_list]): logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name) if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]): logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name) if len(agent_info_list) == 0: vector_obs = np.zeros( (0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations) ) else: vector_obs = np.nan_to_num( np.array([x.stacked_vector_observation for x in agent_info_list]) ) brain_info = BrainInfo( visual_observation=vis_obs, vector_observation=vector_obs, text_observations=[x.text_observation for x in agent_info_list], memory=memory, reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list], agents=[x.id for x in agent_info_list], local_done=[x.done for x in agent_info_list], vector_action=np.array([x.stored_vector_actions for x in agent_info_list]), text_action=[list(x.stored_text_actions) for x in agent_info_list], max_reached=[x.max_step_reached for x in agent_info_list], custom_observations=[x.custom_observation for x in agent_info_list], action_mask=mask_actions ) return brain_info
[ "def", "from_agent_proto", "(", "agent_info_list", ",", "brain_params", ")", ":", "vis_obs", "=", "[", "]", "for", "i", "in", "range", "(", "brain_params", ".", "number_visual_observations", ")", ":", "obs", "=", "[", "BrainInfo", ".", "process_pixels", "(", "x", ".", "visual_observations", "[", "i", "]", ",", "brain_params", ".", "camera_resolutions", "[", "i", "]", "[", "'blackAndWhite'", "]", ")", "for", "x", "in", "agent_info_list", "]", "vis_obs", "+=", "[", "obs", "]", "if", "len", "(", "agent_info_list", ")", "==", "0", ":", "memory_size", "=", "0", "else", ":", "memory_size", "=", "max", "(", "[", "len", "(", "x", ".", "memories", ")", "for", "x", "in", "agent_info_list", "]", ")", "if", "memory_size", "==", "0", ":", "memory", "=", "np", ".", "zeros", "(", "(", "0", ",", "0", ")", ")", "else", ":", "[", "x", ".", "memories", ".", "extend", "(", "[", "0", "]", "*", "(", "memory_size", "-", "len", "(", "x", ".", "memories", ")", ")", ")", "for", "x", "in", "agent_info_list", "]", "memory", "=", "np", ".", "array", "(", "[", "list", "(", "x", ".", "memories", ")", "for", "x", "in", "agent_info_list", "]", ")", "total_num_actions", "=", "sum", "(", "brain_params", ".", "vector_action_space_size", ")", "mask_actions", "=", "np", ".", "ones", "(", "(", "len", "(", "agent_info_list", ")", ",", "total_num_actions", ")", ")", "for", "agent_index", ",", "agent_info", "in", "enumerate", "(", "agent_info_list", ")", ":", "if", "agent_info", ".", "action_mask", "is", "not", "None", ":", "if", "len", "(", "agent_info", ".", "action_mask", ")", "==", "total_num_actions", ":", "mask_actions", "[", "agent_index", ",", ":", "]", "=", "[", "0", "if", "agent_info", ".", "action_mask", "[", "k", "]", "else", "1", "for", "k", "in", "range", "(", "total_num_actions", ")", "]", "if", "any", "(", "[", "np", ".", "isnan", "(", "x", ".", "reward", ")", "for", "x", "in", "agent_info_list", "]", ")", ":", "logger", ".", "warning", "(", "\"An agent had a NaN reward for brain \"", "+", "brain_params", ".", "brain_name", ")", "if", "any", "(", "[", "np", ".", "isnan", "(", "x", ".", "stacked_vector_observation", ")", ".", "any", "(", ")", "for", "x", "in", "agent_info_list", "]", ")", ":", "logger", ".", "warning", "(", "\"An agent had a NaN observation for brain \"", "+", "brain_params", ".", "brain_name", ")", "if", "len", "(", "agent_info_list", ")", "==", "0", ":", "vector_obs", "=", "np", ".", "zeros", "(", "(", "0", ",", "brain_params", ".", "vector_observation_space_size", "*", "brain_params", ".", "num_stacked_vector_observations", ")", ")", "else", ":", "vector_obs", "=", "np", ".", "nan_to_num", "(", "np", ".", "array", "(", "[", "x", ".", "stacked_vector_observation", "for", "x", "in", "agent_info_list", "]", ")", ")", "brain_info", "=", "BrainInfo", "(", "visual_observation", "=", "vis_obs", ",", "vector_observation", "=", "vector_obs", ",", "text_observations", "=", "[", "x", ".", "text_observation", "for", "x", "in", "agent_info_list", "]", ",", "memory", "=", "memory", ",", "reward", "=", "[", "x", ".", "reward", "if", "not", "np", ".", "isnan", "(", "x", ".", "reward", ")", "else", "0", "for", "x", "in", "agent_info_list", "]", ",", "agents", "=", "[", "x", ".", "id", "for", "x", "in", "agent_info_list", "]", ",", "local_done", "=", "[", "x", ".", "done", "for", "x", "in", "agent_info_list", "]", ",", "vector_action", "=", "np", ".", "array", "(", "[", "x", ".", "stored_vector_actions", "for", "x", "in", "agent_info_list", "]", ")", ",", "text_action", "=", "[", "list", "(", "x", ".", 
"stored_text_actions", ")", "for", "x", "in", "agent_info_list", "]", ",", "max_reached", "=", "[", "x", ".", "max_step_reached", "for", "x", "in", "agent_info_list", "]", ",", "custom_observations", "=", "[", "x", ".", "custom_observation", "for", "x", "in", "agent_info_list", "]", ",", "action_mask", "=", "mask_actions", ")", "return", "brain_info" ]
Converts list of agent infos to BrainInfo.
[ "Converts", "list", "of", "agent", "infos", "to", "BrainInfo", "." ]
python
train
51.703704
ioos/compliance-checker
compliance_checker/ioos.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/ioos.py#L311-L322
def check_geophysical_vars_fill_value(self, ds): ''' Check that geophysical variables contain fill values. :param netCDF4.Dataset ds: An open netCDF dataset ''' results = [] for geo_var in get_geophysical_variables(ds): results.append( self._has_var_attr(ds, geo_var, '_FillValue', '_FillValue', BaseCheck.MEDIUM), ) return results
[ "def", "check_geophysical_vars_fill_value", "(", "self", ",", "ds", ")", ":", "results", "=", "[", "]", "for", "geo_var", "in", "get_geophysical_variables", "(", "ds", ")", ":", "results", ".", "append", "(", "self", ".", "_has_var_attr", "(", "ds", ",", "geo_var", ",", "'_FillValue'", ",", "'_FillValue'", ",", "BaseCheck", ".", "MEDIUM", ")", ",", ")", "return", "results" ]
Check that geophysical variables contain fill values. :param netCDF4.Dataset ds: An open netCDF dataset
[ "Check", "that", "geophysical", "variables", "contain", "fill", "values", "." ]
python
train
34.75
waleedka/hiddenlayer
hiddenlayer/canvas.py
https://github.com/waleedka/hiddenlayer/blob/294f8732b271cbdd6310c55bdf5ce855cbf61c75/hiddenlayer/canvas.py#L161-L169
def draw_summary(self, history, title=""): """Inserts a text summary at the top that lists the number of steps and total training time.""" # Generate summary string time_str = str(history.get_total_time()).split(".")[0] # remove microseconds summary = "Step: {} Time: {}".format(history.step, time_str) if title: summary = title + "\n\n" + summary self.figure.suptitle(summary)
[ "def", "draw_summary", "(", "self", ",", "history", ",", "title", "=", "\"\"", ")", ":", "# Generate summary string", "time_str", "=", "str", "(", "history", ".", "get_total_time", "(", ")", ")", ".", "split", "(", "\".\"", ")", "[", "0", "]", "# remove microseconds", "summary", "=", "\"Step: {} Time: {}\"", ".", "format", "(", "history", ".", "step", ",", "time_str", ")", "if", "title", ":", "summary", "=", "title", "+", "\"\\n\\n\"", "+", "summary", "self", ".", "figure", ".", "suptitle", "(", "summary", ")" ]
Inserts a text summary at the top that lists the number of steps and total training time.
[ "Inserts", "a", "text", "summary", "at", "the", "top", "that", "lists", "the", "number", "of", "steps", "and", "total", "training", "time", "." ]
python
train
49.222222
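The time formatting in draw_summary relies on str() of a timedelta ending with the fractional seconds after a dot; a small standalone illustration of that trick (the numbers are made up for the example):

from datetime import timedelta

total = timedelta(minutes=3, seconds=12, microseconds=345678)
time_str = str(total).split(".")[0]   # drop microseconds, as in draw_summary
print("Step: {} Time: {}".format(1500, time_str))   # -> Step: 1500 Time: 0:03:12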
andrea-cuttone/geoplotlib
geoplotlib/colors.py
https://github.com/andrea-cuttone/geoplotlib/blob/a1c355bccec91cabd157569fad6daf53cf7687a1/geoplotlib/colors.py#L92-L108
def create_set_cmap(values, cmap_name, alpha=255): """ return a dict of colors corresponding to the unique values :param values: values to be mapped :param cmap_name: colormap name :param alpha: color alpha :return: dict of colors corresponding to the unique values """ unique_values = list(set(values)) shuffle(unique_values) from pylab import get_cmap cmap = get_cmap(cmap_name) d = {} for i in range(len(unique_values)): d[unique_values[i]] = _convert_color_format(cmap(1.*i/len(unique_values)), alpha) return d
[ "def", "create_set_cmap", "(", "values", ",", "cmap_name", ",", "alpha", "=", "255", ")", ":", "unique_values", "=", "list", "(", "set", "(", "values", ")", ")", "shuffle", "(", "unique_values", ")", "from", "pylab", "import", "get_cmap", "cmap", "=", "get_cmap", "(", "cmap_name", ")", "d", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "unique_values", ")", ")", ":", "d", "[", "unique_values", "[", "i", "]", "]", "=", "_convert_color_format", "(", "cmap", "(", "1.", "*", "i", "/", "len", "(", "unique_values", ")", ")", ",", "alpha", ")", "return", "d" ]
return a dict of colors corresponding to the unique values :param values: values to be mapped :param cmap_name: colormap name :param alpha: color alpha :return: dict of colors corresponding to the unique values
[ "return", "a", "dict", "of", "colors", "corresponding", "to", "the", "unique", "values", ":", "param", "values", ":", "values", "to", "be", "mapped", ":", "param", "cmap_name", ":", "colormap", "name", ":", "param", "alpha", ":", "color", "alpha", ":", "return", ":", "dict", "of", "colors", "corresponding", "to", "the", "unique", "values" ]
python
train
33.352941
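A hedged usage sketch for create_set_cmap (assumes geoplotlib and matplotlib are installed; the category values and the 'jet' colormap name are just examples):

from geoplotlib.colors import create_set_cmap

categories = ['road', 'park', 'water', 'road', 'park']
cmap = create_set_cmap(categories, 'jet', alpha=200)
# cmap maps each unique value ('road', 'park', 'water') to a color sampled from
# the 'jet' colormap; ordering is randomized by the shuffle() call in the function.
for value, color in cmap.items():
    print(value, color)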
ThreatConnect-Inc/tcex
tcex/tcex_bin.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin.py#L109-L118
def handle_error(err, halt=True):
    """Print error message and optionally exit.

    Args:
        err (str): The error message to print.
        halt (bool, optional): Defaults to True. If True the script will exit.
    """
    print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))
    if halt:
        sys.exit(1)
[ "def", "handle_error", "(", "err", ",", "halt", "=", "True", ")", ":", "print", "(", "'{}{}{}'", ".", "format", "(", "c", ".", "Style", ".", "BRIGHT", ",", "c", ".", "Fore", ".", "RED", ",", "err", ")", ")", "if", "halt", ":", "sys", ".", "exit", "(", "1", ")" ]
Print error message and optionally exit.

    Args:
        err (str): The error message to print.
        halt (bool, optional): Defaults to True. If True the script will exit.
[ "Print", "errors", "message", "and", "optionally", "exit", "." ]
python
train
34.3
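handle_error appears to build on colorama-style constants (the `c` prefix is presumably `import colorama as c`); an equivalent standalone print, assuming colorama is installed:

import colorama as c

c.init(autoreset=True)
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, 'Install validation failed'))
# handle_error would follow this with sys.exit(1) when halt is True.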
saltstack/salt
salt/modules/timezone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L378-L488
def get_hwclock(): ''' Get current hardware clock setting (UTC or localtime) CLI Example: .. code-block:: bash salt '*' timezone.get_hwclock ''' if salt.utils.path.which('timedatectl'): ret = _timedatectl() for line in (x.strip() for x in ret['stdout'].splitlines()): if 'rtc in local tz' in line.lower(): try: if line.split(':')[-1].strip().lower() == 'yes': return 'localtime' else: return 'UTC' except IndexError: pass msg = ('Failed to parse timedatectl output: {0}\n' 'Please file an issue with SaltStack').format(ret['stdout']) raise CommandExecutionError(msg) else: os_family = __grains__['os_family'] for family in ('RedHat', 'Suse', 'NILinuxRT'): if family in os_family: return _get_adjtime_timezone() if 'Debian' in __grains__['os_family']: # Original way to look up hwclock on Debian-based systems try: with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if re.match(r'^\s*#', line): continue if 'UTC=' in line: is_utc = line.rstrip('\n').split('=')[-1].lower() if is_utc == 'yes': return 'UTC' else: return 'localtime' except IOError as exc: pass # Since Wheezy return _get_adjtime_timezone() if 'Gentoo' in __grains__['os_family']: if not os.path.exists('/etc/adjtime'): offset_file = '/etc/conf.d/hwclock' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('clock='): line = line.rstrip('\n') line = line.split('=')[-1].strip('\'"') if line == 'UTC': return line if line == 'local': return 'LOCAL' raise CommandExecutionError( 'Correct offset value not found in {0}' .format(offset_file) ) except IOError as exc: raise CommandExecutionError( 'Problem reading offset file {0}: {1}' .format(offset_file, exc.strerror) ) return _get_adjtime_timezone() if 'Solaris' in __grains__['os_family']: offset_file = '/etc/rtc_config' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('zone_info=GMT'): return 'UTC' return 'localtime' except IOError as exc: if exc.errno == errno.ENOENT: # offset file does not exist return 'UTC' raise CommandExecutionError( 'Problem reading offset file {0}: {1}' .format(offset_file, exc.strerror) ) if 'AIX' in __grains__['os_family']: offset_file = '/etc/environment' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('TZ=UTC'): return 'UTC' return 'localtime' except IOError as exc: if exc.errno == errno.ENOENT: # offset file does not exist return 'UTC' raise CommandExecutionError( 'Problem reading offset file {0}: {1}' .format(offset_file, exc.strerror) )
[ "def", "get_hwclock", "(", ")", ":", "if", "salt", ".", "utils", ".", "path", ".", "which", "(", "'timedatectl'", ")", ":", "ret", "=", "_timedatectl", "(", ")", "for", "line", "in", "(", "x", ".", "strip", "(", ")", "for", "x", "in", "ret", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ")", ":", "if", "'rtc in local tz'", "in", "line", ".", "lower", "(", ")", ":", "try", ":", "if", "line", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'yes'", ":", "return", "'localtime'", "else", ":", "return", "'UTC'", "except", "IndexError", ":", "pass", "msg", "=", "(", "'Failed to parse timedatectl output: {0}\\n'", "'Please file an issue with SaltStack'", ")", ".", "format", "(", "ret", "[", "'stdout'", "]", ")", "raise", "CommandExecutionError", "(", "msg", ")", "else", ":", "os_family", "=", "__grains__", "[", "'os_family'", "]", "for", "family", "in", "(", "'RedHat'", ",", "'Suse'", ",", "'NILinuxRT'", ")", ":", "if", "family", "in", "os_family", ":", "return", "_get_adjtime_timezone", "(", ")", "if", "'Debian'", "in", "__grains__", "[", "'os_family'", "]", ":", "# Original way to look up hwclock on Debian-based systems", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/etc/default/rcS'", ",", "'r'", ")", "as", "fp_", ":", "for", "line", "in", "fp_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "if", "re", ".", "match", "(", "r'^\\s*#'", ",", "line", ")", ":", "continue", "if", "'UTC='", "in", "line", ":", "is_utc", "=", "line", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "'='", ")", "[", "-", "1", "]", ".", "lower", "(", ")", "if", "is_utc", "==", "'yes'", ":", "return", "'UTC'", "else", ":", "return", "'localtime'", "except", "IOError", "as", "exc", ":", "pass", "# Since Wheezy", "return", "_get_adjtime_timezone", "(", ")", "if", "'Gentoo'", "in", "__grains__", "[", "'os_family'", "]", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "'/etc/adjtime'", ")", ":", "offset_file", "=", "'/etc/conf.d/hwclock'", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "offset_file", ",", "'r'", ")", "as", "fp_", ":", "for", "line", "in", "fp_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "if", "line", ".", "startswith", "(", "'clock='", ")", ":", "line", "=", "line", ".", "rstrip", "(", "'\\n'", ")", "line", "=", "line", ".", "split", "(", "'='", ")", "[", "-", "1", "]", ".", "strip", "(", "'\\'\"'", ")", "if", "line", "==", "'UTC'", ":", "return", "line", "if", "line", "==", "'local'", ":", "return", "'LOCAL'", "raise", "CommandExecutionError", "(", "'Correct offset value not found in {0}'", ".", "format", "(", "offset_file", ")", ")", "except", "IOError", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Problem reading offset file {0}: {1}'", ".", "format", "(", "offset_file", ",", "exc", ".", "strerror", ")", ")", "return", "_get_adjtime_timezone", "(", ")", "if", "'Solaris'", "in", "__grains__", "[", "'os_family'", "]", ":", "offset_file", "=", "'/etc/rtc_config'", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "offset_file", ",", "'r'", ")", "as", "fp_", ":", "for", "line", "in", "fp_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "if", "line", ".", "startswith", "(", "'zone_info=GMT'", ")", ":", "return", "'UTC'", "return", "'localtime'", "except", "IOError", "as", "exc", ":", 
"if", "exc", ".", "errno", "==", "errno", ".", "ENOENT", ":", "# offset file does not exist", "return", "'UTC'", "raise", "CommandExecutionError", "(", "'Problem reading offset file {0}: {1}'", ".", "format", "(", "offset_file", ",", "exc", ".", "strerror", ")", ")", "if", "'AIX'", "in", "__grains__", "[", "'os_family'", "]", ":", "offset_file", "=", "'/etc/environment'", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "offset_file", ",", "'r'", ")", "as", "fp_", ":", "for", "line", "in", "fp_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "if", "line", ".", "startswith", "(", "'TZ=UTC'", ")", ":", "return", "'UTC'", "return", "'localtime'", "except", "IOError", "as", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "ENOENT", ":", "# offset file does not exist", "return", "'UTC'", "raise", "CommandExecutionError", "(", "'Problem reading offset file {0}: {1}'", ".", "format", "(", "offset_file", ",", "exc", ".", "strerror", ")", ")" ]
Get current hardware clock setting (UTC or localtime) CLI Example: .. code-block:: bash salt '*' timezone.get_hwclock
[ "Get", "current", "hardware", "clock", "setting", "(", "UTC", "or", "localtime", ")" ]
python
train
40.288288
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L337-L351
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_flogi(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_rx_flogi = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-flogi") fcoe_intf_rx_flogi.text = kwargs.pop('fcoe_intf_rx_flogi') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_flogi", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe_get_interface", "=", "ET", ".", "Element", "(", "\"fcoe_get_interface\"", ")", "config", "=", "fcoe_get_interface", "output", "=", "ET", ".", "SubElement", "(", "fcoe_get_interface", ",", "\"output\"", ")", "fcoe_intf_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"fcoe-intf-list\"", ")", "fcoe_intf_fcoe_port_id_key", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-fcoe-port-id\"", ")", "fcoe_intf_fcoe_port_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_fcoe_port_id'", ")", "fcoe_intf_rx_flogi", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-rx-flogi\"", ")", "fcoe_intf_rx_flogi", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_rx_flogi'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
52.133333
gc3-uzh-ch/elasticluster
elasticluster/providers/ec2_boto.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/ec2_boto.py#L498-L510
def _build_cached_instances(self): """ Build lookup table of VM instances known to the cloud provider. The returned dictionary links VM id with the actual VM object. """ connection = self._connect() reservations = connection.get_all_reservations() cached_instances = {} for rs in reservations: for vm in rs.instances: cached_instances[vm.id] = vm return cached_instances
[ "def", "_build_cached_instances", "(", "self", ")", ":", "connection", "=", "self", ".", "_connect", "(", ")", "reservations", "=", "connection", ".", "get_all_reservations", "(", ")", "cached_instances", "=", "{", "}", "for", "rs", "in", "reservations", ":", "for", "vm", "in", "rs", ".", "instances", ":", "cached_instances", "[", "vm", ".", "id", "]", "=", "vm", "return", "cached_instances" ]
Build lookup table of VM instances known to the cloud provider. The returned dictionary links VM id with the actual VM object.
[ "Build", "lookup", "table", "of", "VM", "instances", "known", "to", "the", "cloud", "provider", "." ]
python
train
35.307692
Rikanishu/static-bundle
static_bundle/bundles.py
https://github.com/Rikanishu/static-bundle/blob/2f6458cb9d9d9049b4fd829f7d6951a45d547c68/static_bundle/bundles.py#L62-L69
def add_file(self, *args): """ Add single file or list of files to bundle :type: file_path: str|unicode """ for file_path in args: self.files.append(FilePath(file_path, self))
[ "def", "add_file", "(", "self", ",", "*", "args", ")", ":", "for", "file_path", "in", "args", ":", "self", ".", "files", ".", "append", "(", "FilePath", "(", "file_path", ",", "self", ")", ")" ]
Add single file or list of files to bundle :type: file_path: str|unicode
[ "Add", "single", "file", "or", "list", "of", "files", "to", "bundle" ]
python
valid
27.625
UCL-INGI/INGInious
inginious/frontend/pages/register.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/register.py#L153-L177
def reset_passwd(self, data): """ Reset the user password """ error = False msg = "" # Check input format if len(data["passwd"]) < 6: error = True msg = _("Password too short.") elif data["passwd"] != data["passwd2"]: error = True msg = _("Passwords don't match !") if not error: passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest() user = self.database.users.find_one_and_update({"reset": data["reset_hash"]}, {"$set": {"password": passwd_hash}, "$unset": {"reset": True, "activate": True}}) if user is None: error = True msg = _("Invalid reset hash.") else: msg = _("Your password has been successfully changed.") return msg, error
[ "def", "reset_passwd", "(", "self", ",", "data", ")", ":", "error", "=", "False", "msg", "=", "\"\"", "# Check input format", "if", "len", "(", "data", "[", "\"passwd\"", "]", ")", "<", "6", ":", "error", "=", "True", "msg", "=", "_", "(", "\"Password too short.\"", ")", "elif", "data", "[", "\"passwd\"", "]", "!=", "data", "[", "\"passwd2\"", "]", ":", "error", "=", "True", "msg", "=", "_", "(", "\"Passwords don't match !\"", ")", "if", "not", "error", ":", "passwd_hash", "=", "hashlib", ".", "sha512", "(", "data", "[", "\"passwd\"", "]", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")", "user", "=", "self", ".", "database", ".", "users", ".", "find_one_and_update", "(", "{", "\"reset\"", ":", "data", "[", "\"reset_hash\"", "]", "}", ",", "{", "\"$set\"", ":", "{", "\"password\"", ":", "passwd_hash", "}", ",", "\"$unset\"", ":", "{", "\"reset\"", ":", "True", ",", "\"activate\"", ":", "True", "}", "}", ")", "if", "user", "is", "None", ":", "error", "=", "True", "msg", "=", "_", "(", "\"Invalid reset hash.\"", ")", "else", ":", "msg", "=", "_", "(", "\"Your password has been successfully changed.\"", ")", "return", "msg", ",", "error" ]
Reset the user password
[ "Reset", "the", "user", "password" ]
python
train
38.36
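The password check above reduces to storing and comparing an unsalted SHA-512 hex digest; a minimal standalone sketch of that step (the helper name is illustrative, not part of INGInious):

import hashlib

def sha512_hex(plaintext):
    # Same hashing scheme as reset_passwd: UTF-8 encode, SHA-512, hex digest.
    return hashlib.sha512(plaintext.encode("utf-8")).hexdigest()

stored_hash = sha512_hex("correct horse battery staple")
assert sha512_hex("correct horse battery staple") == stored_hash
assert sha512_hex("wrong guess") != stored_hash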
dereneaton/ipyrad
ipyrad/assemble/cluster_within.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_within.py#L622-L642
def setup_dirs(data): """ sets up directories for step3 data """ ## make output folder for clusters pdir = os.path.realpath(data.paramsdict["project_dir"]) data.dirs.clusts = os.path.join(pdir, "{}_clust_{}"\ .format(data.name, data.paramsdict["clust_threshold"])) if not os.path.exists(data.dirs.clusts): os.mkdir(data.dirs.clusts) ## make a tmpdir for align files data.tmpdir = os.path.abspath(os.path.expanduser( os.path.join(pdir, data.name+'-tmpalign'))) if not os.path.exists(data.tmpdir): os.mkdir(data.tmpdir) ## If ref mapping, init samples and make the refmapping output directory. if not data.paramsdict["assembly_method"] == "denovo": ## make output directory for read mapping process data.dirs.refmapping = os.path.join(pdir, "{}_refmapping".format(data.name)) if not os.path.exists(data.dirs.refmapping): os.mkdir(data.dirs.refmapping)
[ "def", "setup_dirs", "(", "data", ")", ":", "## make output folder for clusters", "pdir", "=", "os", ".", "path", ".", "realpath", "(", "data", ".", "paramsdict", "[", "\"project_dir\"", "]", ")", "data", ".", "dirs", ".", "clusts", "=", "os", ".", "path", ".", "join", "(", "pdir", ",", "\"{}_clust_{}\"", ".", "format", "(", "data", ".", "name", ",", "data", ".", "paramsdict", "[", "\"clust_threshold\"", "]", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "data", ".", "dirs", ".", "clusts", ")", ":", "os", ".", "mkdir", "(", "data", ".", "dirs", ".", "clusts", ")", "## make a tmpdir for align files", "data", ".", "tmpdir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "join", "(", "pdir", ",", "data", ".", "name", "+", "'-tmpalign'", ")", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "data", ".", "tmpdir", ")", ":", "os", ".", "mkdir", "(", "data", ".", "tmpdir", ")", "## If ref mapping, init samples and make the refmapping output directory.", "if", "not", "data", ".", "paramsdict", "[", "\"assembly_method\"", "]", "==", "\"denovo\"", ":", "## make output directory for read mapping process", "data", ".", "dirs", ".", "refmapping", "=", "os", ".", "path", ".", "join", "(", "pdir", ",", "\"{}_refmapping\"", ".", "format", "(", "data", ".", "name", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "data", ".", "dirs", ".", "refmapping", ")", ":", "os", ".", "mkdir", "(", "data", ".", "dirs", ".", "refmapping", ")" ]
sets up directories for step3 data
[ "sets", "up", "directories", "for", "step3", "data" ]
python
valid
45.428571
shakefu/pyconfig
pyconfig/__init__.py
https://github.com/shakefu/pyconfig/blob/000cb127db51e03cb4070aae6943e956193cbad5/pyconfig/__init__.py#L195-L213
def get(self, name, default, allow_default=True): """ Return a setting value. :param str name: Setting key name. :param default: Default value of setting if it's not explicitly set. :param bool allow_default: If true, use the parameter default as default if the key is not set, else raise :exc:`LookupError` :raises: :exc:`LookupError` if allow_default is false and the setting is not set. """ if not self.settings.get('pyconfig.case_sensitive', False): name = name.lower() if name not in self.settings: if not allow_default: raise LookupError('No setting "{name}"'.format(name=name)) self.settings[name] = default return self.settings[name]
[ "def", "get", "(", "self", ",", "name", ",", "default", ",", "allow_default", "=", "True", ")", ":", "if", "not", "self", ".", "settings", ".", "get", "(", "'pyconfig.case_sensitive'", ",", "False", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "name", "not", "in", "self", ".", "settings", ":", "if", "not", "allow_default", ":", "raise", "LookupError", "(", "'No setting \"{name}\"'", ".", "format", "(", "name", "=", "name", ")", ")", "self", ".", "settings", "[", "name", "]", "=", "default", "return", "self", ".", "settings", "[", "name", "]" ]
Return a setting value. :param str name: Setting key name. :param default: Default value of setting if it's not explicitly set. :param bool allow_default: If true, use the parameter default as default if the key is not set, else raise :exc:`LookupError` :raises: :exc:`LookupError` if allow_default is false and the setting is not set.
[ "Return", "a", "setting", "value", "." ]
python
valid
45.736842
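A standalone sketch of the same case-insensitive, memoizing get-with-default pattern (the Settings class below is hypothetical, not the pyconfig API):

class Settings:
    def __init__(self, case_sensitive=False):
        self.case_sensitive = case_sensitive
        self.settings = {}

    def get(self, name, default, allow_default=True):
        if not self.case_sensitive:
            name = name.lower()
        if name not in self.settings:
            if not allow_default:
                raise LookupError('No setting "{name}"'.format(name=name))
            self.settings[name] = default  # the default is memoized on first lookup
        return self.settings[name]

conf = Settings()
print(conf.get("Humbug.Timeout", 30))   # -> 30, stored under 'humbug.timeout'
print(conf.get("HUMBUG.TIMEOUT", 60))   # -> 30, the memoized default wins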
tinybike/weightedstats
weightedstats/__init__.py
https://github.com/tinybike/weightedstats/blob/0e2638099dba7f288a1553a83e957a95522229da/weightedstats/__init__.py#L69-L87
def weighted_median(data, weights=None): """Calculate the weighted median of a list.""" if weights is None: return median(data) midpoint = 0.5 * sum(weights) if any([j > midpoint for j in weights]): return data[weights.index(max(weights))] if any([j > 0 for j in weights]): sorted_data, sorted_weights = zip(*sorted(zip(data, weights))) cumulative_weight = 0 below_midpoint_index = 0 while cumulative_weight <= midpoint: below_midpoint_index += 1 cumulative_weight += sorted_weights[below_midpoint_index-1] cumulative_weight -= sorted_weights[below_midpoint_index-1] if cumulative_weight - midpoint < sys.float_info.epsilon: bounds = sorted_data[below_midpoint_index-2:below_midpoint_index] return sum(bounds) / float(len(bounds)) return sorted_data[below_midpoint_index-1]
[ "def", "weighted_median", "(", "data", ",", "weights", "=", "None", ")", ":", "if", "weights", "is", "None", ":", "return", "median", "(", "data", ")", "midpoint", "=", "0.5", "*", "sum", "(", "weights", ")", "if", "any", "(", "[", "j", ">", "midpoint", "for", "j", "in", "weights", "]", ")", ":", "return", "data", "[", "weights", ".", "index", "(", "max", "(", "weights", ")", ")", "]", "if", "any", "(", "[", "j", ">", "0", "for", "j", "in", "weights", "]", ")", ":", "sorted_data", ",", "sorted_weights", "=", "zip", "(", "*", "sorted", "(", "zip", "(", "data", ",", "weights", ")", ")", ")", "cumulative_weight", "=", "0", "below_midpoint_index", "=", "0", "while", "cumulative_weight", "<=", "midpoint", ":", "below_midpoint_index", "+=", "1", "cumulative_weight", "+=", "sorted_weights", "[", "below_midpoint_index", "-", "1", "]", "cumulative_weight", "-=", "sorted_weights", "[", "below_midpoint_index", "-", "1", "]", "if", "cumulative_weight", "-", "midpoint", "<", "sys", ".", "float_info", ".", "epsilon", ":", "bounds", "=", "sorted_data", "[", "below_midpoint_index", "-", "2", ":", "below_midpoint_index", "]", "return", "sum", "(", "bounds", ")", "/", "float", "(", "len", "(", "bounds", ")", ")", "return", "sorted_data", "[", "below_midpoint_index", "-", "1", "]" ]
Calculate the weighted median of a list.
[ "Calculate", "the", "weighted", "median", "of", "a", "list", "." ]
python
train
47.105263
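Usage sketch for weighted_median (assumes the weightedstats package is importable; the numbers are made up):

import weightedstats as ws

data, weights = [10, 20, 30], [1, 1, 5]
# The weight 5 exceeds half the total weight (3.5), so the short-circuit branch
# returns the data point carrying the dominant weight.
print(ws.weighted_median(data, weights=weights))   # -> 30
print(ws.weighted_median([1, 2, 3, 4]))            # no weights: falls back to the plain median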
colab/colab-superarchives-plugin
src/colab_superarchives/management/commands/import_emails.py
https://github.com/colab/colab-superarchives-plugin/blob/fe588a1d4fac874ccad2063ee19a857028a22721/src/colab_superarchives/management/commands/import_emails.py#L239-L263
def import_emails(self, archives_path, all, exclude_lists=None): """Get emails from the filesystem from the `archives_path` and store them into the database. If `all` is set to True all the filesystem storage will be imported otherwise the importation will resume from the last message previously imported. The lists set in `exclude_lists` won't be imported. """ count = 0 email_generator = self.get_emails(archives_path, all, exclude_lists) for mailinglist_name, msg, index in email_generator: try: self.save_email(mailinglist_name, msg, index) except: # This anti-pattern is needed to avoid the transations to # get stuck in case of errors. transaction.rollback() raise count += 1 if count % 1000 == 0: transaction.commit() transaction.commit()
[ "def", "import_emails", "(", "self", ",", "archives_path", ",", "all", ",", "exclude_lists", "=", "None", ")", ":", "count", "=", "0", "email_generator", "=", "self", ".", "get_emails", "(", "archives_path", ",", "all", ",", "exclude_lists", ")", "for", "mailinglist_name", ",", "msg", ",", "index", "in", "email_generator", ":", "try", ":", "self", ".", "save_email", "(", "mailinglist_name", ",", "msg", ",", "index", ")", "except", ":", "# This anti-pattern is needed to avoid the transations to", "# get stuck in case of errors.", "transaction", ".", "rollback", "(", ")", "raise", "count", "+=", "1", "if", "count", "%", "1000", "==", "0", ":", "transaction", ".", "commit", "(", ")", "transaction", ".", "commit", "(", ")" ]
Get emails from the filesystem from the `archives_path` and store them into the database. If `all` is set to True all the filesystem storage will be imported otherwise the importation will resume from the last message previously imported. The lists set in `exclude_lists` won't be imported.
[ "Get", "emails", "from", "the", "filesystem", "from", "the", "archives_path", "and", "store", "them", "into", "the", "database", ".", "If", "all", "is", "set", "to", "True", "all", "the", "filesystem", "storage", "will", "be", "imported", "otherwise", "the", "importation", "will", "resume", "from", "the", "last", "message", "previously", "imported", ".", "The", "lists", "set", "in", "exclude_lists", "won", "t", "be", "imported", "." ]
python
train
38.08
gitpython-developers/GitPython
git/objects/commit.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/objects/commit.py#L251-L279
def _iter_from_process_or_stream(cls, repo, proc_or_stream): """Parse out commit information into a list of Commit objects We expect one-line per commit, and parse the actual commit information directly from our lighting fast object database :param proc: git-rev-list process instance - one sha per line :return: iterator returning Commit objects""" stream = proc_or_stream if not hasattr(stream, 'readline'): stream = proc_or_stream.stdout readline = stream.readline while True: line = readline() if not line: break hexsha = line.strip() if len(hexsha) > 40: # split additional information, as returned by bisect for instance hexsha, _ = line.split(None, 1) # END handle extra info assert len(hexsha) == 40, "Invalid line: %s" % hexsha yield Commit(repo, hex_to_bin(hexsha)) # END for each line in stream # TODO: Review this - it seems process handling got a bit out of control # due to many developers trying to fix the open file handles issue if hasattr(proc_or_stream, 'wait'): finalize_process(proc_or_stream)
[ "def", "_iter_from_process_or_stream", "(", "cls", ",", "repo", ",", "proc_or_stream", ")", ":", "stream", "=", "proc_or_stream", "if", "not", "hasattr", "(", "stream", ",", "'readline'", ")", ":", "stream", "=", "proc_or_stream", ".", "stdout", "readline", "=", "stream", ".", "readline", "while", "True", ":", "line", "=", "readline", "(", ")", "if", "not", "line", ":", "break", "hexsha", "=", "line", ".", "strip", "(", ")", "if", "len", "(", "hexsha", ")", ">", "40", ":", "# split additional information, as returned by bisect for instance", "hexsha", ",", "_", "=", "line", ".", "split", "(", "None", ",", "1", ")", "# END handle extra info", "assert", "len", "(", "hexsha", ")", "==", "40", ",", "\"Invalid line: %s\"", "%", "hexsha", "yield", "Commit", "(", "repo", ",", "hex_to_bin", "(", "hexsha", ")", ")", "# END for each line in stream", "# TODO: Review this - it seems process handling got a bit out of control", "# due to many developers trying to fix the open file handles issue", "if", "hasattr", "(", "proc_or_stream", ",", "'wait'", ")", ":", "finalize_process", "(", "proc_or_stream", ")" ]
Parse out commit information into a list of Commit objects We expect one-line per commit, and parse the actual commit information directly from our lighting fast object database :param proc: git-rev-list process instance - one sha per line :return: iterator returning Commit objects
[ "Parse", "out", "commit", "information", "into", "a", "list", "of", "Commit", "objects", "We", "expect", "one", "-", "line", "per", "commit", "and", "parse", "the", "actual", "commit", "information", "directly", "from", "our", "lighting", "fast", "object", "database" ]
python
train
43.068966
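This is an internal parser; the public entry point that usually ends up exercising it is Repo.iter_commits. A hedged sketch (assumes GitPython is installed and the current directory is a git checkout):

from git import Repo

repo = Repo('.')
for commit in repo.iter_commits('HEAD', max_count=5):
    # Each Commit object was built from one 40-character sha line, as parsed above.
    print(commit.hexsha, commit.summary)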
F5Networks/f5-common-python
f5/bigip/resource.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/resource.py#L519-L594
def _update(self, **kwargs): """wrapped with update, override that in a subclass to customize""" requests_params, update_uri, session, read_only = \ self._prepare_put_or_patch(kwargs) read_only_mutations = [] for attr in read_only: if attr in kwargs: read_only_mutations.append(attr) if read_only_mutations: msg = 'Attempted to mutate read-only attribute(s): %s' \ % read_only_mutations raise AttemptedMutationOfReadOnly(msg) # Get the current state of the object on BIG-IP® and check the # generation Use pop here because we don't want force in the data_dict force = self._check_force_arg(kwargs.pop('force', True)) if not force: # generation has a known server-side error self._check_generation() kwargs = self._check_for_boolean_pair_reduction(kwargs) # Save the meta data so we can add it back into self after we # load the new object. temp_meta = self.__dict__.pop('_meta_data') # Need to remove any of the Collection objects from self.__dict__ # because these are subCollections and _meta_data and # other non-BIG-IP® attrs are not removed from the subCollections # See issue #146 for details tmp = dict() for key, value in iteritems(self.__dict__): # In Python2 versions we were changing a dictionary in place, # but this cannot be done with an iterator as an error is raised. # So instead we create a temporary holder for the modified dict # and then re-assign it afterwards. if isinstance(value, Collection): pass else: tmp[key] = value self.__dict__ = tmp data_dict = self.to_dict() # Remove any read-only attributes from our data_dict before we update # the data dict with the attributes. If they pass in read-only attrs # in the method call we are going to let BIG-IP® let them know about it # when it fails for attr in read_only: data_dict.pop(attr, '') data_dict.update(kwargs) data_dict = self._prepare_request_json(data_dict) # Handles ConnectionAborted errors # # @see https://github.com/F5Networks/f5-ansible/issues/317 # @see https://github.com/requests/requests/issues/2364 for _ in range(0, 30): try: response = session.put(update_uri, json=data_dict, **requests_params) self._meta_data = temp_meta self._local_update(response.json()) break except iControlUnexpectedHTTPError: response = session.get(update_uri, **requests_params) self._meta_data = temp_meta self._local_update(response.json()) raise except ConnectionError as ex: if 'Connection aborted' in str(ex): time.sleep(1) continue else: raise
[ "def", "_update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "requests_params", ",", "update_uri", ",", "session", ",", "read_only", "=", "self", ".", "_prepare_put_or_patch", "(", "kwargs", ")", "read_only_mutations", "=", "[", "]", "for", "attr", "in", "read_only", ":", "if", "attr", "in", "kwargs", ":", "read_only_mutations", ".", "append", "(", "attr", ")", "if", "read_only_mutations", ":", "msg", "=", "'Attempted to mutate read-only attribute(s): %s'", "%", "read_only_mutations", "raise", "AttemptedMutationOfReadOnly", "(", "msg", ")", "# Get the current state of the object on BIG-IP® and check the", "# generation Use pop here because we don't want force in the data_dict", "force", "=", "self", ".", "_check_force_arg", "(", "kwargs", ".", "pop", "(", "'force'", ",", "True", ")", ")", "if", "not", "force", ":", "# generation has a known server-side error", "self", ".", "_check_generation", "(", ")", "kwargs", "=", "self", ".", "_check_for_boolean_pair_reduction", "(", "kwargs", ")", "# Save the meta data so we can add it back into self after we", "# load the new object.", "temp_meta", "=", "self", ".", "__dict__", ".", "pop", "(", "'_meta_data'", ")", "# Need to remove any of the Collection objects from self.__dict__", "# because these are subCollections and _meta_data and", "# other non-BIG-IP® attrs are not removed from the subCollections", "# See issue #146 for details", "tmp", "=", "dict", "(", ")", "for", "key", ",", "value", "in", "iteritems", "(", "self", ".", "__dict__", ")", ":", "# In Python2 versions we were changing a dictionary in place,", "# but this cannot be done with an iterator as an error is raised.", "# So instead we create a temporary holder for the modified dict", "# and then re-assign it afterwards.", "if", "isinstance", "(", "value", ",", "Collection", ")", ":", "pass", "else", ":", "tmp", "[", "key", "]", "=", "value", "self", ".", "__dict__", "=", "tmp", "data_dict", "=", "self", ".", "to_dict", "(", ")", "# Remove any read-only attributes from our data_dict before we update", "# the data dict with the attributes. If they pass in read-only attrs", "# in the method call we are going to let BIG-IP® let them know about it", "# when it fails", "for", "attr", "in", "read_only", ":", "data_dict", ".", "pop", "(", "attr", ",", "''", ")", "data_dict", ".", "update", "(", "kwargs", ")", "data_dict", "=", "self", ".", "_prepare_request_json", "(", "data_dict", ")", "# Handles ConnectionAborted errors", "#", "# @see https://github.com/F5Networks/f5-ansible/issues/317", "# @see https://github.com/requests/requests/issues/2364", "for", "_", "in", "range", "(", "0", ",", "30", ")", ":", "try", ":", "response", "=", "session", ".", "put", "(", "update_uri", ",", "json", "=", "data_dict", ",", "*", "*", "requests_params", ")", "self", ".", "_meta_data", "=", "temp_meta", "self", ".", "_local_update", "(", "response", ".", "json", "(", ")", ")", "break", "except", "iControlUnexpectedHTTPError", ":", "response", "=", "session", ".", "get", "(", "update_uri", ",", "*", "*", "requests_params", ")", "self", ".", "_meta_data", "=", "temp_meta", "self", ".", "_local_update", "(", "response", ".", "json", "(", ")", ")", "raise", "except", "ConnectionError", "as", "ex", ":", "if", "'Connection aborted'", "in", "str", "(", "ex", ")", ":", "time", ".", "sleep", "(", "1", ")", "continue", "else", ":", "raise" ]
wrapped with update, override that in a subclass to customize
[ "wrapped", "with", "update", "override", "that", "in", "a", "subclass", "to", "customize" ]
python
train
40.776316
census-instrumentation/opencensus-python
opencensus/metrics/export/gauge.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/gauge.py#L336-L355
def get_or_create_time_series(self, label_values): """Get a mutable measurement for the given set of label values. :type label_values: list(:class:`LabelValue`) :param label_values: The measurement's label values. :rtype: :class:`GaugePointLong`, :class:`GaugePointDouble` :class:`opencensus.metrics.export.cumulative.CumulativePointLong`, or :class:`opencensus.metrics.export.cumulative.CumulativePointDouble` :return: A mutable point that represents the last value of the measurement. """ if label_values is None: raise ValueError if any(lv is None for lv in label_values): raise ValueError if len(label_values) != self._len_label_keys: raise ValueError return self._get_or_create_time_series(label_values)
[ "def", "get_or_create_time_series", "(", "self", ",", "label_values", ")", ":", "if", "label_values", "is", "None", ":", "raise", "ValueError", "if", "any", "(", "lv", "is", "None", "for", "lv", "in", "label_values", ")", ":", "raise", "ValueError", "if", "len", "(", "label_values", ")", "!=", "self", ".", "_len_label_keys", ":", "raise", "ValueError", "return", "self", ".", "_get_or_create_time_series", "(", "label_values", ")" ]
Get a mutable measurement for the given set of label values. :type label_values: list(:class:`LabelValue`) :param label_values: The measurement's label values. :rtype: :class:`GaugePointLong`, :class:`GaugePointDouble` :class:`opencensus.metrics.export.cumulative.CumulativePointLong`, or :class:`opencensus.metrics.export.cumulative.CumulativePointDouble` :return: A mutable point that represents the last value of the measurement.
[ "Get", "a", "mutable", "measurement", "for", "the", "given", "set", "of", "label", "values", "." ]
python
train
42.75
cloud-custodian/cloud-custodian
c7n/resources/appelb.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/resources/appelb.py#L81-L88
def get_resources(self, ids, cache=True): """Support server side filtering on arns or names """ if ids[0].startswith('arn:'): params = {'LoadBalancerArns': ids} else: params = {'Names': ids} return self.query.filter(self.manager, **params)
[ "def", "get_resources", "(", "self", ",", "ids", ",", "cache", "=", "True", ")", ":", "if", "ids", "[", "0", "]", ".", "startswith", "(", "'arn:'", ")", ":", "params", "=", "{", "'LoadBalancerArns'", ":", "ids", "}", "else", ":", "params", "=", "{", "'Names'", ":", "ids", "}", "return", "self", ".", "query", ".", "filter", "(", "self", ".", "manager", ",", "*", "*", "params", ")" ]
Support server side filtering on arns or names
[ "Support", "server", "side", "filtering", "on", "arns", "or", "names" ]
python
train
37
blag/django-secure-mail
secure_mail/utils.py
https://github.com/blag/django-secure-mail/blob/52987b6ce829e6de2dc8ab38ed3190bc2752b341/secure_mail/utils.py#L25-L31
def addresses_for_key(gpg, key): """ Takes a key and extracts the email addresses for it. """ return [address.split("<")[-1].strip(">") for address in gpg.list_keys().key_map[key['fingerprint']]["uids"] if address]
[ "def", "addresses_for_key", "(", "gpg", ",", "key", ")", ":", "return", "[", "address", ".", "split", "(", "\"<\"", ")", "[", "-", "1", "]", ".", "strip", "(", "\">\"", ")", "for", "address", "in", "gpg", ".", "list_keys", "(", ")", ".", "key_map", "[", "key", "[", "'fingerprint'", "]", "]", "[", "\"uids\"", "]", "if", "address", "]" ]
Takes a key and extracts the email addresses for it.
[ "Takes", "a", "key", "and", "extracts", "the", "email", "addresses", "for", "it", "." ]
python
train
36.285714
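The uid handling above is plain string splitting; a standalone illustration with made-up uid strings instead of a live GnuPG keyring:

uids = ["Alice Example <alice@example.org>", "bob@example.net", ""]
addresses = [uid.split("<")[-1].strip(">") for uid in uids if uid]
print(addresses)   # -> ['alice@example.org', 'bob@example.net']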
cloudendpoints/endpoints-python
endpoints/api_config.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/api_config.py#L2181-L2224
def get_descriptor_defaults(self, api_info, hostname=None): """Gets a default configuration for a service. Args: api_info: _ApiInfo object for this service. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary with the default configuration. """ hostname = (hostname or endpoints_util.get_app_hostname() or api_info.hostname) protocol = 'http' if ((hostname and hostname.startswith('localhost')) or endpoints_util.is_running_on_devserver()) else 'https' base_path = api_info.base_path.strip('/') defaults = { 'extends': 'thirdParty.api', 'root': '{0}://{1}/{2}'.format(protocol, hostname, base_path), 'name': api_info.name, 'version': api_info.api_version, 'api_version': api_info.api_version, 'path_version': api_info.path_version, 'defaultVersion': True, 'abstract': False, 'adapter': { 'bns': '{0}://{1}/{2}'.format(protocol, hostname, base_path), 'type': 'lily', 'deadline': 10.0 } } if api_info.canonical_name: defaults['canonicalName'] = api_info.canonical_name if api_info.owner_domain: defaults['ownerDomain'] = api_info.owner_domain if api_info.owner_name: defaults['ownerName'] = api_info.owner_name if api_info.package_path: defaults['packagePath'] = api_info.package_path if api_info.title: defaults['title'] = api_info.title if api_info.documentation: defaults['documentation'] = api_info.documentation return defaults
[ "def", "get_descriptor_defaults", "(", "self", ",", "api_info", ",", "hostname", "=", "None", ")", ":", "hostname", "=", "(", "hostname", "or", "endpoints_util", ".", "get_app_hostname", "(", ")", "or", "api_info", ".", "hostname", ")", "protocol", "=", "'http'", "if", "(", "(", "hostname", "and", "hostname", ".", "startswith", "(", "'localhost'", ")", ")", "or", "endpoints_util", ".", "is_running_on_devserver", "(", ")", ")", "else", "'https'", "base_path", "=", "api_info", ".", "base_path", ".", "strip", "(", "'/'", ")", "defaults", "=", "{", "'extends'", ":", "'thirdParty.api'", ",", "'root'", ":", "'{0}://{1}/{2}'", ".", "format", "(", "protocol", ",", "hostname", ",", "base_path", ")", ",", "'name'", ":", "api_info", ".", "name", ",", "'version'", ":", "api_info", ".", "api_version", ",", "'api_version'", ":", "api_info", ".", "api_version", ",", "'path_version'", ":", "api_info", ".", "path_version", ",", "'defaultVersion'", ":", "True", ",", "'abstract'", ":", "False", ",", "'adapter'", ":", "{", "'bns'", ":", "'{0}://{1}/{2}'", ".", "format", "(", "protocol", ",", "hostname", ",", "base_path", ")", ",", "'type'", ":", "'lily'", ",", "'deadline'", ":", "10.0", "}", "}", "if", "api_info", ".", "canonical_name", ":", "defaults", "[", "'canonicalName'", "]", "=", "api_info", ".", "canonical_name", "if", "api_info", ".", "owner_domain", ":", "defaults", "[", "'ownerDomain'", "]", "=", "api_info", ".", "owner_domain", "if", "api_info", ".", "owner_name", ":", "defaults", "[", "'ownerName'", "]", "=", "api_info", ".", "owner_name", "if", "api_info", ".", "package_path", ":", "defaults", "[", "'packagePath'", "]", "=", "api_info", ".", "package_path", "if", "api_info", ".", "title", ":", "defaults", "[", "'title'", "]", "=", "api_info", ".", "title", "if", "api_info", ".", "documentation", ":", "defaults", "[", "'documentation'", "]", "=", "api_info", ".", "documentation", "return", "defaults" ]
Gets a default configuration for a service. Args: api_info: _ApiInfo object for this service. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary with the default configuration.
[ "Gets", "a", "default", "configuration", "for", "a", "service", "." ]
python
train
37.5
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L304-L383
def convert_convolution(builder, layer, input_names, output_names, keras_layer): """ Convert convolution layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ _check_data_format(keras_layer) # Get input and output names input_name, output_name = (input_names[0], output_names[0]) has_bias = keras_layer.use_bias is_deconv = isinstance(keras_layer, _keras.layers.convolutional.Conv2DTranspose) # Get the weights from _keras. weightList = keras_layer.get_weights() # Dimensions and weights if is_deconv: height, width, n_filters, channels = weightList[0].shape W = weightList[0].transpose([0,1,3,2]) try: output_blob_shape = list(filter(None, keras_layer.output_shape)) output_shape = output_blob_shape[:-1] except: output_shape = None else: height, width, channels, n_filters = weightList[0].shape W = weightList[0] output_shape = None b = weightList[1] if has_bias else None output_channels = n_filters stride_height, stride_width = keras_layer.strides # Dilations dilations = [1,1] if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple): dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]] else: dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate] if is_deconv and not dilations == [1,1]: raise ValueError("Unsupported non-unity dilation for Deconvolution layer") groups = 1 kernel_channels = channels # depth-wise convolution if isinstance(keras_layer, DepthwiseConv2D): groups = channels kernel_channels = 1 depth_multiplier = keras_layer.depth_multiplier W = _np.reshape(W,(height, width,1,channels * depth_multiplier)) output_channels = channels * depth_multiplier builder.add_convolution(name = layer, kernel_channels = kernel_channels, output_channels = output_channels, height = height, width = width, stride_height = stride_height, stride_width = stride_width, border_mode = keras_layer.padding, groups = groups, W = W, b = b, has_bias = has_bias, is_deconv = is_deconv, output_shape = output_shape, input_name = input_name, output_name = output_name, dilation_factors = dilations)
[ "def", "convert_convolution", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "_check_data_format", "(", "keras_layer", ")", "# Get input and output names", "input_name", ",", "output_name", "=", "(", "input_names", "[", "0", "]", ",", "output_names", "[", "0", "]", ")", "has_bias", "=", "keras_layer", ".", "use_bias", "is_deconv", "=", "isinstance", "(", "keras_layer", ",", "_keras", ".", "layers", ".", "convolutional", ".", "Conv2DTranspose", ")", "# Get the weights from _keras.", "weightList", "=", "keras_layer", ".", "get_weights", "(", ")", "# Dimensions and weights", "if", "is_deconv", ":", "height", ",", "width", ",", "n_filters", ",", "channels", "=", "weightList", "[", "0", "]", ".", "shape", "W", "=", "weightList", "[", "0", "]", ".", "transpose", "(", "[", "0", ",", "1", ",", "3", ",", "2", "]", ")", "try", ":", "output_blob_shape", "=", "list", "(", "filter", "(", "None", ",", "keras_layer", ".", "output_shape", ")", ")", "output_shape", "=", "output_blob_shape", "[", ":", "-", "1", "]", "except", ":", "output_shape", "=", "None", "else", ":", "height", ",", "width", ",", "channels", ",", "n_filters", "=", "weightList", "[", "0", "]", ".", "shape", "W", "=", "weightList", "[", "0", "]", "output_shape", "=", "None", "b", "=", "weightList", "[", "1", "]", "if", "has_bias", "else", "None", "output_channels", "=", "n_filters", "stride_height", ",", "stride_width", "=", "keras_layer", ".", "strides", "# Dilations", "dilations", "=", "[", "1", ",", "1", "]", "if", "(", "type", "(", "keras_layer", ".", "dilation_rate", ")", "is", "list", ")", "or", "(", "type", "(", "keras_layer", ".", "dilation_rate", ")", "is", "tuple", ")", ":", "dilations", "=", "[", "keras_layer", ".", "dilation_rate", "[", "0", "]", ",", "keras_layer", ".", "dilation_rate", "[", "1", "]", "]", "else", ":", "dilations", "=", "[", "keras_layer", ".", "dilation_rate", ",", "keras_layer", ".", "dilation_rate", "]", "if", "is_deconv", "and", "not", "dilations", "==", "[", "1", ",", "1", "]", ":", "raise", "ValueError", "(", "\"Unsupported non-unity dilation for Deconvolution layer\"", ")", "groups", "=", "1", "kernel_channels", "=", "channels", "# depth-wise convolution", "if", "isinstance", "(", "keras_layer", ",", "DepthwiseConv2D", ")", ":", "groups", "=", "channels", "kernel_channels", "=", "1", "depth_multiplier", "=", "keras_layer", ".", "depth_multiplier", "W", "=", "_np", ".", "reshape", "(", "W", ",", "(", "height", ",", "width", ",", "1", ",", "channels", "*", "depth_multiplier", ")", ")", "output_channels", "=", "channels", "*", "depth_multiplier", "builder", ".", "add_convolution", "(", "name", "=", "layer", ",", "kernel_channels", "=", "kernel_channels", ",", "output_channels", "=", "output_channels", ",", "height", "=", "height", ",", "width", "=", "width", ",", "stride_height", "=", "stride_height", ",", "stride_width", "=", "stride_width", ",", "border_mode", "=", "keras_layer", ".", "padding", ",", "groups", "=", "groups", ",", "W", "=", "W", ",", "b", "=", "b", ",", "has_bias", "=", "has_bias", ",", "is_deconv", "=", "is_deconv", ",", "output_shape", "=", "output_shape", ",", "input_name", "=", "input_name", ",", "output_name", "=", "output_name", ",", "dilation_factors", "=", "dilations", ")" ]
Convert convolution layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object.
[ "Convert", "convolution", "layer", "from", "keras", "to", "coreml", "." ]
python
train
32.6375
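A minimal NumPy sketch of the two weight manipulations the converter above performs; the shapes and values are illustrative assumptions, not taken from any real model:

import numpy as np

# Keras stores Conv2D kernels as (height, width, in_channels, n_filters).
h, w, channels, n_filters = 3, 3, 8, 16

# Conv2DTranspose keeps (height, width, n_filters, in_channels), so the
# converter swaps the last two axes before handing the kernel to Core ML.
W_deconv = np.random.rand(h, w, n_filters, channels)
W = W_deconv.transpose([0, 1, 3, 2])
print(W.shape)   # (3, 3, 8, 16)

# DepthwiseConv2D kernels are reshaped so Core ML sees a grouped convolution
# with a single input channel per group.
depth_multiplier = 2
W_dw = np.random.rand(h, w, channels, depth_multiplier)
W_dw = np.reshape(W_dw, (h, w, 1, channels * depth_multiplier))
print(W_dw.shape)   # (3, 3, 1, 16)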
Parsely/schemato
schemato/validator.py
https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L174-L189
def _superclasses_for_subject(self, graph, typeof): """helper, returns a list of all superclasses of a given class""" # TODO - this might be replacing a fairly simple graph API query where # it doesn't need to classes = [] superclass = typeof while True: found = False for p, o in self.schema_def.ontology[superclass]: if self.schema_def.lexicon['subclass'] == str(p): found = True classes.append(o) superclass = o if not found: break return classes
[ "def", "_superclasses_for_subject", "(", "self", ",", "graph", ",", "typeof", ")", ":", "# TODO - this might be replacing a fairly simple graph API query where", "# it doesn't need to", "classes", "=", "[", "]", "superclass", "=", "typeof", "while", "True", ":", "found", "=", "False", "for", "p", ",", "o", "in", "self", ".", "schema_def", ".", "ontology", "[", "superclass", "]", ":", "if", "self", ".", "schema_def", ".", "lexicon", "[", "'subclass'", "]", "==", "str", "(", "p", ")", ":", "found", "=", "True", "classes", ".", "append", "(", "o", ")", "superclass", "=", "o", "if", "not", "found", ":", "break", "return", "classes" ]
helper, returns a list of all superclasses of a given class
[ "helper", "returns", "a", "list", "of", "all", "superclasses", "of", "a", "given", "class" ]
python
train
38.625
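The superclass walk above can be sketched against a toy ontology; the dictionary and predicate name below are stand-ins for schemato's schema_def structures, not the real objects:

SUBCLASS = "rdfs:subClassOf"   # assumed stand-in for schema_def.lexicon['subclass']

ontology = {
    "Restaurant": [(SUBCLASS, "LocalBusiness")],
    "LocalBusiness": [(SUBCLASS, "Organization")],
    "Organization": [("rdfs:comment", "top-level class")],
}

def superclasses_for(typeof):
    classes = []
    superclass = typeof
    while True:
        found = False
        for p, o in ontology.get(superclass, []):
            if p == SUBCLASS:
                found = True
                classes.append(o)
                superclass = o
        if not found:
            break
    return classes

print(superclasses_for("Restaurant"))   # ['LocalBusiness', 'Organization']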
xflows/rdm
rdm/db/converters.py
https://github.com/xflows/rdm/blob/d984e2a0297e5fa8d799953bbd0dba79b05d403d/rdm/db/converters.py#L157-L166
def all_examples(self, pred_name=None): ''' Emits all examples in prolog form for RSD. :param pred_name: override for the emitted predicate name ''' target = self.db.target_table pred_name = pred_name if pred_name else target examples = self.db.rows(target, [self.db.target_att, self.db.pkeys[target]]) return '\n'.join(["%s(%s, %s)." % (pred_name, ILPConverter.fmt_col(cls), pk) for cls, pk in examples])
[ "def", "all_examples", "(", "self", ",", "pred_name", "=", "None", ")", ":", "target", "=", "self", ".", "db", ".", "target_table", "pred_name", "=", "pred_name", "if", "pred_name", "else", "target", "examples", "=", "self", ".", "db", ".", "rows", "(", "target", ",", "[", "self", ".", "db", ".", "target_att", ",", "self", ".", "db", ".", "pkeys", "[", "target", "]", "]", ")", "return", "'\\n'", ".", "join", "(", "[", "\"%s(%s, %s).\"", "%", "(", "pred_name", ",", "ILPConverter", ".", "fmt_col", "(", "cls", ")", ",", "pk", ")", "for", "cls", ",", "pk", "in", "examples", "]", ")" ]
Emits all examples in prolog form for RSD. :param pred_name: override for the emitted predicate name
[ "Emits", "all", "examples", "in", "prolog", "form", "for", "RSD", "." ]
python
train
46.5
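The Prolog facts emitted by all_examples look roughly like this; the predicate name, class labels and primary keys are invented for illustration (the real converter additionally passes the class value through ILPConverter.fmt_col):

pred_name = "trk"                        # hypothetical target table name
examples = [("east", 1), ("west", 2)]    # (class value, primary key) rows

facts = "\n".join("%s(%s, %s)." % (pred_name, cls, pk) for cls, pk in examples)
print(facts)
# trk(east, 1).
# trk(west, 2).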
asphalt-framework/asphalt-templating
asphalt/templating/api.py
https://github.com/asphalt-framework/asphalt-templating/blob/e5f836290820aa295b048b17b96d3896d5f1eeac/asphalt/templating/api.py#L53-L65
def render(self, template: str, **vars) -> str: """ Render the named template. The current context will be available to the template as the ``ctx`` variable. :param template: name of the template file :param vars: extra template variables :return: the rendered results """ vars.setdefault('ctx', self._ctx) return self._renderer.render(template, **vars)
[ "def", "render", "(", "self", ",", "template", ":", "str", ",", "*", "*", "vars", ")", "->", "str", ":", "vars", ".", "setdefault", "(", "'ctx'", ",", "self", ".", "_ctx", ")", "return", "self", ".", "_renderer", ".", "render", "(", "template", ",", "*", "*", "vars", ")" ]
Render the named template. The current context will be available to the template as the ``ctx`` variable. :param template: name of the template file :param vars: extra template variables :return: the rendered results
[ "Render", "the", "named", "template", "." ]
python
train
32
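A small sketch of the ctx injection performed by render above; the stub renderer and template string are assumptions made only for this example:

class StubRenderer:
    def render(self, template, **vars):
        # Pretend the named template is "Hello {user}, ctx={ctx}".
        return "Hello {user}, ctx={ctx}".format(**vars)

def render(ctx, renderer, template, **vars):
    # Mirrors the wrapper: the current context is exposed to the template
    # as ``ctx`` unless the caller already supplied one.
    vars.setdefault("ctx", ctx)
    return renderer.render(template, **vars)

print(render("fake-context", StubRenderer(), "greeting.html", user="Ada"))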
iotile/coretools
iotilecore/iotile/core/utilities/intelhex/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/intelhex/__init__.py#L1100-L1124
def diff_dumps(ih1, ih2, tofile=None, name1="a", name2="b", n_context=3): """Diff 2 IntelHex objects and produce unified diff output for their hex dumps. @param ih1 first IntelHex object to compare @param ih2 second IntelHex object to compare @param tofile file-like object to write output @param name1 name of the first hex file to show in the diff header @param name2 name of the first hex file to show in the diff header @param n_context number of context lines in the unidiff output """ def prepare_lines(ih): sio = StringIO() ih.dump(sio) dump = sio.getvalue() lines = dump.splitlines() return lines a = prepare_lines(ih1) b = prepare_lines(ih2) import difflib result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm='')) if tofile is None: tofile = sys.stdout output = '\n'.join(result)+'\n' tofile.write(output)
[ "def", "diff_dumps", "(", "ih1", ",", "ih2", ",", "tofile", "=", "None", ",", "name1", "=", "\"a\"", ",", "name2", "=", "\"b\"", ",", "n_context", "=", "3", ")", ":", "def", "prepare_lines", "(", "ih", ")", ":", "sio", "=", "StringIO", "(", ")", "ih", ".", "dump", "(", "sio", ")", "dump", "=", "sio", ".", "getvalue", "(", ")", "lines", "=", "dump", ".", "splitlines", "(", ")", "return", "lines", "a", "=", "prepare_lines", "(", "ih1", ")", "b", "=", "prepare_lines", "(", "ih2", ")", "import", "difflib", "result", "=", "list", "(", "difflib", ".", "unified_diff", "(", "a", ",", "b", ",", "fromfile", "=", "name1", ",", "tofile", "=", "name2", ",", "n", "=", "n_context", ",", "lineterm", "=", "''", ")", ")", "if", "tofile", "is", "None", ":", "tofile", "=", "sys", ".", "stdout", "output", "=", "'\\n'", ".", "join", "(", "result", ")", "+", "'\\n'", "tofile", ".", "write", "(", "output", ")" ]
Diff 2 IntelHex objects and produce unified diff output for their hex dumps.

@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
@param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output

[ "Diff", "2", "IntelHex", "objects", "and", "produce", "unified", "diff", "output", "for", "their", "hex", "dumps", "." ]
python
train
39.08
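Since diff_dumps just feeds two hex dumps to difflib, the core behaviour can be reproduced with the standard library alone; the dump strings below are invented:

import difflib

dump_a = "0000  FF FF FF FF\n0010  00 01 02 03".splitlines()
dump_b = "0000  FF FF FF FF\n0010  00 01 02 04".splitlines()

diff = difflib.unified_diff(dump_a, dump_b, fromfile="a", tofile="b",
                            n=3, lineterm="")
print("\n".join(diff))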
tensorpack/tensorpack
tensorpack/callbacks/param.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/callbacks/param.py#L68-L76
def setup_graph(self): """ Will setup the assign operator for that variable. """ all_vars = tfv1.global_variables() + tfv1.local_variables() for v in all_vars: if v.name == self.var_name: self.var = v break else: raise ValueError("{} is not a variable in the graph!".format(self.var_name))
[ "def", "setup_graph", "(", "self", ")", ":", "all_vars", "=", "tfv1", ".", "global_variables", "(", ")", "+", "tfv1", ".", "local_variables", "(", ")", "for", "v", "in", "all_vars", ":", "if", "v", ".", "name", "==", "self", ".", "var_name", ":", "self", ".", "var", "=", "v", "break", "else", ":", "raise", "ValueError", "(", "\"{} is not a variable in the graph!\"", ".", "format", "(", "self", ".", "var_name", ")", ")" ]
Will setup the assign operator for that variable.
[ "Will", "setup", "the", "assign", "operator", "for", "that", "variable", "." ]
python
train
41
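The variable lookup above relies on Python's for/else, where the else branch runs only if no name matched; a library-free sketch:

class FakeVar:
    def __init__(self, name):
        self.name = name

all_vars = [FakeVar("conv1/W:0"), FakeVar("learning_rate:0")]
var_name = "learning_rate:0"

for v in all_vars:
    if v.name == var_name:
        var = v
        break
else:
    raise ValueError("{} is not a variable in the graph!".format(var_name))

print(var.name)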
soasme/dogeon
dson/__init__.py
https://github.com/soasme/dogeon/blob/496b9a5b099946d14434ed0cd7a94a270f607207/dson/__init__.py#L125-L193
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, sort_keys=False, **kw): """Serialize ``obj`` as a DSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is true then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is ``False``, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to cause an error. If ``check_circular`` is false, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is false, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the DSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then DSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. Since the default item separator is ``', '``, the output might include trailing whitespace when ``indent`` is specified. You can use ``separators=(',', ': ')`` to avoid this. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``('and ', 'is ')`` separators. ``('and', 'is')`` is the most compact DSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. If *sort_keys* is ``True`` (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``DSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg; otherwise ``DSONEncoder`` is used. """ # cached encoder if (not skipkeys and ensure_ascii and check_circular and allow_nan and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not sort_keys and not kw): iterable = _default_encoder.iterencode(obj) else: if cls is None: cls = DSONEncoder iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, sort_keys=sort_keys, **kw).iterencode(obj) # could accelerate with writelines in some versions of Python, at # a debuggability cost for chunk in iterable: fp.write(chunk)
[ "def", "dump", "(", "obj", ",", "fp", ",", "skipkeys", "=", "False", ",", "ensure_ascii", "=", "True", ",", "check_circular", "=", "True", ",", "allow_nan", "=", "True", ",", "cls", "=", "None", ",", "indent", "=", "None", ",", "separators", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "default", "=", "None", ",", "sort_keys", "=", "False", ",", "*", "*", "kw", ")", ":", "# cached encoder", "if", "(", "not", "skipkeys", "and", "ensure_ascii", "and", "check_circular", "and", "allow_nan", "and", "cls", "is", "None", "and", "indent", "is", "None", "and", "separators", "is", "None", "and", "encoding", "==", "'utf-8'", "and", "default", "is", "None", "and", "not", "sort_keys", "and", "not", "kw", ")", ":", "iterable", "=", "_default_encoder", ".", "iterencode", "(", "obj", ")", "else", ":", "if", "cls", "is", "None", ":", "cls", "=", "DSONEncoder", "iterable", "=", "cls", "(", "skipkeys", "=", "skipkeys", ",", "ensure_ascii", "=", "ensure_ascii", ",", "check_circular", "=", "check_circular", ",", "allow_nan", "=", "allow_nan", ",", "indent", "=", "indent", ",", "separators", "=", "separators", ",", "encoding", "=", "encoding", ",", "default", "=", "default", ",", "sort_keys", "=", "sort_keys", ",", "*", "*", "kw", ")", ".", "iterencode", "(", "obj", ")", "# could accelerate with writelines in some versions of Python, at", "# a debuggability cost", "for", "chunk", "in", "iterable", ":", "fp", ".", "write", "(", "chunk", ")" ]
Serialize ``obj`` as a DSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is true then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is ``False``, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to cause an error. If ``check_circular`` is false, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is false, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the DSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then DSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. Since the default item separator is ``', '``, the output might include trailing whitespace when ``indent`` is specified. You can use ``separators=(',', ': ')`` to avoid this. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``('and ', 'is ')`` separators. ``('and', 'is')`` is the most compact DSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. If *sort_keys* is ``True`` (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``DSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg; otherwise ``DSONEncoder`` is used.
[ "Serialize", "obj", "as", "a", "DSON", "formatted", "stream", "to", "fp", "(", "a", ".", "write", "()", "-", "supporting", "file", "-", "like", "object", ")", "." ]
python
train
50.028986
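Typical use mirrors json.dump; a hedged sketch assuming the dson package from this repository is importable. The exact DSON text produced is not shown here because it depends on the encoder defaults described above:

import io
import dson   # the package this record documents; assumed importable

buf = io.StringIO()
dson.dump({"breed": "shibe", "age": 4}, buf, sort_keys=True)
print(buf.getvalue())   # the DSON-encoded text, analogous to json.dump output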
liampauling/betfair
betfairlightweight/endpoints/scores.py
https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/scores.py#L52-L66
def list_scores(self, update_keys, session=None, lightweight=None): """ Returns a list of current scores for the given events. :param list update_keys: The filter to select desired markets. All markets that match the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}] :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.Score] """ params = clean_locals(locals()) method = '%s%s' % (self.URI, 'listScores') (response, elapsed_time) = self.request(method, params, session) return self.process_response(response, resources.Score, elapsed_time, lightweight)
[ "def", "list_scores", "(", "self", ",", "update_keys", ",", "session", "=", "None", ",", "lightweight", "=", "None", ")", ":", "params", "=", "clean_locals", "(", "locals", "(", ")", ")", "method", "=", "'%s%s'", "%", "(", "self", ".", "URI", ",", "'listScores'", ")", "(", "response", ",", "elapsed_time", ")", "=", "self", ".", "request", "(", "method", ",", "params", ",", "session", ")", "return", "self", ".", "process_response", "(", "response", ",", "resources", ".", "Score", ",", "elapsed_time", ",", "lightweight", ")" ]
Returns a list of current scores for the given events. :param list update_keys: The filter to select desired markets. All markets that match the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}] :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.Score]
[ "Returns", "a", "list", "of", "current", "scores", "for", "the", "given", "events", "." ]
python
train
52
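A usage sketch; only the list_scores signature comes from the record above, while the client construction, the scores attribute name and the credentials are assumptions:

import betfairlightweight   # assumed importable

# Placeholder credentials; a real session also needs certificates and login.
trading = betfairlightweight.APIClient("username", "password", app_key="app_key")
trading.login()

update_keys = [{"eventId": "28205674", "lastUpdateSequenceProcessed": 2}]
# Assumes the scores endpoint is exposed as an attribute of the client.
scores = trading.scores.list_scores(update_keys=update_keys, lightweight=True)
print(scores)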
PyCQA/pylint
pylint/checkers/base.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/base.py#L1153-L1211
def visit_lambda(self, node): """check whether or not the lambda is suspicious """ # if the body of the lambda is a call expression with the same # argument list as the lambda itself, then the lambda is # possibly unnecessary and at least suspicious. if node.args.defaults: # If the arguments of the lambda include defaults, then a # judgment cannot be made because there is no way to check # that the defaults defined by the lambda are the same as # the defaults defined by the function called in the body # of the lambda. return call = node.body if not isinstance(call, astroid.Call): # The body of the lambda must be a function call expression # for the lambda to be unnecessary. return if isinstance(node.body.func, astroid.Attribute) and isinstance( node.body.func.expr, astroid.Call ): # Chained call, the intermediate call might # return something else (but we don't check that, yet). return call_site = CallSite.from_call(call) ordinary_args = list(node.args.args) new_call_args = list(self._filter_vararg(node, call.args)) if node.args.kwarg: if self._has_variadic_argument(call.kwargs, node.args.kwarg): return if node.args.vararg: if self._has_variadic_argument(call.starargs, node.args.vararg): return elif call.starargs: return if call.keywords: # Look for additional keyword arguments that are not part # of the lambda's signature lambda_kwargs = {keyword.name for keyword in node.args.defaults} if len(lambda_kwargs) != len(call_site.keyword_arguments): # Different lengths, so probably not identical return if set(call_site.keyword_arguments).difference(lambda_kwargs): return # The "ordinary" arguments must be in a correspondence such that: # ordinary_args[i].name == call.args[i].name. if len(ordinary_args) != len(new_call_args): return for arg, passed_arg in zip(ordinary_args, new_call_args): if not isinstance(passed_arg, astroid.Name): return if arg.name != passed_arg.name: return self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
[ "def", "visit_lambda", "(", "self", ",", "node", ")", ":", "# if the body of the lambda is a call expression with the same", "# argument list as the lambda itself, then the lambda is", "# possibly unnecessary and at least suspicious.", "if", "node", ".", "args", ".", "defaults", ":", "# If the arguments of the lambda include defaults, then a", "# judgment cannot be made because there is no way to check", "# that the defaults defined by the lambda are the same as", "# the defaults defined by the function called in the body", "# of the lambda.", "return", "call", "=", "node", ".", "body", "if", "not", "isinstance", "(", "call", ",", "astroid", ".", "Call", ")", ":", "# The body of the lambda must be a function call expression", "# for the lambda to be unnecessary.", "return", "if", "isinstance", "(", "node", ".", "body", ".", "func", ",", "astroid", ".", "Attribute", ")", "and", "isinstance", "(", "node", ".", "body", ".", "func", ".", "expr", ",", "astroid", ".", "Call", ")", ":", "# Chained call, the intermediate call might", "# return something else (but we don't check that, yet).", "return", "call_site", "=", "CallSite", ".", "from_call", "(", "call", ")", "ordinary_args", "=", "list", "(", "node", ".", "args", ".", "args", ")", "new_call_args", "=", "list", "(", "self", ".", "_filter_vararg", "(", "node", ",", "call", ".", "args", ")", ")", "if", "node", ".", "args", ".", "kwarg", ":", "if", "self", ".", "_has_variadic_argument", "(", "call", ".", "kwargs", ",", "node", ".", "args", ".", "kwarg", ")", ":", "return", "if", "node", ".", "args", ".", "vararg", ":", "if", "self", ".", "_has_variadic_argument", "(", "call", ".", "starargs", ",", "node", ".", "args", ".", "vararg", ")", ":", "return", "elif", "call", ".", "starargs", ":", "return", "if", "call", ".", "keywords", ":", "# Look for additional keyword arguments that are not part", "# of the lambda's signature", "lambda_kwargs", "=", "{", "keyword", ".", "name", "for", "keyword", "in", "node", ".", "args", ".", "defaults", "}", "if", "len", "(", "lambda_kwargs", ")", "!=", "len", "(", "call_site", ".", "keyword_arguments", ")", ":", "# Different lengths, so probably not identical", "return", "if", "set", "(", "call_site", ".", "keyword_arguments", ")", ".", "difference", "(", "lambda_kwargs", ")", ":", "return", "# The \"ordinary\" arguments must be in a correspondence such that:", "# ordinary_args[i].name == call.args[i].name.", "if", "len", "(", "ordinary_args", ")", "!=", "len", "(", "new_call_args", ")", ":", "return", "for", "arg", ",", "passed_arg", "in", "zip", "(", "ordinary_args", ",", "new_call_args", ")", ":", "if", "not", "isinstance", "(", "passed_arg", ",", "astroid", ".", "Name", ")", ":", "return", "if", "arg", ".", "name", "!=", "passed_arg", ".", "name", ":", "return", "self", ".", "add_message", "(", "\"unnecessary-lambda\"", ",", "line", "=", "node", ".", "fromlineno", ",", "node", "=", "node", ")" ]
check whether or not the lambda is suspicious
[ "check", "whether", "or", "not", "the", "lambda", "is", "suspicious" ]
python
test
42.372881
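Two toy inputs showing what the checker looks for; the first lambda forwards its arguments unchanged and would be reported as unnecessary-lambda, the second is left alone because the call is not a pure pass-through:

# Reported: the lambda adds nothing over using divmod directly.
apply_unnecessary = lambda x, y: divmod(x, y)

# Not reported: the argument order changes, so the lambda is doing real work.
apply_swapped = lambda x, y: divmod(y, x)

print(apply_unnecessary(7, 3), apply_swapped(7, 3))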
lawsie/guizero
guizero/ButtonGroup.py
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/ButtonGroup.py#L262-L276
def remove(self, option): """ Removes the first `option` from the ButtonGroup. Returns `True` if an item was removed. :param string option: The value of the option to remove from the ButtonGroup. """ for existing_option in self._options: if existing_option[1] == option: self._options.remove(existing_option) self._refresh_options() return True return False
[ "def", "remove", "(", "self", ",", "option", ")", ":", "for", "existing_option", "in", "self", ".", "_options", ":", "if", "existing_option", "[", "1", "]", "==", "option", ":", "self", ".", "_options", ".", "remove", "(", "existing_option", ")", "self", ".", "_refresh_options", "(", ")", "return", "True", "return", "False" ]
Removes the first `option` from the ButtonGroup. Returns `True` if an item was removed. :param string option: The value of the option to remove from the ButtonGroup.
[ "Removes", "the", "first", "option", "from", "the", "ButtonGroup", "." ]
python
train
31.533333
latchset/custodia
docs/source/examples/cfgparser.py
https://github.com/latchset/custodia/blob/5ad4cd7a2f40babc6b8b5d16215b7e27ca993b6d/docs/source/examples/cfgparser.py#L109-L117
def getsecret(self, section, option, **kwargs): """Get a secret from Custodia """ # keyword-only arguments, vars and fallback are directly passed through raw = kwargs.get('raw', False) value = self.get(section, option, **kwargs) if raw: return value return self.custodia_client.get_secret(value)
[ "def", "getsecret", "(", "self", ",", "section", ",", "option", ",", "*", "*", "kwargs", ")", ":", "# keyword-only arguments, vars and fallback are directly passed through", "raw", "=", "kwargs", ".", "get", "(", "'raw'", ",", "False", ")", "value", "=", "self", ".", "get", "(", "section", ",", "option", ",", "*", "*", "kwargs", ")", "if", "raw", ":", "return", "value", "return", "self", ".", "custodia_client", ".", "get_secret", "(", "value", ")" ]
Get a secret from Custodia
[ "Get", "a", "secret", "from", "Custodia" ]
python
train
39.444444
ceph/ceph-deploy
ceph_deploy/conf/ceph.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/conf/ceph.py#L34-L46
def safe_get(self, section, key): """ Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time. """ try: #Use full parent function so we can replace it in the class # if desired return configparser.RawConfigParser.get(self, section, key) except (configparser.NoSectionError, configparser.NoOptionError): return None
[ "def", "safe_get", "(", "self", ",", "section", ",", "key", ")", ":", "try", ":", "#Use full parent function so we can replace it in the class", "# if desired", "return", "configparser", ".", "RawConfigParser", ".", "get", "(", "self", ",", "section", ",", "key", ")", "except", "(", "configparser", ".", "NoSectionError", ",", "configparser", ".", "NoOptionError", ")", ":", "return", "None" ]
Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time.
[ "Attempt", "to", "get", "a", "configuration", "value", "from", "a", "certain", "section", "in", "a", "cfg", "object", "but", "returning", "None", "if", "not", "found", ".", "Avoids", "the", "need", "to", "be", "doing", "try", "/", "except", "{", "ConfigParser", "Exceptions", "}", "every", "time", "." ]
python
train
42.692308
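The same defensive lookup with the standard library alone; the section and option names are arbitrary:

import configparser

cfg = configparser.RawConfigParser()
cfg.read_string("[global]\nfsid = abc123\n")

def safe_get(cfg, section, key):
    # Return None instead of raising when the section or option is missing.
    try:
        return configparser.RawConfigParser.get(cfg, section, key)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return None

print(safe_get(cfg, "global", "fsid"))      # abc123
print(safe_get(cfg, "global", "missing"))   # None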
nickjj/ansigenome
ansigenome/utils.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L390-L407
def create_meta_main(create_path, config, role, categories): """ Create a meta template. """ meta_file = c.DEFAULT_META_FILE.replace( "%author_name", config["author_name"]) meta_file = meta_file.replace( "%author_company", config["author_company"]) meta_file = meta_file.replace("%license_type", config["license_type"]) meta_file = meta_file.replace("%role_name", role) # Normalize the category so %categories always gets replaced. if not categories: categories = "" meta_file = meta_file.replace("%categories", categories) string_to_file(create_path, meta_file)
[ "def", "create_meta_main", "(", "create_path", ",", "config", ",", "role", ",", "categories", ")", ":", "meta_file", "=", "c", ".", "DEFAULT_META_FILE", ".", "replace", "(", "\"%author_name\"", ",", "config", "[", "\"author_name\"", "]", ")", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%author_company\"", ",", "config", "[", "\"author_company\"", "]", ")", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%license_type\"", ",", "config", "[", "\"license_type\"", "]", ")", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%role_name\"", ",", "role", ")", "# Normalize the category so %categories always gets replaced.", "if", "not", "categories", ":", "categories", "=", "\"\"", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%categories\"", ",", "categories", ")", "string_to_file", "(", "create_path", ",", "meta_file", ")" ]
Create a meta template.
[ "Create", "a", "meta", "template", "." ]
python
train
34.222222
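The placeholder substitution reduces to chained str.replace calls; the template below is a stand-in for c.DEFAULT_META_FILE, which is defined elsewhere in ansigenome:

DEFAULT_META_FILE = (
    "galaxy_info:\n"
    "  author: %author_name\n"
    "  company: %author_company\n"
    "  license: %license_type\n"
    "  role_name: %role_name\n"
    "  categories: [%categories]\n"
)

config = {"author_name": "Jane Doe", "author_company": "Example Co",
          "license_type": "MIT"}

meta = DEFAULT_META_FILE.replace("%author_name", config["author_name"])
meta = meta.replace("%author_company", config["author_company"])
meta = meta.replace("%license_type", config["license_type"])
meta = meta.replace("%role_name", "nginx")
meta = meta.replace("%categories", "")   # empty categories normalised to ""
print(meta)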
mcash/merchant-api-python-sdk
mcash/mapi_client/pusherconnector.py
https://github.com/mcash/merchant-api-python-sdk/blob/ebe8734126790354b71077aca519ff263235944e/mcash/mapi_client/pusherconnector.py#L33-L39
def _pusher_connect_handler(self, data): """Event handler for the connection_established event. Binds the shortlink_scanned event """ self.channel = self.pusher.subscribe(self.pos_callback_chan) for listener in self.pusher_connected_listeners: listener(data)
[ "def", "_pusher_connect_handler", "(", "self", ",", "data", ")", ":", "self", ".", "channel", "=", "self", ".", "pusher", ".", "subscribe", "(", "self", ".", "pos_callback_chan", ")", "for", "listener", "in", "self", ".", "pusher_connected_listeners", ":", "listener", "(", "data", ")" ]
Event handler for the connection_established event. Binds the shortlink_scanned event
[ "Event", "handler", "for", "the", "connection_established", "event", ".", "Binds", "the", "shortlink_scanned", "event" ]
python
train
43.428571
abe-winter/pg13-py
pg13/sqparse2.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqparse2.py#L137-L141
def t_NAME(self,t): '[A-Za-z]\w*|\"char\"' # warning: this allows stuff like SeLeCt with mixed case. who cares. t.type = KEYWORDS[t.value.lower()] if t.value.lower() in KEYWORDS else 'BOOL' if t.value.lower() in ('is','not') else 'NAME' return t
[ "def", "t_NAME", "(", "self", ",", "t", ")", ":", "# warning: this allows stuff like SeLeCt with mixed case. who cares.", "t", ".", "type", "=", "KEYWORDS", "[", "t", ".", "value", ".", "lower", "(", ")", "]", "if", "t", ".", "value", ".", "lower", "(", ")", "in", "KEYWORDS", "else", "'BOOL'", "if", "t", ".", "value", ".", "lower", "(", ")", "in", "(", "'is'", ",", "'not'", ")", "else", "'NAME'", "return", "t" ]
[A-Za-z]\w*|\"char\"
[ "[", "A", "-", "Za", "-", "z", "]", "\\", "w", "*", "|", "\\", "char", "\\" ]
python
train
51.4
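The token-type decision inside t_NAME is easier to read outside the lexer; a sketch with a toy keyword table standing in for the real KEYWORDS mapping in sqparse2:

KEYWORDS = {"select": "SELECT", "from": "FROM", "where": "WHERE"}   # toy subset

def classify(word):
    lowered = word.lower()
    if lowered in KEYWORDS:
        return KEYWORDS[lowered]   # SQL keyword, any letter case accepted
    if lowered in ("is", "not"):
        return "BOOL"
    return "NAME"                  # ordinary identifier

for w in ("SeLeCt", "is", "users"):
    print(w, "->", classify(w))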
esheldon/fitsio
fitsio/hdu/table.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L128-L240
def write(self, data, **keys): """ Write data into this HDU parameters ---------- data: ndarray or list of ndarray A numerical python array. Should be an ordinary array for image HDUs, should have fields for tables. To write an ordinary array to a column in a table HDU, use write_column. If data already exists in this HDU, it will be overwritten. See the append(() method to append new rows to a table HDU. firstrow: integer, optional At which row you should begin writing to tables. Be sure you know what you are doing! For appending see the append() method. Default 0. columns: list, optional If data is a list of arrays, you must send columns as a list of names or column numbers You can also send names= names: list, optional same as columns= """ slow = keys.get('slow', False) isrec = False if isinstance(data, (list, dict)): if isinstance(data, list): data_list = data columns_all = keys.get('columns', None) if columns_all is None: columns_all = keys.get('names', None) if columns_all is None: raise ValueError( "you must send columns with a list of arrays") else: columns_all = list(data.keys()) data_list = [data[n] for n in columns_all] colnums_all = [self._extract_colnum(c) for c in columns_all] names = [self.get_colname(c) for c in colnums_all] isobj = numpy.zeros(len(data_list), dtype=numpy.bool) for i in xrange(len(data_list)): isobj[i] = is_object(data_list[i]) else: if data.dtype.fields is None: raise ValueError("You are writing to a table, so I expected " "an array with fields as input. If you want " "to write a simple array, you should use " "write_column to write to a single column, " "or instead write to an image hdu") if data.shape is (): raise ValueError("cannot write data with shape ()") isrec = True names = data.dtype.names # only write object types (variable-length columns) after # writing the main table isobj = fields_are_object(data) data_list = [] colnums_all = [] for i, name in enumerate(names): colnum = self._extract_colnum(name) data_list.append(data[name]) colnums_all.append(colnum) if slow: for i, name in enumerate(names): if not isobj[i]: self.write_column(name, data_list[i], **keys) else: nonobj_colnums = [] nonobj_arrays = [] for i in xrange(len(data_list)): if not isobj[i]: nonobj_colnums.append(colnums_all[i]) if isrec: # this still leaves possibility of f-order sub-arrays.. colref = array_to_native(data_list[i], inplace=False) else: colref = array_to_native_c(data_list[i], inplace=False) if IS_PY3 and colref.dtype.char == 'U': # for python3, we convert unicode to ascii # this will error if the character is not in ascii colref = colref.astype('S', copy=False) nonobj_arrays.append(colref) for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays): self._verify_column_data(tcolnum, tdata) if len(nonobj_arrays) > 0: firstrow = keys.get('firstrow', 0) self._FITS.write_columns( self._ext+1, nonobj_colnums, nonobj_arrays, firstrow=firstrow+1, write_bitcols=self.write_bitcols) # writing the object arrays always occurs the same way # need to make sure this works for array fields for i, name in enumerate(names): if isobj[i]: self.write_var_column(name, data_list[i], **keys) self._update_info()
[ "def", "write", "(", "self", ",", "data", ",", "*", "*", "keys", ")", ":", "slow", "=", "keys", ".", "get", "(", "'slow'", ",", "False", ")", "isrec", "=", "False", "if", "isinstance", "(", "data", ",", "(", "list", ",", "dict", ")", ")", ":", "if", "isinstance", "(", "data", ",", "list", ")", ":", "data_list", "=", "data", "columns_all", "=", "keys", ".", "get", "(", "'columns'", ",", "None", ")", "if", "columns_all", "is", "None", ":", "columns_all", "=", "keys", ".", "get", "(", "'names'", ",", "None", ")", "if", "columns_all", "is", "None", ":", "raise", "ValueError", "(", "\"you must send columns with a list of arrays\"", ")", "else", ":", "columns_all", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "data_list", "=", "[", "data", "[", "n", "]", "for", "n", "in", "columns_all", "]", "colnums_all", "=", "[", "self", ".", "_extract_colnum", "(", "c", ")", "for", "c", "in", "columns_all", "]", "names", "=", "[", "self", ".", "get_colname", "(", "c", ")", "for", "c", "in", "colnums_all", "]", "isobj", "=", "numpy", ".", "zeros", "(", "len", "(", "data_list", ")", ",", "dtype", "=", "numpy", ".", "bool", ")", "for", "i", "in", "xrange", "(", "len", "(", "data_list", ")", ")", ":", "isobj", "[", "i", "]", "=", "is_object", "(", "data_list", "[", "i", "]", ")", "else", ":", "if", "data", ".", "dtype", ".", "fields", "is", "None", ":", "raise", "ValueError", "(", "\"You are writing to a table, so I expected \"", "\"an array with fields as input. If you want \"", "\"to write a simple array, you should use \"", "\"write_column to write to a single column, \"", "\"or instead write to an image hdu\"", ")", "if", "data", ".", "shape", "is", "(", ")", ":", "raise", "ValueError", "(", "\"cannot write data with shape ()\"", ")", "isrec", "=", "True", "names", "=", "data", ".", "dtype", ".", "names", "# only write object types (variable-length columns) after", "# writing the main table", "isobj", "=", "fields_are_object", "(", "data", ")", "data_list", "=", "[", "]", "colnums_all", "=", "[", "]", "for", "i", ",", "name", "in", "enumerate", "(", "names", ")", ":", "colnum", "=", "self", ".", "_extract_colnum", "(", "name", ")", "data_list", ".", "append", "(", "data", "[", "name", "]", ")", "colnums_all", ".", "append", "(", "colnum", ")", "if", "slow", ":", "for", "i", ",", "name", "in", "enumerate", "(", "names", ")", ":", "if", "not", "isobj", "[", "i", "]", ":", "self", ".", "write_column", "(", "name", ",", "data_list", "[", "i", "]", ",", "*", "*", "keys", ")", "else", ":", "nonobj_colnums", "=", "[", "]", "nonobj_arrays", "=", "[", "]", "for", "i", "in", "xrange", "(", "len", "(", "data_list", ")", ")", ":", "if", "not", "isobj", "[", "i", "]", ":", "nonobj_colnums", ".", "append", "(", "colnums_all", "[", "i", "]", ")", "if", "isrec", ":", "# this still leaves possibility of f-order sub-arrays..", "colref", "=", "array_to_native", "(", "data_list", "[", "i", "]", ",", "inplace", "=", "False", ")", "else", ":", "colref", "=", "array_to_native_c", "(", "data_list", "[", "i", "]", ",", "inplace", "=", "False", ")", "if", "IS_PY3", "and", "colref", ".", "dtype", ".", "char", "==", "'U'", ":", "# for python3, we convert unicode to ascii", "# this will error if the character is not in ascii", "colref", "=", "colref", ".", "astype", "(", "'S'", ",", "copy", "=", "False", ")", "nonobj_arrays", ".", "append", "(", "colref", ")", "for", "tcolnum", ",", "tdata", "in", "zip", "(", "nonobj_colnums", ",", "nonobj_arrays", ")", ":", "self", ".", "_verify_column_data", "(", "tcolnum", ",", "tdata", 
")", "if", "len", "(", "nonobj_arrays", ")", ">", "0", ":", "firstrow", "=", "keys", ".", "get", "(", "'firstrow'", ",", "0", ")", "self", ".", "_FITS", ".", "write_columns", "(", "self", ".", "_ext", "+", "1", ",", "nonobj_colnums", ",", "nonobj_arrays", ",", "firstrow", "=", "firstrow", "+", "1", ",", "write_bitcols", "=", "self", ".", "write_bitcols", ")", "# writing the object arrays always occurs the same way", "# need to make sure this works for array fields", "for", "i", ",", "name", "in", "enumerate", "(", "names", ")", ":", "if", "isobj", "[", "i", "]", ":", "self", ".", "write_var_column", "(", "name", ",", "data_list", "[", "i", "]", ",", "*", "*", "keys", ")", "self", ".", "_update_info", "(", ")" ]
Write data into this HDU

parameters
----------
data: ndarray or list of ndarray
    A numerical python array. Should be an ordinary array for image
    HDUs, should have fields for tables. To write an ordinary array to
    a column in a table HDU, use write_column. If data already exists
    in this HDU, it will be overwritten. See the append() method to
    append new rows to a table HDU.
firstrow: integer, optional
    At which row you should begin writing to tables. Be sure you know
    what you are doing! For appending see the append() method.
    Default 0.
columns: list, optional
    If data is a list of arrays, you must send columns as a list
    of names or column numbers
    You can also send names=
names: list, optional
    same as columns=
[ "Write", "data", "into", "this", "HDU" ]
python
train
39.345133
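A hedged usage sketch for writing a structured array as a table HDU; it assumes fitsio's public FITS object and append method behave as the docstring above describes, and the file name and columns are invented:

import numpy as np
import fitsio   # assumed importable; this is the package the record documents

# A small structured array: one row per object, named columns.
data = np.zeros(3, dtype=[("id", "i8"), ("flux", "f8")])
data["id"] = [1, 2, 3]
data["flux"] = [0.5, 1.5, 2.5]

with fitsio.FITS("catalog.fits", "rw", clobber=True) as fits:
    fits.write(data)        # creates a new table HDU from the array's fields
    fits[-1].append(data)   # the append() mentioned above adds more rows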
Metatab/metapack
metapack/cli/wp.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/wp.py#L143-L263
def publish_wp(site_name, output_file, resources, args): """Publish a notebook to a wordpress post, using Gutenberg blocks. Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter' show_input: hide github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb identifier: 5c987397-a954-46ca-8743-bdcd7a71579c featured_image: 171 authors: - email: [email protected] name: Eric Busboom organization: Civic Knowledge type: wrangler tags: - Tag1 - Tag2 categories: - Demographics - Tutorial 'Featured_image' is an attachment id """ from wordpress_xmlrpc import Client, WordPressPost from wordpress_xmlrpc.methods.media import UploadFile, GetMediaLibrary from wordpress_xmlrpc.methods.posts import NewPost, EditPost, GetPost # http://busboom.org/wptest/wp-content/uploads/sites/7/2017/11/output_16_0-300x200.png url, user, password = get_site_config(site_name) meta = {} for r in resources: if r.endswith('.json'): with open(r) as f: meta = json.load(f) fm = meta.get('frontmatter',{}) if not 'identifier' in fm or not fm['identifier']: err("Can't publish notebook without a unique identifier. Add this to the " "Metatab document or frontmatter metadata:\n identifier: {}".format(str(uuid4()))) wp = Client(url, user, password) post = find_post(wp, fm['identifier']) if post: prt("Updating old post") else: post = WordPressPost() post.id = wp.call(NewPost(post)) prt("Creating new post") post.title = fm.get('title','') post.slug = fm.get('slug') with open(output_file) as f: content = f.read() post.terms_names = { 'post_tag': fm.get('tags',[]), 'category': fm.get('categories',[]) } if args.header: print(yaml.dump(fm, default_flow_style=False)) set_custom_field(post, 'identifier', fm['identifier']) post.excerpt = fm.get('excerpt', fm.get('brief', fm.get('description'))) def strip_image_name(n): """Strip off the version number from the media file""" from os.path import splitext import re return re.sub(r'\-\d+$','',splitext(n)[0]) extant_files = list(wp.call(GetMediaLibrary(dict(parent_id=post.id)))) def find_extant_image(image_name): for img in extant_files: if strip_image_name(basename(img.metadata['file'])) == strip_image_name(image_name): return img return None for r in resources: image_data = prepare_image(fm['identifier'], r, post.id) img_from = "/{}/{}".format(fm['slug'], basename(r)) extant_image = find_extant_image(image_data['name']) if extant_image: prt("Post already has image:", extant_image.id, extant_image.link) img_to = extant_image.link elif r.endswith('.png'): # Foolishly assuming all images are PNGs response = wp.call(UploadFile(image_data, overwrite=True)) prt("Uploaded image {} to id={}, {}".format(basename(r), response['id'], response['link'])) img_to = response['link'] content = content.replace(img_from, img_to) if fm.get('featured_image') and fm.get('featured_image').strip(): post.thumbnail = int(fm['featured_image']) elif hasattr(post, 'thumbnail') and isinstance(post.thumbnail, dict): # The thumbnail expects an attachment id on EditPost, but returns a dict on GetPost post.thumbnail = post.thumbnail['attachment_id'] post.content = content r = wp.call(EditPost(post.id, post)) return r, wp.call(GetPost(post.id))
[ "def", "publish_wp", "(", "site_name", ",", "output_file", ",", "resources", ",", "args", ")", ":", "from", "wordpress_xmlrpc", "import", "Client", ",", "WordPressPost", "from", "wordpress_xmlrpc", ".", "methods", ".", "media", "import", "UploadFile", ",", "GetMediaLibrary", "from", "wordpress_xmlrpc", ".", "methods", ".", "posts", "import", "NewPost", ",", "EditPost", ",", "GetPost", "# http://busboom.org/wptest/wp-content/uploads/sites/7/2017/11/output_16_0-300x200.png", "url", ",", "user", ",", "password", "=", "get_site_config", "(", "site_name", ")", "meta", "=", "{", "}", "for", "r", "in", "resources", ":", "if", "r", ".", "endswith", "(", "'.json'", ")", ":", "with", "open", "(", "r", ")", "as", "f", ":", "meta", "=", "json", ".", "load", "(", "f", ")", "fm", "=", "meta", ".", "get", "(", "'frontmatter'", ",", "{", "}", ")", "if", "not", "'identifier'", "in", "fm", "or", "not", "fm", "[", "'identifier'", "]", ":", "err", "(", "\"Can't publish notebook without a unique identifier. Add this to the \"", "\"Metatab document or frontmatter metadata:\\n identifier: {}\"", ".", "format", "(", "str", "(", "uuid4", "(", ")", ")", ")", ")", "wp", "=", "Client", "(", "url", ",", "user", ",", "password", ")", "post", "=", "find_post", "(", "wp", ",", "fm", "[", "'identifier'", "]", ")", "if", "post", ":", "prt", "(", "\"Updating old post\"", ")", "else", ":", "post", "=", "WordPressPost", "(", ")", "post", ".", "id", "=", "wp", ".", "call", "(", "NewPost", "(", "post", ")", ")", "prt", "(", "\"Creating new post\"", ")", "post", ".", "title", "=", "fm", ".", "get", "(", "'title'", ",", "''", ")", "post", ".", "slug", "=", "fm", ".", "get", "(", "'slug'", ")", "with", "open", "(", "output_file", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "post", ".", "terms_names", "=", "{", "'post_tag'", ":", "fm", ".", "get", "(", "'tags'", ",", "[", "]", ")", ",", "'category'", ":", "fm", ".", "get", "(", "'categories'", ",", "[", "]", ")", "}", "if", "args", ".", "header", ":", "print", "(", "yaml", ".", "dump", "(", "fm", ",", "default_flow_style", "=", "False", ")", ")", "set_custom_field", "(", "post", ",", "'identifier'", ",", "fm", "[", "'identifier'", "]", ")", "post", ".", "excerpt", "=", "fm", ".", "get", "(", "'excerpt'", ",", "fm", ".", "get", "(", "'brief'", ",", "fm", ".", "get", "(", "'description'", ")", ")", ")", "def", "strip_image_name", "(", "n", ")", ":", "\"\"\"Strip off the version number from the media file\"\"\"", "from", "os", ".", "path", "import", "splitext", "import", "re", "return", "re", ".", "sub", "(", "r'\\-\\d+$'", ",", "''", ",", "splitext", "(", "n", ")", "[", "0", "]", ")", "extant_files", "=", "list", "(", "wp", ".", "call", "(", "GetMediaLibrary", "(", "dict", "(", "parent_id", "=", "post", ".", "id", ")", ")", ")", ")", "def", "find_extant_image", "(", "image_name", ")", ":", "for", "img", "in", "extant_files", ":", "if", "strip_image_name", "(", "basename", "(", "img", ".", "metadata", "[", "'file'", "]", ")", ")", "==", "strip_image_name", "(", "image_name", ")", ":", "return", "img", "return", "None", "for", "r", "in", "resources", ":", "image_data", "=", "prepare_image", "(", "fm", "[", "'identifier'", "]", ",", "r", ",", "post", ".", "id", ")", "img_from", "=", "\"/{}/{}\"", ".", "format", "(", "fm", "[", "'slug'", "]", ",", "basename", "(", "r", ")", ")", "extant_image", "=", "find_extant_image", "(", "image_data", "[", "'name'", "]", ")", "if", "extant_image", ":", "prt", "(", "\"Post already has image:\"", ",", "extant_image", ".", "id", 
",", "extant_image", ".", "link", ")", "img_to", "=", "extant_image", ".", "link", "elif", "r", ".", "endswith", "(", "'.png'", ")", ":", "# Foolishly assuming all images are PNGs", "response", "=", "wp", ".", "call", "(", "UploadFile", "(", "image_data", ",", "overwrite", "=", "True", ")", ")", "prt", "(", "\"Uploaded image {} to id={}, {}\"", ".", "format", "(", "basename", "(", "r", ")", ",", "response", "[", "'id'", "]", ",", "response", "[", "'link'", "]", ")", ")", "img_to", "=", "response", "[", "'link'", "]", "content", "=", "content", ".", "replace", "(", "img_from", ",", "img_to", ")", "if", "fm", ".", "get", "(", "'featured_image'", ")", "and", "fm", ".", "get", "(", "'featured_image'", ")", ".", "strip", "(", ")", ":", "post", ".", "thumbnail", "=", "int", "(", "fm", "[", "'featured_image'", "]", ")", "elif", "hasattr", "(", "post", ",", "'thumbnail'", ")", "and", "isinstance", "(", "post", ".", "thumbnail", ",", "dict", ")", ":", "# The thumbnail expects an attachment id on EditPost, but returns a dict on GetPost", "post", ".", "thumbnail", "=", "post", ".", "thumbnail", "[", "'attachment_id'", "]", "post", ".", "content", "=", "content", "r", "=", "wp", ".", "call", "(", "EditPost", "(", "post", ".", "id", ",", "post", ")", ")", "return", "r", ",", "wp", ".", "call", "(", "GetPost", "(", "post", ".", "id", ")", ")" ]
Publish a notebook to a wordpress post, using Gutenberg blocks. Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter' show_input: hide github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb identifier: 5c987397-a954-46ca-8743-bdcd7a71579c featured_image: 171 authors: - email: [email protected] name: Eric Busboom organization: Civic Knowledge type: wrangler tags: - Tag1 - Tag2 categories: - Demographics - Tutorial 'Featured_image' is an attachment id
[ "Publish", "a", "notebook", "to", "a", "wordpress", "post", "using", "Gutenberg", "blocks", "." ]
python
train
30.512397
pyca/pynacl
src/nacl/hash.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/hash.py#L86-L118
def blake2b(data, digest_size=BLAKE2B_BYTES, key=b'', salt=b'', person=b'', encoder=nacl.encoding.HexEncoder): """ Hashes ``data`` with blake2b. :param data: the digest input byte sequence :type data: bytes :param digest_size: the requested digest size; must be at most :const:`BLAKE2B_BYTES_MAX`; the default digest size is :const:`BLAKE2B_BYTES` :type digest_size: int :param key: the key to be set for keyed MAC/PRF usage; if set, the key must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long :type key: bytes :param salt: an initialization salt at most :const:`BLAKE2B_SALTBYTES` long; it will be zero-padded if needed :type salt: bytes :param person: a personalization string at most :const:`BLAKE2B_PERSONALBYTES` long; it will be zero-padded if needed :type person: bytes :param encoder: the encoder to use on returned digest :type encoder: class :returns: The hashed message. :rtype: bytes """ digest = _b2b_hash(data, digest_size=digest_size, key=key, salt=salt, person=person) return encoder.encode(digest)
[ "def", "blake2b", "(", "data", ",", "digest_size", "=", "BLAKE2B_BYTES", ",", "key", "=", "b''", ",", "salt", "=", "b''", ",", "person", "=", "b''", ",", "encoder", "=", "nacl", ".", "encoding", ".", "HexEncoder", ")", ":", "digest", "=", "_b2b_hash", "(", "data", ",", "digest_size", "=", "digest_size", ",", "key", "=", "key", ",", "salt", "=", "salt", ",", "person", "=", "person", ")", "return", "encoder", ".", "encode", "(", "digest", ")" ]
Hashes ``data`` with blake2b. :param data: the digest input byte sequence :type data: bytes :param digest_size: the requested digest size; must be at most :const:`BLAKE2B_BYTES_MAX`; the default digest size is :const:`BLAKE2B_BYTES` :type digest_size: int :param key: the key to be set for keyed MAC/PRF usage; if set, the key must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long :type key: bytes :param salt: an initialization salt at most :const:`BLAKE2B_SALTBYTES` long; it will be zero-padded if needed :type salt: bytes :param person: a personalization string at most :const:`BLAKE2B_PERSONALBYTES` long; it will be zero-padded if needed :type person: bytes :param encoder: the encoder to use on returned digest :type encoder: class :returns: The hashed message. :rtype: bytes
[ "Hashes", "data", "with", "blake2b", "." ]
python
train
38.666667
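Usage follows the docstring directly; the message and MAC key below are illustrative:

import nacl.encoding
import nacl.hash

msg = b"256 BytesOrLess"
key = b"a-secret-mac-key"

digest = nacl.hash.blake2b(msg)                             # hex-encoded by default
mac = nacl.hash.blake2b(msg, key=key,
                        encoder=nacl.encoding.HexEncoder)   # keyed MAC/PRF usage
print(digest, mac)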
mlperf/training
data_generation/fractal_graph_expansions/util.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/data_generation/fractal_graph_expansions/util.py#L34-L38
def load_df_from_file(file_path, sep=",", header=0): """Wrapper around pandas' read_csv.""" with tf.gfile.Open(file_path) as infile: df = pd.read_csv(infile, sep=sep, header=header) return df
[ "def", "load_df_from_file", "(", "file_path", ",", "sep", "=", "\",\"", ",", "header", "=", "0", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "file_path", ")", "as", "infile", ":", "df", "=", "pd", ".", "read_csv", "(", "infile", ",", "sep", "=", "sep", ",", "header", "=", "header", ")", "return", "df" ]
Wrapper around pandas' read_csv.
[ "Wrapper", "around", "pandas", "read_csv", "." ]
python
train
39.4
SBRG/ssbio
ssbio/core/protein.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L503-L571
def set_representative_sequence(self, force_rerun=False): """Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence. Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings except when KEGG mappings have PDBs associated with them and UniProt doesn't. Args: force_rerun (bool): Set to True to recheck stored sequences Returns: SeqProp: Which sequence was set as representative """ if len(self.sequences) == 0: log.error('{}: no sequences mapped'.format(self.id)) return self.representative_sequence kegg_mappings = self.filter_sequences(KEGGProp) if len(kegg_mappings) > 0: kegg_to_use = kegg_mappings[0] if len(kegg_mappings) > 1: log.warning('{}: multiple KEGG mappings found, using the first entry {}'.format(self.id, kegg_to_use.id)) uniprot_mappings = self.filter_sequences(UniProtProp) # If a representative sequence has already been set, nothing needs to be done if self.representative_sequence and not force_rerun: log.debug('{}: representative sequence already set'.format(self.id)) # If there is a KEGG annotation and no UniProt annotations, set KEGG as representative elif len(kegg_mappings) > 0 and len(uniprot_mappings) == 0: self.representative_sequence = kegg_to_use log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id)) # If there are UniProt annotations and no KEGG annotations, set UniProt as representative elif len(kegg_mappings) == 0 and len(uniprot_mappings) > 0: # If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs # This way, UniProts with PDBs get ranked to the top, or if no PDBs, reviewed entries u_ranker = [] for u in uniprot_mappings: u_ranker.append((u.id, u.ranking_score())) sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True) best_u_id = sorted_by_second[0][0] best_u = uniprot_mappings.get_by_id(best_u_id) self.representative_sequence = best_u log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id)) # If there are both UniProt and KEGG annotations... elif len(kegg_mappings) > 0 and len(uniprot_mappings) > 0: # Use KEGG if the mapped UniProt is unique, and it has PDBs if kegg_to_use.num_pdbs > 0 and not uniprot_mappings.has_id(kegg_to_use.uniprot): self.representative_sequence = kegg_to_use log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id)) else: # If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs u_ranker = [] for u in uniprot_mappings: u_ranker.append((u.id, u.ranking_score())) sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True) best_u_id = sorted_by_second[0][0] best_u = uniprot_mappings.get_by_id(best_u_id) self.representative_sequence = best_u log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id)) return self.representative_sequence
[ "def", "set_representative_sequence", "(", "self", ",", "force_rerun", "=", "False", ")", ":", "if", "len", "(", "self", ".", "sequences", ")", "==", "0", ":", "log", ".", "error", "(", "'{}: no sequences mapped'", ".", "format", "(", "self", ".", "id", ")", ")", "return", "self", ".", "representative_sequence", "kegg_mappings", "=", "self", ".", "filter_sequences", "(", "KEGGProp", ")", "if", "len", "(", "kegg_mappings", ")", ">", "0", ":", "kegg_to_use", "=", "kegg_mappings", "[", "0", "]", "if", "len", "(", "kegg_mappings", ")", ">", "1", ":", "log", ".", "warning", "(", "'{}: multiple KEGG mappings found, using the first entry {}'", ".", "format", "(", "self", ".", "id", ",", "kegg_to_use", ".", "id", ")", ")", "uniprot_mappings", "=", "self", ".", "filter_sequences", "(", "UniProtProp", ")", "# If a representative sequence has already been set, nothing needs to be done", "if", "self", ".", "representative_sequence", "and", "not", "force_rerun", ":", "log", ".", "debug", "(", "'{}: representative sequence already set'", ".", "format", "(", "self", ".", "id", ")", ")", "# If there is a KEGG annotation and no UniProt annotations, set KEGG as representative", "elif", "len", "(", "kegg_mappings", ")", ">", "0", "and", "len", "(", "uniprot_mappings", ")", "==", "0", ":", "self", ".", "representative_sequence", "=", "kegg_to_use", "log", ".", "debug", "(", "'{}: representative sequence set from KEGG ID {}'", ".", "format", "(", "self", ".", "id", ",", "kegg_to_use", ".", "id", ")", ")", "# If there are UniProt annotations and no KEGG annotations, set UniProt as representative", "elif", "len", "(", "kegg_mappings", ")", "==", "0", "and", "len", "(", "uniprot_mappings", ")", ">", "0", ":", "# If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs", "# This way, UniProts with PDBs get ranked to the top, or if no PDBs, reviewed entries", "u_ranker", "=", "[", "]", "for", "u", "in", "uniprot_mappings", ":", "u_ranker", ".", "append", "(", "(", "u", ".", "id", ",", "u", ".", "ranking_score", "(", ")", ")", ")", "sorted_by_second", "=", "sorted", "(", "u_ranker", ",", "key", "=", "lambda", "tup", ":", "tup", "[", "1", "]", ",", "reverse", "=", "True", ")", "best_u_id", "=", "sorted_by_second", "[", "0", "]", "[", "0", "]", "best_u", "=", "uniprot_mappings", ".", "get_by_id", "(", "best_u_id", ")", "self", ".", "representative_sequence", "=", "best_u", "log", ".", "debug", "(", "'{}: representative sequence set from UniProt ID {}'", ".", "format", "(", "self", ".", "id", ",", "best_u_id", ")", ")", "# If there are both UniProt and KEGG annotations...", "elif", "len", "(", "kegg_mappings", ")", ">", "0", "and", "len", "(", "uniprot_mappings", ")", ">", "0", ":", "# Use KEGG if the mapped UniProt is unique, and it has PDBs", "if", "kegg_to_use", ".", "num_pdbs", ">", "0", "and", "not", "uniprot_mappings", ".", "has_id", "(", "kegg_to_use", ".", "uniprot", ")", ":", "self", ".", "representative_sequence", "=", "kegg_to_use", "log", ".", "debug", "(", "'{}: representative sequence set from KEGG ID {}'", ".", "format", "(", "self", ".", "id", ",", "kegg_to_use", ".", "id", ")", ")", "else", ":", "# If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs", "u_ranker", "=", "[", "]", "for", "u", "in", "uniprot_mappings", ":", "u_ranker", ".", "append", "(", "(", "u", ".", "id", ",", "u", ".", "ranking_score", "(", ")", ")", ")", "sorted_by_second", "=", "sorted", "(", "u_ranker", ",", "key", "=", "lambda", "tup", ":", "tup", "[", "1", "]", ",", 
"reverse", "=", "True", ")", "best_u_id", "=", "sorted_by_second", "[", "0", "]", "[", "0", "]", "best_u", "=", "uniprot_mappings", ".", "get_by_id", "(", "best_u_id", ")", "self", ".", "representative_sequence", "=", "best_u", "log", ".", "debug", "(", "'{}: representative sequence set from UniProt ID {}'", ".", "format", "(", "self", ".", "id", ",", "best_u_id", ")", ")", "return", "self", ".", "representative_sequence" ]
Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence. Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings except when KEGG mappings have PDBs associated with them and UniProt doesn't. Args: force_rerun (bool): Set to True to recheck stored sequences Returns: SeqProp: Which sequence was set as representative
[ "Automatically", "consolidate", "loaded", "sequences", "(", "manual", "UniProt", "or", "KEGG", ")", "and", "set", "a", "single", "representative", "sequence", "." ]
python
train
51.101449
yt-project/unyt
unyt/array.py
https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/array.py#L2041-L2059
def udot(op1, op2): """Matrix or vector dot product that preserves units This is a wrapper around np.dot that preserves units. Examples -------- >>> from unyt import km, s >>> a = np.eye(2)*km >>> b = (np.ones((2, 2)) * 2)*s >>> print(udot(a, b)) [[2. 2.] [2. 2.]] km*s """ dot = np.dot(op1.d, op2.d) units = op1.units * op2.units if dot.shape == (): return unyt_quantity(dot, units) return unyt_array(dot, units)
[ "def", "udot", "(", "op1", ",", "op2", ")", ":", "dot", "=", "np", ".", "dot", "(", "op1", ".", "d", ",", "op2", ".", "d", ")", "units", "=", "op1", ".", "units", "*", "op2", ".", "units", "if", "dot", ".", "shape", "==", "(", ")", ":", "return", "unyt_quantity", "(", "dot", ",", "units", ")", "return", "unyt_array", "(", "dot", ",", "units", ")" ]
Matrix or vector dot product that preserves units This is a wrapper around np.dot that preserves units. Examples -------- >>> from unyt import km, s >>> a = np.eye(2)*km >>> b = (np.ones((2, 2)) * 2)*s >>> print(udot(a, b)) [[2. 2.] [2. 2.]] km*s
[ "Matrix", "or", "vector", "dot", "product", "that", "preserves", "units" ]
python
train
24.526316
mgraffg/EvoDAG
EvoDAG/population.py
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L402-L419
def replace(self, v): """Replace an individual selected by negative tournament selection with individual v""" if self.popsize < self._popsize: return self.add(v) k = self.tournament(negative=True) self.clean(self.population[k]) self.population[k] = v v.position = len(self._hist) self._hist.append(v) self.bsf = v self.estopping = v self._inds_replace += 1 self._density += self.get_density(v) if self._inds_replace == self._popsize: self._inds_replace = 0 self.generation += 1 gc.collect()
[ "def", "replace", "(", "self", ",", "v", ")", ":", "if", "self", ".", "popsize", "<", "self", ".", "_popsize", ":", "return", "self", ".", "add", "(", "v", ")", "k", "=", "self", ".", "tournament", "(", "negative", "=", "True", ")", "self", ".", "clean", "(", "self", ".", "population", "[", "k", "]", ")", "self", ".", "population", "[", "k", "]", "=", "v", "v", ".", "position", "=", "len", "(", "self", ".", "_hist", ")", "self", ".", "_hist", ".", "append", "(", "v", ")", "self", ".", "bsf", "=", "v", "self", ".", "estopping", "=", "v", "self", ".", "_inds_replace", "+=", "1", "self", ".", "_density", "+=", "self", ".", "get_density", "(", "v", ")", "if", "self", ".", "_inds_replace", "==", "self", ".", "_popsize", ":", "self", ".", "_inds_replace", "=", "0", "self", ".", "generation", "+=", "1", "gc", ".", "collect", "(", ")" ]
Replace an individual selected by negative tournament selection with individual v
[ "Replace", "an", "individual", "selected", "by", "negative", "tournament", "selection", "with", "individual", "v" ]
python
train
34.722222
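The entry above implements steady-state replacement driven by negative tournament selection. A minimal standalone sketch of that idea follows; it does not use EvoDAG's actual classes, and the fitness attribute and tournament size are assumptions.

import random

def negative_tournament(population, k=2):
    # Sample k individuals and return the index of the worst (lowest fitness)
    candidates = random.sample(range(len(population)), k)
    return min(candidates, key=lambda i: population[i].fitness)

def steady_state_replace(population, new_individual, k=2):
    # The loser of the negative tournament is overwritten by the new individual,
    # so the population size stays constant while average quality tends to improve
    loser = negative_tournament(population, k)
    population[loser] = new_individual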
jwkvam/bowtie
bowtie/_magic.py
https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_magic.py#L27-L47
def get_notebook_name() -> str: """Return the full path of the jupyter notebook. References ---------- https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246 """ kernel_id = re.search( # type: ignore 'kernel-(.*).json', ipykernel.connect.get_connection_file() ).group(1) servers = list_running_servers() for server in servers: response = requests.get(urljoin(server['url'], 'api/sessions'), params={'token': server.get('token', '')}) for session in json.loads(response.text): if session['kernel']['id'] == kernel_id: relative_path = session['notebook']['path'] return pjoin(server['notebook_dir'], relative_path) raise Exception('Noteboook not found.')
[ "def", "get_notebook_name", "(", ")", "->", "str", ":", "kernel_id", "=", "re", ".", "search", "(", "# type: ignore", "'kernel-(.*).json'", ",", "ipykernel", ".", "connect", ".", "get_connection_file", "(", ")", ")", ".", "group", "(", "1", ")", "servers", "=", "list_running_servers", "(", ")", "for", "server", "in", "servers", ":", "response", "=", "requests", ".", "get", "(", "urljoin", "(", "server", "[", "'url'", "]", ",", "'api/sessions'", ")", ",", "params", "=", "{", "'token'", ":", "server", ".", "get", "(", "'token'", ",", "''", ")", "}", ")", "for", "session", "in", "json", ".", "loads", "(", "response", ".", "text", ")", ":", "if", "session", "[", "'kernel'", "]", "[", "'id'", "]", "==", "kernel_id", ":", "relative_path", "=", "session", "[", "'notebook'", "]", "[", "'path'", "]", "return", "pjoin", "(", "server", "[", "'notebook_dir'", "]", ",", "relative_path", ")", "raise", "Exception", "(", "'Noteboook not found.'", ")" ]
Return the full path of the jupyter notebook. References ---------- https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246
[ "Return", "the", "full", "path", "of", "the", "jupyter", "notebook", "." ]
python
train
38.047619
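A usage sketch for the helper above; it only resolves when called from a cell of a running Jupyter notebook, and the printed path is illustrative.

# Run inside a notebook cell: the kernel id is matched against the sessions
# reported by every running notebook server
notebook_path = get_notebook_name()
print(notebook_path)  # e.g. /home/user/projects/analysis.ipynb (illustrative)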
Toilal/rebulk
rebulk/rebulk.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/rebulk.py#L146-L156
def string(self, *pattern, **kwargs): """ Add string pattern :param pattern: :type pattern: :return: self :rtype: Rebulk """ self.pattern(self.build_string(*pattern, **kwargs)) return self
[ "def", "string", "(", "self", ",", "*", "pattern", ",", "*", "*", "kwargs", ")", ":", "self", ".", "pattern", "(", "self", ".", "build_string", "(", "*", "pattern", ",", "*", "*", "kwargs", ")", ")", "return", "self" ]
Add string pattern :param pattern: :type pattern: :return: self :rtype: Rebulk
[ "Add", "string", "pattern" ]
python
train
22.818182
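A usage sketch for the string pattern builder above, based on Rebulk's documented matches() interface; the input text is illustrative.

from rebulk import Rebulk

# Register a literal string pattern, then match it against arbitrary input
bulk = Rebulk().string('quick')
matches = bulk.matches("The quick brown fox")
print([m.value for m in matches])  # expected: ['quick']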
PyMLGame/pymlgame
pymlgame/surface.py
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L34-L44
def draw_dot(self, pos, color): """ Draw one single dot with the given color on the screen. :param pos: Position of the dot :param color: COlor for the dot :type pos: tuple :type color: tuple """ if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height: self.matrix[pos[0]][pos[1]] = color
[ "def", "draw_dot", "(", "self", ",", "pos", ",", "color", ")", ":", "if", "0", "<=", "pos", "[", "0", "]", "<", "self", ".", "width", "and", "0", "<=", "pos", "[", "1", "]", "<", "self", ".", "height", ":", "self", ".", "matrix", "[", "pos", "[", "0", "]", "]", "[", "pos", "[", "1", "]", "]", "=", "color" ]
Draw one single dot with the given color on the screen. :param pos: Position of the dot :param color: Color for the dot :type pos: tuple :type color: tuple
[ "Draw", "one", "single", "dot", "with", "the", "given", "color", "on", "the", "screen", "." ]
python
train
32.454545
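A usage sketch for draw_dot; the Surface constructor signature shown here is an assumption about pymlgame's API.

from pymlgame.surface import Surface  # import path taken from the entry's path field

surface = Surface(40, 16)                # assumed (width, height) constructor
surface.draw_dot((3, 5), (255, 0, 0))    # paint one red pixel
surface.draw_dot((99, 99), (0, 255, 0))  # silently ignored: position is off the matrix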
CenturyLinkCloud/clc-python-sdk
src/clc/APIv2/account.py
https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv2/account.py#L94-L104
def PrimaryDatacenter(self): """Returns the primary datacenter object associated with the account. >>> clc.v2.Account(alias='BTDI').PrimaryDatacenter() <clc.APIv2.datacenter.Datacenter instance at 0x10a45ce18> >>> print _ WA1 """ return(clc.v2.Datacenter(alias=self.alias,location=self.data['primaryDataCenter'], session=self.session))
[ "def", "PrimaryDatacenter", "(", "self", ")", ":", "return", "(", "clc", ".", "v2", ".", "Datacenter", "(", "alias", "=", "self", ".", "alias", ",", "location", "=", "self", ".", "data", "[", "'primaryDataCenter'", "]", ",", "session", "=", "self", ".", "session", ")", ")" ]
Returns the primary datacenter object associated with the account. >>> clc.v2.Account(alias='BTDI').PrimaryDatacenter() <clc.APIv2.datacenter.Datacenter instance at 0x10a45ce18> >>> print _ WA1
[ "Returns", "the", "primary", "datacenter", "object", "associated", "with", "the", "account", "." ]
python
train
34.909091
Cito/DBUtils
DBUtils/SteadyPg.py
https://github.com/Cito/DBUtils/blob/90e8825e038f08c82044b8e50831480175fa026a/DBUtils/SteadyPg.py#L230-L242
def begin(self, sql=None): """Begin a transaction.""" self._transaction = True try: begin = self._con.begin except AttributeError: return self._con.query(sql or 'begin') else: # use existing method if available if sql: return begin(sql=sql) else: return begin()
[ "def", "begin", "(", "self", ",", "sql", "=", "None", ")", ":", "self", ".", "_transaction", "=", "True", "try", ":", "begin", "=", "self", ".", "_con", ".", "begin", "except", "AttributeError", ":", "return", "self", ".", "_con", ".", "query", "(", "sql", "or", "'begin'", ")", "else", ":", "# use existing method if available", "if", "sql", ":", "return", "begin", "(", "sql", "=", "sql", ")", "else", ":", "return", "begin", "(", ")" ]
Begin a transaction.
[ "Begin", "a", "transaction", "." ]
python
train
29.307692
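A hedged usage sketch of an explicit transaction on a SteadyPg connection; the connection parameters are placeholders.

from DBUtils.SteadyPg import SteadyPgConnection

con = SteadyPgConnection(maxusage=1000, dbname='mydb')   # placeholder parameters
con.begin()                                 # falls back to query('begin') if the driver lacks begin()
con.query("update counters set n = n + 1")
con.commit()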
manns/pyspread
pyspread/src/gui/_widgets.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_widgets.py#L637-L641
def OnMeasureItem(self, item): """Returns the height of the items in the popup""" item_name = self.GetItems()[item] return icons[item_name].GetHeight()
[ "def", "OnMeasureItem", "(", "self", ",", "item", ")", ":", "item_name", "=", "self", ".", "GetItems", "(", ")", "[", "item", "]", "return", "icons", "[", "item_name", "]", ".", "GetHeight", "(", ")" ]
Returns the height of the items in the popup
[ "Returns", "the", "height", "of", "the", "items", "in", "the", "popup" ]
python
train
34.4
lightning-viz/lightning-python
lightning/types/base.py
https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L80-L106
def _clean_data(cls, *args, **kwargs): """ Convert raw data into a dictionary with plot-type specific methods. The result of the cleaning operation should be a dictionary. If the dictionary contains a 'data' field it will be passed directly (ensuring appropriate formatting). Otherwise, it should be a dictionary of data-type specific array data (e.g. 'points', 'timeseries'), which will be labeled appropriately (see _check_unkeyed_arrays). """ datadict = cls.clean(*args, **kwargs) if 'data' in datadict: data = datadict['data'] data = cls._ensure_dict_or_list(data) else: data = {} for key in datadict: if key == 'images': data[key] = datadict[key] else: d = cls._ensure_dict_or_list(datadict[key]) data[key] = cls._check_unkeyed_arrays(key, d) return data
[ "def", "_clean_data", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "datadict", "=", "cls", ".", "clean", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "'data'", "in", "datadict", ":", "data", "=", "datadict", "[", "'data'", "]", "data", "=", "cls", ".", "_ensure_dict_or_list", "(", "data", ")", "else", ":", "data", "=", "{", "}", "for", "key", "in", "datadict", ":", "if", "key", "==", "'images'", ":", "data", "[", "key", "]", "=", "datadict", "[", "key", "]", "else", ":", "d", "=", "cls", ".", "_ensure_dict_or_list", "(", "datadict", "[", "key", "]", ")", "data", "[", "key", "]", "=", "cls", ".", "_check_unkeyed_arrays", "(", "key", ",", "d", ")", "return", "data" ]
Convert raw data into a dictionary with plot-type specific methods. The result of the cleaning operation should be a dictionary. If the dictionary contains a 'data' field it will be passed directly (ensuring appropriate formatting). Otherwise, it should be a dictionary of data-type specific array data (e.g. 'points', 'timeseries'), which will be labeled appropriately (see _check_unkeyed_arrays).
[ "Convert", "raw", "data", "into", "a", "dictionary", "with", "plot", "-", "type", "specific", "methods", "." ]
python
train
36.37037
apache/spark
python/pyspark/ml/clustering.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/clustering.py#L522-L531
def summary(self): """ Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the training set. An exception is thrown if no summary exists. """ if self.hasSummary: return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary) else: raise RuntimeError("No training summary available for this %s" % self.__class__.__name__)
[ "def", "summary", "(", "self", ")", ":", "if", "self", ".", "hasSummary", ":", "return", "BisectingKMeansSummary", "(", "super", "(", "BisectingKMeansModel", ",", "self", ")", ".", "summary", ")", "else", ":", "raise", "RuntimeError", "(", "\"No training summary available for this %s\"", "%", "self", ".", "__class__", ".", "__name__", ")" ]
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the training set. An exception is thrown if no summary exists.
[ "Gets", "summary", "(", "e", ".", "g", ".", "cluster", "assignments", "cluster", "sizes", ")", "of", "the", "model", "trained", "on", "the", "training", "set", ".", "An", "exception", "is", "thrown", "if", "no", "summary", "exists", "." ]
python
train
45.1
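A usage sketch of the summary accessor above, following the usual pyspark.ml fit-then-summarize pattern; the tiny DataFrame and the existing SparkSession named spark are illustrative.

from pyspark.ml.clustering import BisectingKMeans
from pyspark.ml.linalg import Vectors

data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
        (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = spark.createDataFrame(data, ["features"])   # assumes an existing SparkSession `spark`

model = BisectingKMeans(k=2, seed=1).fit(df)
if model.hasSummary:
    print(model.summary.clusterSizes)   # accessing summary raises RuntimeError when none is available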
nerdvegas/rez
src/rez/vendor/version/version.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/version.py#L1033-L1040
def iter_intersecting(self, iterable, key=None, descending=False): """Like `iter_intersect_test`, but returns intersections only. Returns: An iterator that returns items from `iterable` that intersect. """ return _ContainsVersionIterator(self, iterable, key, descending, mode=_ContainsVersionIterator.MODE_INTERSECTING)
[ "def", "iter_intersecting", "(", "self", ",", "iterable", ",", "key", "=", "None", ",", "descending", "=", "False", ")", ":", "return", "_ContainsVersionIterator", "(", "self", ",", "iterable", ",", "key", ",", "descending", ",", "mode", "=", "_ContainsVersionIterator", ".", "MODE_INTERSECTING", ")" ]
Like `iter_intersect_test`, but returns intersections only. Returns: An iterator that returns items from `iterable` that intersect.
[ "Like", "iter_intersect_test", "but", "returns", "intersections", "only", "." ]
python
train
46.125
aliyun/aliyun-odps-python-sdk
odps/df/expr/window.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/window.py#L515-L524
def cume_dist(expr, sort=None, ascending=True): """ Calculate cumulative ratio of a sequence expression. :param expr: expression for calculation :param sort: name of the sort column :param ascending: whether to sort in ascending order :return: calculated column """ return _rank_op(expr, CumeDist, types.float64, sort=sort, ascending=ascending)
[ "def", "cume_dist", "(", "expr", ",", "sort", "=", "None", ",", "ascending", "=", "True", ")", ":", "return", "_rank_op", "(", "expr", ",", "CumeDist", ",", "types", ".", "float64", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")" ]
Calculate cumulative ratio of a sequence expression. :param expr: expression for calculation :param sort: name of the sort column :param ascending: whether to sort in ascending order :return: calculated column
[ "Calculate", "cumulative", "ratio", "of", "a", "sequence", "expression", "." ]
python
train
36.8
mikedh/trimesh
trimesh/ray/ray_util.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/ray/ray_util.py#L9-L132
def contains_points(intersector, points, check_direction=None): """ Check if a mesh contains a set of points, using ray tests. If the point is on the surface of the mesh, behavior is undefined. Parameters --------- mesh: Trimesh object points: (n,3) points in space Returns --------- contains : (n) bool Whether point is inside mesh or not """ # convert points to float and make sure they are 3D points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)') # placeholder result with no hits we'll fill in later contains = np.zeros(len(points), dtype=np.bool) # cull points outside of the axis aligned bounding box # this avoids running ray tests unless points are close inside_aabb = bounds.contains(intersector.mesh.bounds, points) # if everything is outside the AABB, exit early if not inside_aabb.any(): return contains # default ray direction is random, but we are not generating # uniquely each time so the behavior of this function is easier to debug default_direction = np.array([0.4395064455, 0.617598629942, 0.652231566745]) if check_direction is None: # if no check direction is specified use the default # stack it only for points inside the AABB ray_directions = np.tile(default_direction, (inside_aabb.sum(), 1)) else: # if a direction is passed use it ray_directions = np.tile( np.array(check_direction).reshape(3), (inside_aabb.sum(), 1)) # cast a ray both forwards and backwards location, index_ray, c = intersector.intersects_location( np.vstack( (points[inside_aabb], points[inside_aabb])), np.vstack( (ray_directions, -ray_directions))) # if we hit nothing in either direction just return with no hits if len(index_ray) == 0: return contains # reshape so bi_hits[0] is the result in the forward direction and # bi_hits[1] is the result in the backwards directions bi_hits = np.bincount( index_ray, minlength=len(ray_directions) * 2).reshape((2, -1)) # a point is probably inside if it hits a surface an odd number of times bi_contains = np.mod(bi_hits, 2) == 1 # if the mod of the hit count is the same in both # directions, we can save that result and move on agree = np.equal(*bi_contains) # in order to do an assignment we can only have one # level of boolean indexes, for example this doesn't work: # contains[inside_aabb][agree] = bi_contains[0][agree] # no error is thrown, but nothing gets assigned # to get around that, we create a single mask for assignment mask = inside_aabb.copy() mask[mask] = agree # set contains flags for things inside the AABB and who have # ray tests that agree in both directions contains[mask] = bi_contains[0][agree] # if one of the rays in either direction hit nothing # it is a very solid indicator we are in free space # as the edge cases we are working around tend to # add hits rather than miss hits one_freespace = (bi_hits == 0).any(axis=0) # rays where they don't agree and one isn't in free space # are deemed to be broken broken = np.logical_and(np.logical_not(agree), np.logical_not(one_freespace)) # if all rays agree return if not broken.any(): return contains # try to run again with a new random vector # only do it if check_direction isn't specified # to avoid infinite recursion if check_direction is None: # we're going to run the check again in a random direction new_direction = util.unitize(np.random.random(3) - .5) # do the mask trick again to be able to assign results mask = inside_aabb.copy() mask[mask] = broken contains[mask] = contains_points( intersector, points[inside_aabb][broken], 
check_direction=new_direction) constants.log.debug( 'detected %d broken contains test, attempted to fix', broken.sum()) return contains
[ "def", "contains_points", "(", "intersector", ",", "points", ",", "check_direction", "=", "None", ")", ":", "# convert points to float and make sure they are 3D", "points", "=", "np", ".", "asanyarray", "(", "points", ",", "dtype", "=", "np", ".", "float64", ")", "if", "not", "util", ".", "is_shape", "(", "points", ",", "(", "-", "1", ",", "3", ")", ")", ":", "raise", "ValueError", "(", "'points must be (n,3)'", ")", "# placeholder result with no hits we'll fill in later", "contains", "=", "np", ".", "zeros", "(", "len", "(", "points", ")", ",", "dtype", "=", "np", ".", "bool", ")", "# cull points outside of the axis aligned bounding box", "# this avoids running ray tests unless points are close", "inside_aabb", "=", "bounds", ".", "contains", "(", "intersector", ".", "mesh", ".", "bounds", ",", "points", ")", "# if everything is outside the AABB, exit early", "if", "not", "inside_aabb", ".", "any", "(", ")", ":", "return", "contains", "# default ray direction is random, but we are not generating", "# uniquely each time so the behavior of this function is easier to debug", "default_direction", "=", "np", ".", "array", "(", "[", "0.4395064455", ",", "0.617598629942", ",", "0.652231566745", "]", ")", "if", "check_direction", "is", "None", ":", "# if no check direction is specified use the default", "# stack it only for points inside the AABB", "ray_directions", "=", "np", ".", "tile", "(", "default_direction", ",", "(", "inside_aabb", ".", "sum", "(", ")", ",", "1", ")", ")", "else", ":", "# if a direction is passed use it", "ray_directions", "=", "np", ".", "tile", "(", "np", ".", "array", "(", "check_direction", ")", ".", "reshape", "(", "3", ")", ",", "(", "inside_aabb", ".", "sum", "(", ")", ",", "1", ")", ")", "# cast a ray both forwards and backwards", "location", ",", "index_ray", ",", "c", "=", "intersector", ".", "intersects_location", "(", "np", ".", "vstack", "(", "(", "points", "[", "inside_aabb", "]", ",", "points", "[", "inside_aabb", "]", ")", ")", ",", "np", ".", "vstack", "(", "(", "ray_directions", ",", "-", "ray_directions", ")", ")", ")", "# if we hit nothing in either direction just return with no hits", "if", "len", "(", "index_ray", ")", "==", "0", ":", "return", "contains", "# reshape so bi_hits[0] is the result in the forward direction and", "# bi_hits[1] is the result in the backwards directions", "bi_hits", "=", "np", ".", "bincount", "(", "index_ray", ",", "minlength", "=", "len", "(", "ray_directions", ")", "*", "2", ")", ".", "reshape", "(", "(", "2", ",", "-", "1", ")", ")", "# a point is probably inside if it hits a surface an odd number of times", "bi_contains", "=", "np", ".", "mod", "(", "bi_hits", ",", "2", ")", "==", "1", "# if the mod of the hit count is the same in both", "# directions, we can save that result and move on", "agree", "=", "np", ".", "equal", "(", "*", "bi_contains", ")", "# in order to do an assignment we can only have one", "# level of boolean indexes, for example this doesn't work:", "# contains[inside_aabb][agree] = bi_contains[0][agree]", "# no error is thrown, but nothing gets assigned", "# to get around that, we create a single mask for assignment", "mask", "=", "inside_aabb", ".", "copy", "(", ")", "mask", "[", "mask", "]", "=", "agree", "# set contains flags for things inside the AABB and who have", "# ray tests that agree in both directions", "contains", "[", "mask", "]", "=", "bi_contains", "[", "0", "]", "[", "agree", "]", "# if one of the rays in either direction hit nothing", "# it is a very solid indicator we are in free 
space", "# as the edge cases we are working around tend to", "# add hits rather than miss hits", "one_freespace", "=", "(", "bi_hits", "==", "0", ")", ".", "any", "(", "axis", "=", "0", ")", "# rays where they don't agree and one isn't in free space", "# are deemed to be broken", "broken", "=", "np", ".", "logical_and", "(", "np", ".", "logical_not", "(", "agree", ")", ",", "np", ".", "logical_not", "(", "one_freespace", ")", ")", "# if all rays agree return", "if", "not", "broken", ".", "any", "(", ")", ":", "return", "contains", "# try to run again with a new random vector", "# only do it if check_direction isn't specified", "# to avoid infinite recursion", "if", "check_direction", "is", "None", ":", "# we're going to run the check again in a random direction", "new_direction", "=", "util", ".", "unitize", "(", "np", ".", "random", ".", "random", "(", "3", ")", "-", ".5", ")", "# do the mask trick again to be able to assign results", "mask", "=", "inside_aabb", ".", "copy", "(", ")", "mask", "[", "mask", "]", "=", "broken", "contains", "[", "mask", "]", "=", "contains_points", "(", "intersector", ",", "points", "[", "inside_aabb", "]", "[", "broken", "]", ",", "check_direction", "=", "new_direction", ")", "constants", ".", "log", ".", "debug", "(", "'detected %d broken contains test, attempted to fix'", ",", "broken", ".", "sum", "(", ")", ")", "return", "contains" ]
Check if a mesh contains a set of points, using ray tests. If the point is on the surface of the mesh, behavior is undefined. Parameters --------- mesh: Trimesh object points: (n,3) points in space Returns --------- contains : (n) bool Whether point is inside mesh or not
[ "Check", "if", "a", "mesh", "contains", "a", "set", "of", "points", "using", "ray", "tests", "." ]
python
train
35.104839
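A usage sketch of the ray-based containment test above through trimesh's public interface; the mesh and query points are illustrative.

import numpy as np
import trimesh

mesh = trimesh.creation.box(extents=(2.0, 2.0, 2.0))   # watertight cube centred at the origin
points = np.array([[0.0, 0.0, 0.0],    # inside the cube
                   [5.0, 5.0, 5.0]])   # well outside
print(mesh.contains(points))           # expected: [ True False ]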
pandas-dev/pandas
pandas/core/ops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1962-L2039
def _bool_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def na_op(x, y): try: result = op(x, y) except TypeError: assert not isinstance(y, (list, ABCSeries, ABCIndexClass)) if isinstance(y, np.ndarray): # bool-bool dtype operations should be OK, should not get here assert not (is_bool_dtype(x) and is_bool_dtype(y)) x = ensure_object(x) y = ensure_object(y) result = libops.vec_binop(x, y, op) else: # let null fall thru assert lib.is_scalar(y) if not isna(y): y = bool(y) try: result = libops.scalar_binop(x, y, op) except (TypeError, ValueError, AttributeError, OverflowError, NotImplementedError): raise TypeError("cannot compare a dtyped [{dtype}] array " "with a scalar of type [{typ}]" .format(dtype=x.dtype, typ=type(y).__name__)) return result fill_int = lambda x: x.fillna(0) fill_bool = lambda x: x.fillna(False).astype(bool) def wrapper(self, other): is_self_int_dtype = is_integer_dtype(self.dtype) self, other = _align_method_SERIES(self, other, align_asobject=True) res_name = get_op_result_name(self, other) if isinstance(other, ABCDataFrame): # Defer to DataFrame implementation; fail early return NotImplemented elif isinstance(other, (ABCSeries, ABCIndexClass)): is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) ovalues = other.values finalizer = lambda x: x else: # scalars, list, tuple, np.array is_other_int_dtype = is_integer_dtype(np.asarray(other)) if is_list_like(other) and not isinstance(other, np.ndarray): # TODO: Can we do this before the is_integer_dtype check? # could the is_integer_dtype check be checking the wrong # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]? other = construct_1d_object_array_from_listlike(other) ovalues = other finalizer = lambda x: x.__finalize__(self) # For int vs int `^`, `|`, `&` are bitwise operators and return # integer dtypes. Otherwise these are boolean ops filler = (fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool) res_values = na_op(self.values, ovalues) unfilled = self._constructor(res_values, index=self.index, name=res_name) filled = filler(unfilled) return finalizer(filled) wrapper.__name__ = op_name return wrapper
[ "def", "_bool_method_SERIES", "(", "cls", ",", "op", ",", "special", ")", ":", "op_name", "=", "_get_op_name", "(", "op", ",", "special", ")", "def", "na_op", "(", "x", ",", "y", ")", ":", "try", ":", "result", "=", "op", "(", "x", ",", "y", ")", "except", "TypeError", ":", "assert", "not", "isinstance", "(", "y", ",", "(", "list", ",", "ABCSeries", ",", "ABCIndexClass", ")", ")", "if", "isinstance", "(", "y", ",", "np", ".", "ndarray", ")", ":", "# bool-bool dtype operations should be OK, should not get here", "assert", "not", "(", "is_bool_dtype", "(", "x", ")", "and", "is_bool_dtype", "(", "y", ")", ")", "x", "=", "ensure_object", "(", "x", ")", "y", "=", "ensure_object", "(", "y", ")", "result", "=", "libops", ".", "vec_binop", "(", "x", ",", "y", ",", "op", ")", "else", ":", "# let null fall thru", "assert", "lib", ".", "is_scalar", "(", "y", ")", "if", "not", "isna", "(", "y", ")", ":", "y", "=", "bool", "(", "y", ")", "try", ":", "result", "=", "libops", ".", "scalar_binop", "(", "x", ",", "y", ",", "op", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ",", "OverflowError", ",", "NotImplementedError", ")", ":", "raise", "TypeError", "(", "\"cannot compare a dtyped [{dtype}] array \"", "\"with a scalar of type [{typ}]\"", ".", "format", "(", "dtype", "=", "x", ".", "dtype", ",", "typ", "=", "type", "(", "y", ")", ".", "__name__", ")", ")", "return", "result", "fill_int", "=", "lambda", "x", ":", "x", ".", "fillna", "(", "0", ")", "fill_bool", "=", "lambda", "x", ":", "x", ".", "fillna", "(", "False", ")", ".", "astype", "(", "bool", ")", "def", "wrapper", "(", "self", ",", "other", ")", ":", "is_self_int_dtype", "=", "is_integer_dtype", "(", "self", ".", "dtype", ")", "self", ",", "other", "=", "_align_method_SERIES", "(", "self", ",", "other", ",", "align_asobject", "=", "True", ")", "res_name", "=", "get_op_result_name", "(", "self", ",", "other", ")", "if", "isinstance", "(", "other", ",", "ABCDataFrame", ")", ":", "# Defer to DataFrame implementation; fail early", "return", "NotImplemented", "elif", "isinstance", "(", "other", ",", "(", "ABCSeries", ",", "ABCIndexClass", ")", ")", ":", "is_other_int_dtype", "=", "is_integer_dtype", "(", "other", ".", "dtype", ")", "other", "=", "fill_int", "(", "other", ")", "if", "is_other_int_dtype", "else", "fill_bool", "(", "other", ")", "ovalues", "=", "other", ".", "values", "finalizer", "=", "lambda", "x", ":", "x", "else", ":", "# scalars, list, tuple, np.array", "is_other_int_dtype", "=", "is_integer_dtype", "(", "np", ".", "asarray", "(", "other", ")", ")", "if", "is_list_like", "(", "other", ")", "and", "not", "isinstance", "(", "other", ",", "np", ".", "ndarray", ")", ":", "# TODO: Can we do this before the is_integer_dtype check?", "# could the is_integer_dtype check be checking the wrong", "# thing? e.g. other = [[0, 1], [2, 3], [4, 5]]?", "other", "=", "construct_1d_object_array_from_listlike", "(", "other", ")", "ovalues", "=", "other", "finalizer", "=", "lambda", "x", ":", "x", ".", "__finalize__", "(", "self", ")", "# For int vs int `^`, `|`, `&` are bitwise operators and return", "# integer dtypes. 
Otherwise these are boolean ops", "filler", "=", "(", "fill_int", "if", "is_self_int_dtype", "and", "is_other_int_dtype", "else", "fill_bool", ")", "res_values", "=", "na_op", "(", "self", ".", "values", ",", "ovalues", ")", "unfilled", "=", "self", ".", "_constructor", "(", "res_values", ",", "index", "=", "self", ".", "index", ",", "name", "=", "res_name", ")", "filled", "=", "filler", "(", "unfilled", ")", "return", "finalizer", "(", "filled", ")", "wrapper", ".", "__name__", "=", "op_name", "return", "wrapper" ]
Wrapper function for Series arithmetic operations, to avoid code duplication.
[ "Wrapper", "function", "for", "Series", "arithmetic", "operations", "to", "avoid", "code", "duplication", "." ]
python
train
39.25641
openeemeter/eemeter
eemeter/caltrack/usage_per_day.py
https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L876-L922
def get_parameter_p_value_too_high_warning( model_type, model_params, parameter, p_value, maximum_p_value ): """ Return an empty list or a single warning wrapped in a list indicating whether model parameter p-value is too high. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). model_params : :any:`dict` Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`. parameter : :any:`str` The name of the parameter, e.g., ``'intercept'``. p_value : :any:`float` The p-value of the parameter. maximum_p_value : :any:`float` The maximum allowable p-value of the parameter. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning. """ warnings = [] if p_value > maximum_p_value: data = { "{}_p_value".format(parameter): p_value, "{}_maximum_p_value".format(parameter): maximum_p_value, } data.update(model_params) warnings.append( EEMeterWarning( qualified_name=( "eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format( model_type=model_type, parameter=parameter ) ), description=( "Model fit {parameter} p-value is too high. Candidate model rejected.".format( parameter=parameter ) ), data=data, ) ) return warnings
[ "def", "get_parameter_p_value_too_high_warning", "(", "model_type", ",", "model_params", ",", "parameter", ",", "p_value", ",", "maximum_p_value", ")", ":", "warnings", "=", "[", "]", "if", "p_value", ">", "maximum_p_value", ":", "data", "=", "{", "\"{}_p_value\"", ".", "format", "(", "parameter", ")", ":", "p_value", ",", "\"{}_maximum_p_value\"", ".", "format", "(", "parameter", ")", ":", "maximum_p_value", ",", "}", "data", ".", "update", "(", "model_params", ")", "warnings", ".", "append", "(", "EEMeterWarning", "(", "qualified_name", "=", "(", "\"eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high\"", ".", "format", "(", "model_type", "=", "model_type", ",", "parameter", "=", "parameter", ")", ")", ",", "description", "=", "(", "\"Model fit {parameter} p-value is too high. Candidate model rejected.\"", ".", "format", "(", "parameter", "=", "parameter", ")", ")", ",", "data", "=", "data", ",", ")", ")", "return", "warnings" ]
Return an empty list or a single warning wrapped in a list indicating whether model parameter p-value is too high. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). model_params : :any:`dict` Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`. parameter : :any:`str` The name of the parameter, e.g., ``'intercept'``. p_value : :any:`float` The p-value of the parameter. maximum_p_value : :any:`float` The maximum allowable p-value of the parameter. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
[ "Return", "an", "empty", "list", "or", "a", "single", "warning", "wrapped", "in", "a", "list", "indicating", "whether", "model", "parameter", "p", "-", "value", "is", "too", "high", "." ]
python
train
34.234043
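A small sketch exercising the warning helper above with made-up numbers; the model type, parameters and thresholds are illustrative only.

warnings = get_parameter_p_value_too_high_warning(
    model_type='cdd_hdd',
    model_params={'intercept': 1.5},
    parameter='intercept',
    p_value=0.2,            # illustrative p-value above the threshold
    maximum_p_value=0.1,
)
print(len(warnings))               # 1, because 0.2 > 0.1
print(warnings[0].qualified_name)  # eemeter.caltrack_daily.cdd_hdd.intercept_p_value_too_high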
MultipedRobotics/pyxl320
bin/servo_ping.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/bin/servo_ping.py#L45-L87
def sweep(port, rate, ID, retry=3): """ Sends a ping packet to ID's from 0 to maximum and prints out any returned messages. Actually send a broadcast and will retry (resend) the ping 3 times ... """ if port == 'dummy': s = ServoSerial(port, rate, fake=True) else: s = ServoSerial(port, rate) if ID < 0: ID = xl320.XL320_BROADCAST_ADDR try: s.open() except SerialException as e: # print('Error opening serial port:') print('-'*40) print(sys.argv[0], ':') print(e) exit(1) pkt = makePingPacket(ID) # print('ping', pkt) s.write(pkt) # as more servos add up, I might need to increase the cnt number??? for cnt in range(retry): ans = s.read() if ans: for pkt in ans: servo = packetToDict(pkt) utils.prettyPrintPacket(servo) print('raw pkt: {}'.format(pkt)) else: print('Try {}: no servos found'.format(cnt)) time.sleep(0.1) s.close()
[ "def", "sweep", "(", "port", ",", "rate", ",", "ID", ",", "retry", "=", "3", ")", ":", "if", "port", "==", "'dummy'", ":", "s", "=", "ServoSerial", "(", "port", ",", "rate", ",", "fake", "=", "True", ")", "else", ":", "s", "=", "ServoSerial", "(", "port", ",", "rate", ")", "if", "ID", "<", "0", ":", "ID", "=", "xl320", ".", "XL320_BROADCAST_ADDR", "try", ":", "s", ".", "open", "(", ")", "except", "SerialException", "as", "e", ":", "# print('Error opening serial port:')", "print", "(", "'-'", "*", "40", ")", "print", "(", "sys", ".", "argv", "[", "0", "]", ",", "':'", ")", "print", "(", "e", ")", "exit", "(", "1", ")", "pkt", "=", "makePingPacket", "(", "ID", ")", "# print('ping', pkt)", "s", ".", "write", "(", "pkt", ")", "# as more servos add up, I might need to increase the cnt number???", "for", "cnt", "in", "range", "(", "retry", ")", ":", "ans", "=", "s", ".", "read", "(", ")", "if", "ans", ":", "for", "pkt", "in", "ans", ":", "servo", "=", "packetToDict", "(", "pkt", ")", "utils", ".", "prettyPrintPacket", "(", "servo", ")", "print", "(", "'raw pkt: {}'", ".", "format", "(", "pkt", ")", ")", "else", ":", "print", "(", "'Try {}: no servos found'", ".", "format", "(", "cnt", ")", ")", "time", ".", "sleep", "(", "0.1", ")", "s", ".", "close", "(", ")" ]
Sends a ping packet to IDs from 0 to maximum and prints out any returned messages. It actually sends a broadcast and will retry (resend) the ping 3 times ...
[ "Sends", "a", "ping", "packet", "to", "ID", "s", "from", "0", "to", "maximum", "and", "prints", "out", "any", "returned", "messages", "." ]
python
train
19.860465
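A usage sketch for the ping sweep above; the serial port and baud rate are placeholders for an attached XL-320 bus.

# Broadcast-ping every servo on the bus (a negative ID selects the broadcast address)
sweep('/dev/ttyUSB0', 1000000, -1)   # placeholder port and baud rate

# Exercise the same code path without hardware via the built-in fake serial port
sweep('dummy', 1000000, -1)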
truemped/tornadotools
tornadotools/caching.py
https://github.com/truemped/tornadotools/blob/d22632b83810afc353fa886fbc9e265bee78653f/tornadotools/caching.py#L75-L87
def cache_key(*args, **kwargs): """ Base method for computing the cache key with respect to the given arguments. """ key = "" for arg in args: if callable(arg): key += ":%s" % repr(arg) else: key += ":%s" % str(arg) return key
[ "def", "cache_key", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "key", "=", "\"\"", "for", "arg", "in", "args", ":", "if", "callable", "(", "arg", ")", ":", "key", "+=", "\":%s\"", "%", "repr", "(", "arg", ")", "else", ":", "key", "+=", "\":%s\"", "%", "str", "(", "arg", ")", "return", "key" ]
Base method for computing the cache key with respect to the given arguments.
[ "Base", "method", "for", "computing", "the", "cache", "key", "with", "respect", "to", "the", "given", "arguments", "." ]
python
train
21.769231
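A quick sketch of the key format the helper above produces.

print(cache_key('user', 42))     # ':user:42'
print(cache_key(len, 'posts'))   # ":<built-in function len>:posts" (callables are keyed by repr())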
elemoine/papyrus
papyrus/xsd.py
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/xsd.py#L73-L144
def add_column_xsd(self, tb, column, attrs): """ Add the XSD for a column to tb (a TreeBuilder) """ if column.nullable: attrs['minOccurs'] = str(0) attrs['nillable'] = 'true' for cls, xsd_type in six.iteritems(self.SIMPLE_XSD_TYPES): if isinstance(column.type, cls): attrs['type'] = xsd_type with tag(tb, 'xsd:element', attrs) as tb: self.element_callback(tb, column) return tb if isinstance(column.type, Geometry): geometry_type = column.type.geometry_type xsd_type = self.SIMPLE_GEOMETRY_XSD_TYPES[geometry_type] attrs['type'] = xsd_type with tag(tb, 'xsd:element', attrs) as tb: self.element_callback(tb, column) return tb if isinstance(column.type, sqlalchemy.Enum): with tag(tb, 'xsd:element', attrs) as tb: with tag(tb, 'xsd:simpleType') as tb: with tag(tb, 'xsd:restriction', {'base': 'xsd:string'}) \ as tb: for enum in column.type.enums: with tag(tb, 'xsd:enumeration', {'value': enum}): pass self.element_callback(tb, column) return tb if isinstance(column.type, sqlalchemy.Numeric): if column.type.scale is None and column.type.precision is None: attrs['type'] = 'xsd:decimal' with tag(tb, 'xsd:element', attrs) as tb: self.element_callback(tb, column) return tb else: with tag(tb, 'xsd:element', attrs) as tb: with tag(tb, 'xsd:simpleType') as tb: with tag(tb, 'xsd:restriction', {'base': 'xsd:decimal'}) as tb: if column.type.scale is not None: with tag(tb, 'xsd:fractionDigits', {'value': str(column.type.scale)}) \ as tb: pass if column.type.precision is not None: precision = column.type.precision with tag(tb, 'xsd:totalDigits', {'value': str(precision)}) \ as tb: pass self.element_callback(tb, column) return tb if isinstance(column.type, sqlalchemy.String) \ or isinstance(column.type, sqlalchemy.Text) \ or isinstance(column.type, sqlalchemy.Unicode) \ or isinstance(column.type, sqlalchemy.UnicodeText): if column.type.length is None: attrs['type'] = 'xsd:string' with tag(tb, 'xsd:element', attrs) as tb: self.element_callback(tb, column) return tb else: with tag(tb, 'xsd:element', attrs) as tb: with tag(tb, 'xsd:simpleType') as tb: with tag(tb, 'xsd:restriction', {'base': 'xsd:string'}) as tb: with tag(tb, 'xsd:maxLength', {'value': str(column.type.length)}): pass self.element_callback(tb, column) return tb raise UnsupportedColumnTypeError(column.type)
[ "def", "add_column_xsd", "(", "self", ",", "tb", ",", "column", ",", "attrs", ")", ":", "if", "column", ".", "nullable", ":", "attrs", "[", "'minOccurs'", "]", "=", "str", "(", "0", ")", "attrs", "[", "'nillable'", "]", "=", "'true'", "for", "cls", ",", "xsd_type", "in", "six", ".", "iteritems", "(", "self", ".", "SIMPLE_XSD_TYPES", ")", ":", "if", "isinstance", "(", "column", ".", "type", ",", "cls", ")", ":", "attrs", "[", "'type'", "]", "=", "xsd_type", "with", "tag", "(", "tb", ",", "'xsd:element'", ",", "attrs", ")", "as", "tb", ":", "self", ".", "element_callback", "(", "tb", ",", "column", ")", "return", "tb", "if", "isinstance", "(", "column", ".", "type", ",", "Geometry", ")", ":", "geometry_type", "=", "column", ".", "type", ".", "geometry_type", "xsd_type", "=", "self", ".", "SIMPLE_GEOMETRY_XSD_TYPES", "[", "geometry_type", "]", "attrs", "[", "'type'", "]", "=", "xsd_type", "with", "tag", "(", "tb", ",", "'xsd:element'", ",", "attrs", ")", "as", "tb", ":", "self", ".", "element_callback", "(", "tb", ",", "column", ")", "return", "tb", "if", "isinstance", "(", "column", ".", "type", ",", "sqlalchemy", ".", "Enum", ")", ":", "with", "tag", "(", "tb", ",", "'xsd:element'", ",", "attrs", ")", "as", "tb", ":", "with", "tag", "(", "tb", ",", "'xsd:simpleType'", ")", "as", "tb", ":", "with", "tag", "(", "tb", ",", "'xsd:restriction'", ",", "{", "'base'", ":", "'xsd:string'", "}", ")", "as", "tb", ":", "for", "enum", "in", "column", ".", "type", ".", "enums", ":", "with", "tag", "(", "tb", ",", "'xsd:enumeration'", ",", "{", "'value'", ":", "enum", "}", ")", ":", "pass", "self", ".", "element_callback", "(", "tb", ",", "column", ")", "return", "tb", "if", "isinstance", "(", "column", ".", "type", ",", "sqlalchemy", ".", "Numeric", ")", ":", "if", "column", ".", "type", ".", "scale", "is", "None", "and", "column", ".", "type", ".", "precision", "is", "None", ":", "attrs", "[", "'type'", "]", "=", "'xsd:decimal'", "with", "tag", "(", "tb", ",", "'xsd:element'", ",", "attrs", ")", "as", "tb", ":", "self", ".", "element_callback", "(", "tb", ",", "column", ")", "return", "tb", "else", ":", "with", "tag", "(", "tb", ",", "'xsd:element'", ",", "attrs", ")", "as", "tb", ":", "with", "tag", "(", "tb", ",", "'xsd:simpleType'", ")", "as", "tb", ":", "with", "tag", "(", "tb", ",", "'xsd:restriction'", ",", "{", "'base'", ":", "'xsd:decimal'", "}", ")", "as", "tb", ":", "if", "column", ".", "type", ".", "scale", "is", "not", "None", ":", "with", "tag", "(", "tb", ",", "'xsd:fractionDigits'", ",", "{", "'value'", ":", "str", "(", "column", ".", "type", ".", "scale", ")", "}", ")", "as", "tb", ":", "pass", "if", "column", ".", "type", ".", "precision", "is", "not", "None", ":", "precision", "=", "column", ".", "type", ".", "precision", "with", "tag", "(", "tb", ",", "'xsd:totalDigits'", ",", "{", "'value'", ":", "str", "(", "precision", ")", "}", ")", "as", "tb", ":", "pass", "self", ".", "element_callback", "(", "tb", ",", "column", ")", "return", "tb", "if", "isinstance", "(", "column", ".", "type", ",", "sqlalchemy", ".", "String", ")", "or", "isinstance", "(", "column", ".", "type", ",", "sqlalchemy", ".", "Text", ")", "or", "isinstance", "(", "column", ".", "type", ",", "sqlalchemy", ".", "Unicode", ")", "or", "isinstance", "(", "column", ".", "type", ",", "sqlalchemy", ".", "UnicodeText", ")", ":", "if", "column", ".", "type", ".", "length", "is", "None", ":", "attrs", "[", "'type'", "]", "=", "'xsd:string'", "with", "tag", "(", "tb", ",", "'xsd:element'", ",", "attrs", ")", "as", "tb", ":", 
"self", ".", "element_callback", "(", "tb", ",", "column", ")", "return", "tb", "else", ":", "with", "tag", "(", "tb", ",", "'xsd:element'", ",", "attrs", ")", "as", "tb", ":", "with", "tag", "(", "tb", ",", "'xsd:simpleType'", ")", "as", "tb", ":", "with", "tag", "(", "tb", ",", "'xsd:restriction'", ",", "{", "'base'", ":", "'xsd:string'", "}", ")", "as", "tb", ":", "with", "tag", "(", "tb", ",", "'xsd:maxLength'", ",", "{", "'value'", ":", "str", "(", "column", ".", "type", ".", "length", ")", "}", ")", ":", "pass", "self", ".", "element_callback", "(", "tb", ",", "column", ")", "return", "tb", "raise", "UnsupportedColumnTypeError", "(", "column", ".", "type", ")" ]
Add the XSD for a column to tb (a TreeBuilder)
[ "Add", "the", "XSD", "for", "a", "column", "to", "tb", "(", "a", "TreeBuilder", ")" ]
python
train
50.611111
fjwCode/cerium
cerium/androiddriver.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L372-L379
def view_package_path(self, package: str) -> _PATH: '''Print the path to the APK of the given.''' if package not in self.view_packgets_list(): raise NoSuchPackageException( f'There is no such package {package!r}.') output, _ = self._execute( '-s', self.device_sn, 'shell', 'pm', 'path', package) return output[8:-1]
[ "def", "view_package_path", "(", "self", ",", "package", ":", "str", ")", "->", "_PATH", ":", "if", "package", "not", "in", "self", ".", "view_packgets_list", "(", ")", ":", "raise", "NoSuchPackageException", "(", "f'There is no such package {package!r}.'", ")", "output", ",", "_", "=", "self", ".", "_execute", "(", "'-s'", ",", "self", ".", "device_sn", ",", "'shell'", ",", "'pm'", ",", "'path'", ",", "package", ")", "return", "output", "[", "8", ":", "-", "1", "]" ]
Print the path to the APK of the given package.
[ "Print", "the", "path", "to", "the", "APK", "of", "the", "given", "." ]
python
train
47.5
intel-analytics/BigDL
pyspark/bigdl/transform/vision/image.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L36-L41
def transform(self, image_feature, bigdl_type="float"): """ transform ImageFeature """ callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature) return image_feature
[ "def", "transform", "(", "self", ",", "image_feature", ",", "bigdl_type", "=", "\"float\"", ")", ":", "callBigDlFunc", "(", "bigdl_type", ",", "\"transformImageFeature\"", ",", "self", ".", "value", ",", "image_feature", ")", "return", "image_feature" ]
transform ImageFeature
[ "transform", "ImageFeature" ]
python
test
36.666667
tanghaibao/jcvi
jcvi/projects/misc.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/misc.py#L152-L211
def birch(args): """ %prog birch seqids layout Plot birch macro-synteny, with an embedded phylogenetic tree to the right. """ p = OptionParser(birch.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x6") if len(args) != 2: sys.exit(not p.print_help()) seqids, layout = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) K = Karyotype(fig, root, seqids, layout) L = K.layout xs = .79 dt = dict(rectangle=False, circle=False) # Embed a phylogenetic tree to the right coords = {} coords["Amborella"] = (xs, L[0].y) coords["Vitis"] = (xs, L[1].y) coords["Prunus"] = (xs, L[2].y) coords["Betula"] = (xs, L[3].y) coords["Populus"] = (xs, L[4].y) coords["Arabidopsis"] = (xs, L[5].y) coords["fabids"] = join_nodes(root, coords, "Prunus", "Betula", xs, **dt) coords["malvids"] = join_nodes(root, coords, \ "Populus", "Arabidopsis", xs, **dt) coords["rosids"] = join_nodes(root, coords, "fabids", "malvids", xs, **dt) coords["eudicots"] = join_nodes(root, coords, "rosids", "Vitis", xs, **dt) coords["angiosperm"] = join_nodes(root, coords, \ "eudicots", "Amborella", xs, **dt) # Show branch length branch_length(root, coords["Amborella"], coords["angiosperm"], ">160.0") branch_length(root, coords["eudicots"], coords["angiosperm"], ">78.2", va="top") branch_length(root, coords["Vitis"], coords["eudicots"], "138.5") branch_length(root, coords["rosids"], coords["eudicots"], "19.8", va="top") branch_length(root, coords["Prunus"], coords["fabids"], "104.2", ha="right", va="top") branch_length(root, coords["Arabidopsis"], coords["malvids"], "110.2", va="top") branch_length(root, coords["fabids"], coords["rosids"], "19.8", ha="right", va="top") branch_length(root, coords["malvids"], coords["rosids"], "8.5", va="top") root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "birch" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "birch", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "birch", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"8x6\"", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "seqids", ",", "layout", "=", "args", "fig", "=", "plt", ".", "figure", "(", "1", ",", "(", "iopts", ".", "w", ",", "iopts", ".", "h", ")", ")", "root", "=", "fig", ".", "add_axes", "(", "[", "0", ",", "0", ",", "1", ",", "1", "]", ")", "K", "=", "Karyotype", "(", "fig", ",", "root", ",", "seqids", ",", "layout", ")", "L", "=", "K", ".", "layout", "xs", "=", ".79", "dt", "=", "dict", "(", "rectangle", "=", "False", ",", "circle", "=", "False", ")", "# Embed a phylogenetic tree to the right", "coords", "=", "{", "}", "coords", "[", "\"Amborella\"", "]", "=", "(", "xs", ",", "L", "[", "0", "]", ".", "y", ")", "coords", "[", "\"Vitis\"", "]", "=", "(", "xs", ",", "L", "[", "1", "]", ".", "y", ")", "coords", "[", "\"Prunus\"", "]", "=", "(", "xs", ",", "L", "[", "2", "]", ".", "y", ")", "coords", "[", "\"Betula\"", "]", "=", "(", "xs", ",", "L", "[", "3", "]", ".", "y", ")", "coords", "[", "\"Populus\"", "]", "=", "(", "xs", ",", "L", "[", "4", "]", ".", "y", ")", "coords", "[", "\"Arabidopsis\"", "]", "=", "(", "xs", ",", "L", "[", "5", "]", ".", "y", ")", "coords", "[", "\"fabids\"", "]", "=", "join_nodes", "(", "root", ",", "coords", ",", "\"Prunus\"", ",", "\"Betula\"", ",", "xs", ",", "*", "*", "dt", ")", "coords", "[", "\"malvids\"", "]", "=", "join_nodes", "(", "root", ",", "coords", ",", "\"Populus\"", ",", "\"Arabidopsis\"", ",", "xs", ",", "*", "*", "dt", ")", "coords", "[", "\"rosids\"", "]", "=", "join_nodes", "(", "root", ",", "coords", ",", "\"fabids\"", ",", "\"malvids\"", ",", "xs", ",", "*", "*", "dt", ")", "coords", "[", "\"eudicots\"", "]", "=", "join_nodes", "(", "root", ",", "coords", ",", "\"rosids\"", ",", "\"Vitis\"", ",", "xs", ",", "*", "*", "dt", ")", "coords", "[", "\"angiosperm\"", "]", "=", "join_nodes", "(", "root", ",", "coords", ",", "\"eudicots\"", ",", "\"Amborella\"", ",", "xs", ",", "*", "*", "dt", ")", "# Show branch length", "branch_length", "(", "root", ",", "coords", "[", "\"Amborella\"", "]", ",", "coords", "[", "\"angiosperm\"", "]", ",", "\">160.0\"", ")", "branch_length", "(", "root", ",", "coords", "[", "\"eudicots\"", "]", ",", "coords", "[", "\"angiosperm\"", "]", ",", "\">78.2\"", ",", "va", "=", "\"top\"", ")", "branch_length", "(", "root", ",", "coords", "[", "\"Vitis\"", "]", ",", "coords", "[", "\"eudicots\"", "]", ",", "\"138.5\"", ")", "branch_length", "(", "root", ",", "coords", "[", "\"rosids\"", "]", ",", "coords", "[", "\"eudicots\"", "]", ",", "\"19.8\"", ",", "va", "=", "\"top\"", ")", "branch_length", "(", "root", ",", "coords", "[", "\"Prunus\"", "]", ",", "coords", "[", "\"fabids\"", "]", ",", "\"104.2\"", ",", "ha", "=", "\"right\"", ",", "va", "=", "\"top\"", ")", "branch_length", "(", "root", ",", "coords", "[", "\"Arabidopsis\"", "]", ",", "coords", "[", "\"malvids\"", "]", ",", "\"110.2\"", ",", "va", "=", "\"top\"", ")", "branch_length", "(", "root", ",", "coords", "[", "\"fabids\"", "]", ",", "coords", "[", "\"rosids\"", "]", ",", "\"19.8\"", ",", "ha", "=", "\"right\"", ",", "va", "=", "\"top\"", ")", "branch_length", "(", "root", ",", "coords", "[", "\"malvids\"", "]", ",", "coords", "[", "\"rosids\"", "]", ",", "\"8.5\"", ",", "va", "=", "\"top\"", ")", "root", ".", "set_xlim", "(", 
"0", ",", "1", ")", "root", ".", "set_ylim", "(", "0", ",", "1", ")", "root", ".", "set_axis_off", "(", ")", "pf", "=", "\"birch\"", "image_name", "=", "pf", "+", "\".\"", "+", "iopts", ".", "format", "savefig", "(", "image_name", ",", "dpi", "=", "iopts", ".", "dpi", ",", "iopts", "=", "iopts", ")" ]
%prog birch seqids layout Plot birch macro-synteny, with an embedded phylogenetic tree to the right.
[ "%prog", "birch", "seqids", "layout" ]
python
train
37
TUNE-Archive/freight_forwarder
freight_forwarder/container/host_config.py
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container/host_config.py#L418-L434
def port_bindings(self, value): """ { u'8080/tcp': [ { u'host_port': u'8080', u'host_ip': u'' } ] } """ if isinstance(value, (list, dict)): self._port_bindings = self._convert_port_bindings(value) elif value is None: self._port_bindings = None else: raise TypeError('port bindings must be a dict, list, or None. {0} was passed.'.format(type(value)))
[ "def", "port_bindings", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ")", ")", ":", "self", ".", "_port_bindings", "=", "self", ".", "_convert_port_bindings", "(", "value", ")", "elif", "value", "is", "None", ":", "self", ".", "_port_bindings", "=", "None", "else", ":", "raise", "TypeError", "(", "'port bindings must be a dict, list, or None. {0} was passed.'", ".", "format", "(", "type", "(", "value", ")", ")", ")" ]
{ u'8080/tcp': [ { u'host_port': u'8080', u'host_ip': u'' } ] }
[ "{", "u", "8080", "/", "tcp", ":", "[", "{", "u", "host_port", ":", "u", "8080", "u", "host_ip", ":", "u", "}", "]", "}" ]
python
train
32.764706
gitpython-developers/GitPython
git/remote.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/remote.py#L619-L626
def remove(cls, repo, name): """Remove the remote with the given name :return: the passed remote name to remove """ repo.git.remote("rm", name) if isinstance(name, cls): name._clear_cache() return name
[ "def", "remove", "(", "cls", ",", "repo", ",", "name", ")", ":", "repo", ".", "git", ".", "remote", "(", "\"rm\"", ",", "name", ")", "if", "isinstance", "(", "name", ",", "cls", ")", ":", "name", ".", "_clear_cache", "(", ")", "return", "name" ]
Remove the remote with the given name :return: the passed remote name to remove
[ "Remove", "the", "remote", "with", "the", "given", "name", ":", "return", ":", "the", "passed", "remote", "name", "to", "remove" ]
python
train
31.75
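A usage sketch for removing a remote with GitPython; the repository path and remote name are placeholders.

import git

repo = git.Repo('/path/to/repo')     # placeholder repository path
git.Remote.remove(repo, 'upstream')  # runs `git remote rm upstream` and returns the name
# the same operation is also exposed as repo.delete_remote('upstream')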
minhhoit/yacms
yacms/core/templatetags/yacms_tags.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/templatetags/yacms_tags.py#L466-L474
def richtext_filters(content): """ Takes a value edited via the WYSIWYG editor, and passes it through each of the functions specified by the RICHTEXT_FILTERS setting. """ for filter_name in settings.RICHTEXT_FILTERS: filter_func = import_dotted_path(filter_name) content = filter_func(content) return content
[ "def", "richtext_filters", "(", "content", ")", ":", "for", "filter_name", "in", "settings", ".", "RICHTEXT_FILTERS", ":", "filter_func", "=", "import_dotted_path", "(", "filter_name", ")", "content", "=", "filter_func", "(", "content", ")", "return", "content" ]
Takes a value edited via the WYSIWYG editor, and passes it through each of the functions specified by the RICHTEXT_FILTERS setting.
[ "Takes", "a", "value", "edited", "via", "the", "WYSIWYG", "editor", "and", "passes", "it", "through", "each", "of", "the", "functions", "specified", "by", "the", "RICHTEXT_FILTERS", "setting", "." ]
python
train
37.777778
alex-kostirin/pyatomac
atomac/ldtpd/combo_box.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/combo_box.py#L241-L258
def hidelist(self, window_name, object_name): """ Hide combo box list / menu @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer """ object_handle = self._get_object_handle(window_name, object_name) object_handle.activate() object_handle.sendKey(AXKeyCodeConstants.ESCAPE) return 1
[ "def", "hidelist", "(", "self", ",", "window_name", ",", "object_name", ")", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "object_handle", ".", "activate", "(", ")", "object_handle", ".", "sendKey", "(", "AXKeyCodeConstants", ".", "ESCAPE", ")", "return", "1" ]
Hide combo box list / menu @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
[ "Hide", "combo", "box", "list", "/", "menu", "@param", "window_name", ":", "Window", "name", "to", "type", "in", "either", "full", "name", "LDTP", "s", "name", "convention", "or", "a", "Unix", "glob", ".", "@type", "window_name", ":", "string", "@param", "object_name", ":", "Object", "name", "to", "type", "in", "either", "full", "name", "LDTP", "s", "name", "convention", "or", "a", "Unix", "glob", ".", "@type", "object_name", ":", "string" ]
python
valid
35.444444
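A hedged call sketch for hidelist; `core` stands in for whatever object exposes this ldtpd method, and the window and object names below are invented LDTP-style globs, not taken from any real application.

core.hidelist('frmFontDialog', 'cboFontFamily')   # activates the combo box, sends ESCAPE, returns 1 on success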
pikepdf/pikepdf
src/pikepdf/models/metadata.py
https://github.com/pikepdf/pikepdf/blob/07154f4dec007e2e9c0c6a8c07b964fd06bc5f77/src/pikepdf/models/metadata.py#L96-L124
def encode_pdf_date(d: datetime) -> str: """Encode Python datetime object as PDF date string From Adobe pdfmark manual: (D:YYYYMMDDHHmmSSOHH'mm') D: is an optional prefix. YYYY is the year. All fields after the year are optional. MM is the month (01-12), DD is the day (01-31), HH is the hour (00-23), mm are the minutes (00-59), and SS are the seconds (00-59). The remainder of the string defines the relation of local time to GMT. O is either + for a positive difference (local time is later than GMT) or - (minus) for a negative difference. HH' is the absolute value of the offset from GMT in hours, and mm' is the absolute value of the offset in minutes. If no GMT information is specified, the relation between the specified time and GMT is considered unknown. Regardless of whether or not GMT information is specified, the remainder of the string should specify the local time. """ # The formatting of %Y is not consistent as described in # https://bugs.python.org/issue13305 and underspecification in libc. # So explicitly format the year with leading zeros s = "{:04d}".format(d.year) s += d.strftime(r'%m%d%H%M%S') tz = d.strftime('%z') if tz: sign, tz_hours, tz_mins = tz[0], tz[1:3], tz[3:5] s += "{}{}'{}'".format(sign, tz_hours, tz_mins) return s
[ "def", "encode_pdf_date", "(", "d", ":", "datetime", ")", "->", "str", ":", "# The formatting of %Y is not consistent as described in", "# https://bugs.python.org/issue13305 and underspecification in libc.", "# So explicitly format the year with leading zeros", "s", "=", "\"{:04d}\"", ".", "format", "(", "d", ".", "year", ")", "s", "+=", "d", ".", "strftime", "(", "r'%m%d%H%M%S'", ")", "tz", "=", "d", ".", "strftime", "(", "'%z'", ")", "if", "tz", ":", "sign", ",", "tz_hours", ",", "tz_mins", "=", "tz", "[", "0", "]", ",", "tz", "[", "1", ":", "3", "]", ",", "tz", "[", "3", ":", "5", "]", "s", "+=", "\"{}{}'{}'\"", ".", "format", "(", "sign", ",", "tz_hours", ",", "tz_mins", ")", "return", "s" ]
Encode Python datetime object as PDF date string From Adobe pdfmark manual: (D:YYYYMMDDHHmmSSOHH'mm') D: is an optional prefix. YYYY is the year. All fields after the year are optional. MM is the month (01-12), DD is the day (01-31), HH is the hour (00-23), mm are the minutes (00-59), and SS are the seconds (00-59). The remainder of the string defines the relation of local time to GMT. O is either + for a positive difference (local time is later than GMT) or - (minus) for a negative difference. HH' is the absolute value of the offset from GMT in hours, and mm' is the absolute value of the offset in minutes. If no GMT information is specified, the relation between the specified time and GMT is considered unknown. Regardless of whether or not GMT information is specified, the remainder of the string should specify the local time.
[ "Encode", "Python", "datetime", "object", "as", "PDF", "date", "string" ]
python
train
46.37931
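A quick usage sketch for encode_pdf_date, with the expected output worked out from the code above; note that the function itself does not prepend the optional "D:" prefix, so a caller would add it if needed.

from datetime import datetime, timezone, timedelta

ist = timezone(timedelta(hours=5, minutes=30))
encode_pdf_date(datetime(2024, 1, 2, 3, 4, 5, tzinfo=ist))   # -> "20240102030405+05'30'"
encode_pdf_date(datetime(2024, 1, 2, 3, 4, 5))               # naive datetime -> "20240102030405" (no offset suffix)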
SheffieldML/GPy
GPy/util/warping_functions.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/warping_functions.py#L108-L128
def fgrad_y(self, y, return_precalc=False): """ gradient of f w.r.t to y ([N x 1]) :returns: Nx1 vector of derivatives, unless return_precalc is true, then it also returns the precomputed stuff """ d = self.d mpsi = self.psi # vectorized version S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T R = np.tanh(S) D = 1 - (R ** 2) GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T if return_precalc: return GRAD, S, R, D return GRAD
[ "def", "fgrad_y", "(", "self", ",", "y", ",", "return_precalc", "=", "False", ")", ":", "d", "=", "self", ".", "d", "mpsi", "=", "self", ".", "psi", "# vectorized version", "S", "=", "(", "mpsi", "[", ":", ",", "1", "]", "*", "(", "y", "[", ":", ",", ":", ",", "None", "]", "+", "mpsi", "[", ":", ",", "2", "]", ")", ")", ".", "T", "R", "=", "np", ".", "tanh", "(", "S", ")", "D", "=", "1", "-", "(", "R", "**", "2", ")", "GRAD", "=", "(", "d", "+", "(", "mpsi", "[", ":", ",", "0", ":", "1", "]", "[", ":", ",", ":", ",", "None", "]", "*", "mpsi", "[", ":", ",", "1", ":", "2", "]", "[", ":", ",", ":", ",", "None", "]", "*", "D", ")", ".", "sum", "(", "axis", "=", "0", ")", ")", ".", "T", "if", "return_precalc", ":", "return", "GRAD", ",", "S", ",", "R", ",", "D", "return", "GRAD" ]
gradient of f w.r.t to y ([N x 1]) :returns: Nx1 vector of derivatives, unless return_precalc is true, then it also returns the precomputed stuff
[ "gradient", "of", "f", "w", ".", "r", ".", "t", "to", "y", "(", "[", "N", "x", "1", "]", ")" ]
python
train
26.857143
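For readers checking the math in fgrad_y: assuming each row of self.psi holds one (a, b, c) triple and self.d is the linear coefficient (a reading inferred from the indexing, not stated in the snippet), the warp is f(y) = d*y + sum_i a_i*tanh(b_i*(y + c_i)), so its derivative is f'(y) = d + sum_i a_i*b_i*(1 - tanh(b_i*(y + c_i))**2). That is exactly what GRAD assembles: S holds b_i*(y + c_i), R = tanh(S), D = 1 - R**2, and the sum over terms is taken along axis 0.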
ibis-project/ibis
ibis/expr/rules.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/rules.py#L170-L178
def instance_of(klass, arg): """Require that a value has a particular Python type.""" if not isinstance(arg, klass): raise com.IbisTypeError( 'Given argument with type {} is not an instance of {}'.format( type(arg), klass ) ) return arg
[ "def", "instance_of", "(", "klass", ",", "arg", ")", ":", "if", "not", "isinstance", "(", "arg", ",", "klass", ")", ":", "raise", "com", ".", "IbisTypeError", "(", "'Given argument with type {} is not an instance of {}'", ".", "format", "(", "type", "(", "arg", ")", ",", "klass", ")", ")", "return", "arg" ]
Require that a value has a particular Python type.
[ "Require", "that", "a", "value", "has", "a", "particular", "Python", "type", "." ]
python
train
32.888889
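A behaviour sketch for instance_of, read directly off the code above:

instance_of(str, 'table_name')   # returns 'table_name' unchanged
instance_of(int, 'table_name')   # raises com.IbisTypeError: Given argument with type <class 'str'> is not an instance of <class 'int'>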
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L1191-L1201
def get_tab_tip(self, filename, is_modified=None, is_readonly=None): """Return tab menu title""" text = u"%s — %s" text = self.__modified_readonly_title(text, is_modified, is_readonly) if self.tempfile_path is not None\ and filename == encoding.to_unicode_from_fs(self.tempfile_path): temp_file_str = to_text_string(_("Temporary file")) return text % (temp_file_str, self.tempfile_path) else: return text % (osp.basename(filename), osp.dirname(filename))
[ "def", "get_tab_tip", "(", "self", ",", "filename", ",", "is_modified", "=", "None", ",", "is_readonly", "=", "None", ")", ":", "text", "=", "u\"%s — %s\"\r", "text", "=", "self", ".", "__modified_readonly_title", "(", "text", ",", "is_modified", ",", "is_readonly", ")", "if", "self", ".", "tempfile_path", "is", "not", "None", "and", "filename", "==", "encoding", ".", "to_unicode_from_fs", "(", "self", ".", "tempfile_path", ")", ":", "temp_file_str", "=", "to_text_string", "(", "_", "(", "\"Temporary file\"", ")", ")", "return", "text", "%", "(", "temp_file_str", ",", "self", ".", "tempfile_path", ")", "else", ":", "return", "text", "%", "(", "osp", ".", "basename", "(", "filename", ")", ",", "osp", ".", "dirname", "(", "filename", ")", ")" ]
Return tab menu title
[ "Return", "tab", "menu", "title" ]
python
train
53.363636
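A rough sketch of what get_tab_tip produces, with an invented path and instance name; the modified/read-only decoration applied by __modified_readonly_title is not shown, since that helper is outside this snippet.

editorstack.get_tab_tip('/home/user/project/script.py')
# -> "script.py — /home/user/project" (plus modified/read-only markers when those flags are set)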
luckydonald/pytgbot
pytgbot/api_types/receivable/updates.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/receivable/updates.py#L1295-L1311
def from_array(array): """ Deserialize a new ResponseParameters from a given dictionary. :return: new ResponseParameters instance. :rtype: ResponseParameters """ if array is None or not array: return None # end if assert_type_or_raise(array, dict, parameter_name="array") data = {} data['migrate_to_chat_id'] = int(array.get('migrate_to_chat_id')) if array.get('migrate_to_chat_id') is not None else None data['retry_after'] = int(array.get('retry_after')) if array.get('retry_after') is not None else None data['_raw'] = array return ResponseParameters(**data)
[ "def", "from_array", "(", "array", ")", ":", "if", "array", "is", "None", "or", "not", "array", ":", "return", "None", "# end if", "assert_type_or_raise", "(", "array", ",", "dict", ",", "parameter_name", "=", "\"array\"", ")", "data", "=", "{", "}", "data", "[", "'migrate_to_chat_id'", "]", "=", "int", "(", "array", ".", "get", "(", "'migrate_to_chat_id'", ")", ")", "if", "array", ".", "get", "(", "'migrate_to_chat_id'", ")", "is", "not", "None", "else", "None", "data", "[", "'retry_after'", "]", "=", "int", "(", "array", ".", "get", "(", "'retry_after'", ")", ")", "if", "array", ".", "get", "(", "'retry_after'", ")", "is", "not", "None", "else", "None", "data", "[", "'_raw'", "]", "=", "array", "return", "ResponseParameters", "(", "*", "*", "data", ")" ]
Deserialize a new ResponseParameters from a given dictionary. :return: new ResponseParameters instance. :rtype: ResponseParameters
[ "Deserialize", "a", "new", "ResponseParameters", "from", "a", "given", "dictionary", "." ]
python
train
39
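A hedged round-trip sketch for from_array; the numeric values are invented, and attribute access on the returned object assumes ResponseParameters stores the two fields under the same names as the input keys.

params = ResponseParameters.from_array({'migrate_to_chat_id': -1001234567890, 'retry_after': 5})
params.retry_after                      # 5, coerced to int by from_array
ResponseParameters.from_array({})       # empty or None input -> None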