repo               string, length 7 to 54
path               string, length 4 to 192
url                string, length 87 to 284
code               string, length 78 to 104k
code_tokens        list
docstring          string, length 1 to 46.9k
docstring_tokens   list
language           string, 1 class
partition          string, 3 classes
shi-cong/PYSTUDY
PYSTUDY/net/fingerlib.py
https://github.com/shi-cong/PYSTUDY/blob/c8da7128ea18ecaa5849f2066d321e70d6f97f70/PYSTUDY/net/fingerlib.py#L7-L33
def get_host_finger(protocol, ip, port, timeout=5): """ 获取远程主机特定端口下服务的指纹 :param protocol: 协议,tcp / udp :params ip: ip :params port: 端口 :return: 服务器指纹 """ client = None msg = b"Hello, Server\r\n" if protocol == 'tcp': # tcp 协议 client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.settimeout(timeout) client.connect((ip, port)) client.send(msg) elif protocol == 'udp': # udp 协议 client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) client.settimeout(timeout) client.sendto(msg, (ip, port)) else: raise Exception('协议不支持') serverFinger = client.recv(1024) client.close() return serverFinger
[ "def", "get_host_finger", "(", "protocol", ",", "ip", ",", "port", ",", "timeout", "=", "5", ")", ":", "client", "=", "None", "msg", "=", "b\"Hello, Server\\r\\n\"", "if", "protocol", "==", "'tcp'", ":", "# tcp 协议", "client", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "client", ".", "settimeout", "(", "timeout", ")", "client", ".", "connect", "(", "(", "ip", ",", "port", ")", ")", "client", ".", "send", "(", "msg", ")", "elif", "protocol", "==", "'udp'", ":", "# udp 协议", "client", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "client", ".", "settimeout", "(", "timeout", ")", "client", ".", "sendto", "(", "msg", ",", "(", "ip", ",", "port", ")", ")", "else", ":", "raise", "Exception", "(", "'协议不支持')", "", "serverFinger", "=", "client", ".", "recv", "(", "1024", ")", "client", ".", "close", "(", ")", "return", "serverFinger" ]
获取远程主机特定端口下服务的指纹 :param protocol: 协议,tcp / udp :params ip: ip :params port: 端口 :return: 服务器指纹
[ "获取远程主机特定端口下服务的指纹", ":", "param", "protocol", ":", "协议,tcp", "/", "udp", ":", "params", "ip", ":", "ip", ":", "params", "port", ":", "端口", ":", "return", ":", "服务器指纹" ]
python
train
newville/wxmplot
wxmplot/utils.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/utils.py#L68-L76
def pack(window, sizer, expand=1.1): "simple wxPython pack function" tsize = window.GetSize() msize = window.GetMinSize() window.SetSizer(sizer) sizer.Fit(window) nsize = (10*int(expand*(max(msize[0], tsize[0])/10)), 10*int(expand*(max(msize[1], tsize[1])/10.))) window.SetSize(nsize)
[ "def", "pack", "(", "window", ",", "sizer", ",", "expand", "=", "1.1", ")", ":", "tsize", "=", "window", ".", "GetSize", "(", ")", "msize", "=", "window", ".", "GetMinSize", "(", ")", "window", ".", "SetSizer", "(", "sizer", ")", "sizer", ".", "Fit", "(", "window", ")", "nsize", "=", "(", "10", "*", "int", "(", "expand", "*", "(", "max", "(", "msize", "[", "0", "]", ",", "tsize", "[", "0", "]", ")", "/", "10", ")", ")", ",", "10", "*", "int", "(", "expand", "*", "(", "max", "(", "msize", "[", "1", "]", ",", "tsize", "[", "1", "]", ")", "/", "10.", ")", ")", ")", "window", ".", "SetSize", "(", "nsize", ")" ]
simple wxPython pack function
[ "simple", "wxPython", "pack", "function" ]
python
train
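Illustrative note (not part of the dataset record above): the pack function takes the larger of the window's current and minimum size per axis, expands it by a factor, and rounds down to a multiple of 10. A minimal sketch of just that sizing arithmetic, with no wxPython dependency; the sample sizes are invented.

    def padded_size(tsize, msize, expand=1.1):
        # Take the larger of actual/minimum size per axis, expand it,
        # then round down to a multiple of 10 pixels.
        return (10 * int(expand * (max(msize[0], tsize[0]) / 10)),
                10 * int(expand * (max(msize[1], tsize[1]) / 10.0)))

    print(padded_size(tsize=(392, 240), msize=(400, 300)))  # (440, 330)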
amreuland/PyVDF
PyVDF/__init__.py
https://github.com/amreuland/PyVDF/blob/3d546178153204a1286c61be8f641b59f6c2750e/PyVDF/__init__.py#L343-L367
def edit(self, path, value): """ Edit a key value :param path: The path key for the value :type path: :py:obj:`str` :param value: The value to be set :type value: :py:obj:`str` """ _dict = PyVDF.__UseDict p = [re.sub('[\[\]]', '', w) for w in PyVDF.__RE_Path_Seperator.findall(path)] array = self.getData() a = array for c in p[:-1]: try: if not isinstance(a[c], dict): a[c] = _dict() except KeyError: a[c] = _dict() a = a[c] if value == None: a.pop(p[-1], None) else: a[p[-1]] = value self.__data = array
[ "def", "edit", "(", "self", ",", "path", ",", "value", ")", ":", "_dict", "=", "PyVDF", ".", "__UseDict", "p", "=", "[", "re", ".", "sub", "(", "'[\\[\\]]'", ",", "''", ",", "w", ")", "for", "w", "in", "PyVDF", ".", "__RE_Path_Seperator", ".", "findall", "(", "path", ")", "]", "array", "=", "self", ".", "getData", "(", ")", "a", "=", "array", "for", "c", "in", "p", "[", ":", "-", "1", "]", ":", "try", ":", "if", "not", "isinstance", "(", "a", "[", "c", "]", ",", "dict", ")", ":", "a", "[", "c", "]", "=", "_dict", "(", ")", "except", "KeyError", ":", "a", "[", "c", "]", "=", "_dict", "(", ")", "a", "=", "a", "[", "c", "]", "if", "value", "==", "None", ":", "a", ".", "pop", "(", "p", "[", "-", "1", "]", ",", "None", ")", "else", ":", "a", "[", "p", "[", "-", "1", "]", "]", "=", "value", "self", ".", "__data", "=", "array" ]
Edit a key value :param path: The path key for the value :type path: :py:obj:`str` :param value: The value to be set :type value: :py:obj:`str`
[ "Edit", "a", "key", "value" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/topo_disc/pub_lldp_api.py#L139-L146
def get_remote_chassis_id_mac(self, tlv_data): """Returns Remote Chassis ID MAC from the TLV. """ ret, parsed_val = self._check_common_tlv_format( tlv_data, "MAC:", "Chassis ID TLV") if not ret: return None mac = parsed_val[1].split('\n') return mac[0].strip()
[ "def", "get_remote_chassis_id_mac", "(", "self", ",", "tlv_data", ")", ":", "ret", ",", "parsed_val", "=", "self", ".", "_check_common_tlv_format", "(", "tlv_data", ",", "\"MAC:\"", ",", "\"Chassis ID TLV\"", ")", "if", "not", "ret", ":", "return", "None", "mac", "=", "parsed_val", "[", "1", "]", ".", "split", "(", "'\\n'", ")", "return", "mac", "[", "0", "]", ".", "strip", "(", ")" ]
Returns Remote Chassis ID MAC from the TLV.
[ "Returns", "Remote", "Chassis", "ID", "MAC", "from", "the", "TLV", "." ]
python
train
ewels/MultiQC
multiqc/utils/config.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/utils/config.py#L212-L220
def update_dict(d, u): """ Recursively updates nested dict d from nested dict u """ for key, val in u.items(): if isinstance(val, collections.Mapping): d[key] = update_dict(d.get(key, {}), val) else: d[key] = u[key] return d
[ "def", "update_dict", "(", "d", ",", "u", ")", ":", "for", "key", ",", "val", "in", "u", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "collections", ".", "Mapping", ")", ":", "d", "[", "key", "]", "=", "update_dict", "(", "d", ".", "get", "(", "key", ",", "{", "}", ")", ",", "val", ")", "else", ":", "d", "[", "key", "]", "=", "u", "[", "key", "]", "return", "d" ]
Recursively updates nested dict d from nested dict u
[ "Recursively", "updates", "nested", "dict", "d", "from", "nested", "dict", "u" ]
python
train
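Illustrative note (not part of the dataset record above): update_dict recursively merges one nested dict into another. A small standalone sketch of the same idea, not the MultiQC function itself; collections.abc.Mapping is used here in place of the older collections.Mapping, and the sample dicts are made up.

    from collections.abc import Mapping

    def merge_nested(d, u):
        # Recursively copy keys from u into d, descending into nested mappings.
        for key, val in u.items():
            if isinstance(val, Mapping):
                d[key] = merge_nested(d.get(key, {}), val)
            else:
                d[key] = val
        return d

    defaults = {"plot": {"width": 800, "height": 600}, "title": "run"}
    overrides = {"plot": {"height": 450}}
    print(merge_nested(defaults, overrides))
    # {'plot': {'width': 800, 'height': 450}, 'title': 'run'}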
Jajcus/pyxmpp2
pyxmpp2/ext/vcard.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/vcard.py#L641-L647
def rfc2426(self): """RFC2426-encode the field content. :return: the field in the RFC 2426 format. :returntype: `str`""" return rfc2425encode("label",u"\n".join(self.lines), {"type":",".join(self.type)})
[ "def", "rfc2426", "(", "self", ")", ":", "return", "rfc2425encode", "(", "\"label\"", ",", "u\"\\n\"", ".", "join", "(", "self", ".", "lines", ")", ",", "{", "\"type\"", ":", "\",\"", ".", "join", "(", "self", ".", "type", ")", "}", ")" ]
RFC2426-encode the field content. :return: the field in the RFC 2426 format. :returntype: `str`
[ "RFC2426", "-", "encode", "the", "field", "content", "." ]
python
valid
thoughtworksarts/EmoPy
EmoPy/src/neuralnets.py
https://github.com/thoughtworksarts/EmoPy/blob/a0ab97b3719ebe0a9de9bfc5adae5e46c9b77fd7/EmoPy/src/neuralnets.py#L74-L92
def _init_model(self): """ Initialize base model from Keras and add top layers to match number of training emotions labels. :return: """ base_model = self._get_base_model() top_layer_model = base_model.output top_layer_model = GlobalAveragePooling2D()(top_layer_model) top_layer_model = Dense(1024, activation='relu')(top_layer_model) prediction_layer = Dense(output_dim=len(self.emotion_map.keys()), activation='softmax')(top_layer_model) model = Model(input=base_model.input, output=prediction_layer) print(model.summary()) for layer in base_model.layers: layer.trainable = False model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) self.model = model
[ "def", "_init_model", "(", "self", ")", ":", "base_model", "=", "self", ".", "_get_base_model", "(", ")", "top_layer_model", "=", "base_model", ".", "output", "top_layer_model", "=", "GlobalAveragePooling2D", "(", ")", "(", "top_layer_model", ")", "top_layer_model", "=", "Dense", "(", "1024", ",", "activation", "=", "'relu'", ")", "(", "top_layer_model", ")", "prediction_layer", "=", "Dense", "(", "output_dim", "=", "len", "(", "self", ".", "emotion_map", ".", "keys", "(", ")", ")", ",", "activation", "=", "'softmax'", ")", "(", "top_layer_model", ")", "model", "=", "Model", "(", "input", "=", "base_model", ".", "input", ",", "output", "=", "prediction_layer", ")", "print", "(", "model", ".", "summary", "(", ")", ")", "for", "layer", "in", "base_model", ".", "layers", ":", "layer", ".", "trainable", "=", "False", "model", ".", "compile", "(", "optimizer", "=", "'rmsprop'", ",", "loss", "=", "'categorical_crossentropy'", ",", "metrics", "=", "[", "'accuracy'", "]", ")", "self", ".", "model", "=", "model" ]
Initialize base model from Keras and add top layers to match number of training emotions labels. :return:
[ "Initialize", "base", "model", "from", "Keras", "and", "add", "top", "layers", "to", "match", "number", "of", "training", "emotions", "labels", ".", ":", "return", ":" ]
python
train
hobson/aima
aima/learning.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/learning.py#L196-L203
def PluralityLearner(dataset): """A very dumb algorithm: always pick the result that was most popular in the training data. Makes a baseline for comparison.""" most_popular = mode([e[dataset.target] for e in dataset.examples]) def predict(example): "Always return same result: the most popular from the training set." return most_popular return predict
[ "def", "PluralityLearner", "(", "dataset", ")", ":", "most_popular", "=", "mode", "(", "[", "e", "[", "dataset", ".", "target", "]", "for", "e", "in", "dataset", ".", "examples", "]", ")", "def", "predict", "(", "example", ")", ":", "\"Always return same result: the most popular from the training set.\"", "return", "most_popular", "return", "predict" ]
A very dumb algorithm: always pick the result that was most popular in the training data. Makes a baseline for comparison.
[ "A", "very", "dumb", "algorithm", ":", "always", "pick", "the", "result", "that", "was", "most", "popular", "in", "the", "training", "data", ".", "Makes", "a", "baseline", "for", "comparison", "." ]
python
valid
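Illustrative note (not part of the dataset record above): PluralityLearner memorises the most frequent target label and always predicts it. A standard-library sketch of the same baseline; the training labels are invented.

    from collections import Counter

    def plurality_learner(labels):
        # Pick the most frequent label once, then always predict it.
        most_popular = Counter(labels).most_common(1)[0][0]
        return lambda example: most_popular

    predict = plurality_learner(["spam", "ham", "spam", "spam", "ham"])
    print(predict({"subject": "anything"}))  # spam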
noxdafox/vminspect
vminspect/comparator.py
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/comparator.py#L377-L384
def files_size(fs0, fs1, files): """Gets the file size of the given files.""" for file_meta in files['deleted_files']: file_meta['size'] = fs0.stat(file_meta['path'])['size'] for file_meta in files['created_files'] + files['modified_files']: file_meta['size'] = fs1.stat(file_meta['path'])['size'] return files
[ "def", "files_size", "(", "fs0", ",", "fs1", ",", "files", ")", ":", "for", "file_meta", "in", "files", "[", "'deleted_files'", "]", ":", "file_meta", "[", "'size'", "]", "=", "fs0", ".", "stat", "(", "file_meta", "[", "'path'", "]", ")", "[", "'size'", "]", "for", "file_meta", "in", "files", "[", "'created_files'", "]", "+", "files", "[", "'modified_files'", "]", ":", "file_meta", "[", "'size'", "]", "=", "fs1", ".", "stat", "(", "file_meta", "[", "'path'", "]", ")", "[", "'size'", "]", "return", "files" ]
Gets the file size of the given files.
[ "Gets", "the", "file", "size", "of", "the", "given", "files", "." ]
python
train
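Illustrative note (not part of the dataset record above): files_size annotates each file entry with its size by calling stat() on one of two filesystem handles. A rough sketch of that control flow with stub filesystem objects standing in for vminspect's real ones; the StubFS class and all paths/sizes are invented for illustration.

    class StubFS:
        # Minimal stand-in exposing the stat(path) -> {'size': ...} interface used above.
        def __init__(self, sizes):
            self.sizes = sizes
        def stat(self, path):
            return {'size': self.sizes[path]}

    fs0 = StubFS({'/old.log': 120})
    fs1 = StubFS({'/new.cfg': 64, '/app.bin': 2048})
    files = {'deleted_files': [{'path': '/old.log'}],
             'created_files': [{'path': '/new.cfg'}],
             'modified_files': [{'path': '/app.bin'}]}

    for meta in files['deleted_files']:
        meta['size'] = fs0.stat(meta['path'])['size']
    for meta in files['created_files'] + files['modified_files']:
        meta['size'] = fs1.stat(meta['path'])['size']
    print(files['created_files'])  # [{'path': '/new.cfg', 'size': 64}]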
Alignak-monitoring/alignak
alignak/objects/item.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/item.py#L1390-L1425
def explode_contact_groups_into_contacts(item, contactgroups): """ Get all contacts of contact_groups and put them in contacts container :param item: item where have contact_groups property :type item: object :param contactgroups: all contactgroups object :type contactgroups: alignak.objects.contactgroup.Contactgroups :return: None """ if not hasattr(item, 'contact_groups'): return # TODO : See if we can remove this if cgnames = '' if item.contact_groups: if isinstance(item.contact_groups, list): cgnames = item.contact_groups else: cgnames = item.contact_groups.split(',') cgnames = strip_and_uniq(cgnames) for cgname in cgnames: contactgroup = contactgroups.find_by_name(cgname) if not contactgroup: item.add_error("The contact group '%s' defined on the %s '%s' do not exist" % (cgname, item.__class__.my_type, item.get_name())) continue cnames = contactgroups.get_members_of_group(cgname) # We add contacts into our contacts if cnames: if hasattr(item, 'contacts'): # Fix #1054 - bad contact explosion # item.contacts.extend(cnames) item.contacts = item.contacts + cnames else: item.contacts = cnames
[ "def", "explode_contact_groups_into_contacts", "(", "item", ",", "contactgroups", ")", ":", "if", "not", "hasattr", "(", "item", ",", "'contact_groups'", ")", ":", "return", "# TODO : See if we can remove this if", "cgnames", "=", "''", "if", "item", ".", "contact_groups", ":", "if", "isinstance", "(", "item", ".", "contact_groups", ",", "list", ")", ":", "cgnames", "=", "item", ".", "contact_groups", "else", ":", "cgnames", "=", "item", ".", "contact_groups", ".", "split", "(", "','", ")", "cgnames", "=", "strip_and_uniq", "(", "cgnames", ")", "for", "cgname", "in", "cgnames", ":", "contactgroup", "=", "contactgroups", ".", "find_by_name", "(", "cgname", ")", "if", "not", "contactgroup", ":", "item", ".", "add_error", "(", "\"The contact group '%s' defined on the %s '%s' do not exist\"", "%", "(", "cgname", ",", "item", ".", "__class__", ".", "my_type", ",", "item", ".", "get_name", "(", ")", ")", ")", "continue", "cnames", "=", "contactgroups", ".", "get_members_of_group", "(", "cgname", ")", "# We add contacts into our contacts", "if", "cnames", ":", "if", "hasattr", "(", "item", ",", "'contacts'", ")", ":", "# Fix #1054 - bad contact explosion", "# item.contacts.extend(cnames)", "item", ".", "contacts", "=", "item", ".", "contacts", "+", "cnames", "else", ":", "item", ".", "contacts", "=", "cnames" ]
Get all contacts of contact_groups and put them in contacts container :param item: item where have contact_groups property :type item: object :param contactgroups: all contactgroups object :type contactgroups: alignak.objects.contactgroup.Contactgroups :return: None
[ "Get", "all", "contacts", "of", "contact_groups", "and", "put", "them", "in", "contacts", "container" ]
python
train
PyGithub/PyGithub
github/Repository.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L1535-L1555
def get_views_traffic(self, per=github.GithubObject.NotSet): """ :calls: `GET /repos/:owner/:repo/traffic/views <https://developer.github.com/v3/repos/traffic/>`_ :param per: string, must be one of day or week, day by default :rtype: None or list of :class:`github.View.View` """ assert per is github.GithubObject.NotSet or (isinstance(per, (str, unicode)) and (per == "day" or per == "week")), "per must be day or week, day by default" url_parameters = dict() if per is not github.GithubObject.NotSet: url_parameters["per"] = per headers, data = self._requester.requestJsonAndCheck( "GET", self.url + "/traffic/views", parameters=url_parameters ) if (isinstance(data, dict)) and ("views" in data) and (isinstance(data["views"], list)): data["views"] = [ github.View.View(self._requester, headers, item, completed=True) for item in data["views"] ] return data
[ "def", "get_views_traffic", "(", "self", ",", "per", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "per", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "(", "isinstance", "(", "per", ",", "(", "str", ",", "unicode", ")", ")", "and", "(", "per", "==", "\"day\"", "or", "per", "==", "\"week\"", ")", ")", ",", "\"per must be day or week, day by default\"", "url_parameters", "=", "dict", "(", ")", "if", "per", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "url_parameters", "[", "\"per\"", "]", "=", "per", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"GET\"", ",", "self", ".", "url", "+", "\"/traffic/views\"", ",", "parameters", "=", "url_parameters", ")", "if", "(", "isinstance", "(", "data", ",", "dict", ")", ")", "and", "(", "\"views\"", "in", "data", ")", "and", "(", "isinstance", "(", "data", "[", "\"views\"", "]", ",", "list", ")", ")", ":", "data", "[", "\"views\"", "]", "=", "[", "github", ".", "View", ".", "View", "(", "self", ".", "_requester", ",", "headers", ",", "item", ",", "completed", "=", "True", ")", "for", "item", "in", "data", "[", "\"views\"", "]", "]", "return", "data" ]
:calls: `GET /repos/:owner/:repo/traffic/views <https://developer.github.com/v3/repos/traffic/>`_ :param per: string, must be one of day or week, day by default :rtype: None or list of :class:`github.View.View`
[ ":", "calls", ":", "GET", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "traffic", "/", "views", "<https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "repos", "/", "traffic", "/", ">", "_", ":", "param", "per", ":", "string", "must", "be", "one", "of", "day", "or", "week", "day", "by", "default", ":", "rtype", ":", "None", "or", "list", "of", ":", "class", ":", "github", ".", "View", ".", "View" ]
python
train
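Illustrative note (not part of the dataset record above): get_views_traffic is PyGithub's wrapper around the repository traffic/views endpoint. A hedged usage sketch; the token and repository name are placeholders, and a token with access to the repository's traffic data is assumed.

    from github import Github

    gh = Github("ghp_your_token_here")          # placeholder token
    repo = gh.get_repo("octocat/Hello-World")   # placeholder repository
    traffic = repo.get_views_traffic(per="week")
    # traffic is a dict; the 'views' entry holds github.View.View objects
    for view in traffic["views"]:
        print(view.timestamp, view.count, view.uniques)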
aeroxis/sultan
src/sultan/result.py
https://github.com/aeroxis/sultan/blob/65b4271a161d6c19a9eb0170b5a95832a139ab7f/src/sultan/result.py#L225-L233
def print_stdout(self, always_print=False): """ Prints the stdout to console - if there is any stdout, otherwise does nothing. :param always_print: print the stdout, even if there is nothing in the buffer (default: false) """ if self.__stdout or always_print: self.__echo.info("--{ STDOUT }---" + "-" * 100) self.__format_lines_info(self.stdout) self.__echo.info("---------------" + "-" * 100)
[ "def", "print_stdout", "(", "self", ",", "always_print", "=", "False", ")", ":", "if", "self", ".", "__stdout", "or", "always_print", ":", "self", ".", "__echo", ".", "info", "(", "\"--{ STDOUT }---\"", "+", "\"-\"", "*", "100", ")", "self", ".", "__format_lines_info", "(", "self", ".", "stdout", ")", "self", ".", "__echo", ".", "info", "(", "\"---------------\"", "+", "\"-\"", "*", "100", ")" ]
Prints the stdout to console - if there is any stdout, otherwise does nothing. :param always_print: print the stdout, even if there is nothing in the buffer (default: false)
[ "Prints", "the", "stdout", "to", "console", "-", "if", "there", "is", "any", "stdout", "otherwise", "does", "nothing", ".", ":", "param", "always_print", ":", "print", "the", "stdout", "even", "if", "there", "is", "nothing", "in", "the", "buffer", "(", "default", ":", "false", ")" ]
python
valid
pyviz/holoviews
holoviews/util/__init__.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/__init__.py#L257-L264
def _cellmagic(cls, options, obj, strict=False): "Deprecated, not expected to be used by any current code" options, failure = cls._process_magic(options, strict) if failure: return obj if not isinstance(obj, Dimensioned): return obj else: return StoreOptions.set_options(obj, options)
[ "def", "_cellmagic", "(", "cls", ",", "options", ",", "obj", ",", "strict", "=", "False", ")", ":", "options", ",", "failure", "=", "cls", ".", "_process_magic", "(", "options", ",", "strict", ")", "if", "failure", ":", "return", "obj", "if", "not", "isinstance", "(", "obj", ",", "Dimensioned", ")", ":", "return", "obj", "else", ":", "return", "StoreOptions", ".", "set_options", "(", "obj", ",", "options", ")" ]
Deprecated, not expected to be used by any current code
[ "Deprecated", "not", "expected", "to", "be", "used", "by", "any", "current", "code" ]
python
train
synw/dataswim
dataswim/data/clean.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/clean.py#L159-L174
def to_float(self, col: str, **kwargs): """ Convert colums values to float :param col: name of the colum :type col: str, at least one :param \*\*kwargs: keyword arguments for ``df.astype`` :type \*\*kwargs: optional :example: ``ds.to_float("mycol1")`` """ try: self.df[col] = self.df[col].astype(np.float64, **kwargs) self.ok("Converted column values to float") except Exception as e: self.err(e, "Error converting to float")
[ "def", "to_float", "(", "self", ",", "col", ":", "str", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "df", "[", "col", "]", "=", "self", ".", "df", "[", "col", "]", ".", "astype", "(", "np", ".", "float64", ",", "*", "*", "kwargs", ")", "self", ".", "ok", "(", "\"Converted column values to float\"", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Error converting to float\"", ")" ]
Convert colums values to float :param col: name of the colum :type col: str, at least one :param \*\*kwargs: keyword arguments for ``df.astype`` :type \*\*kwargs: optional :example: ``ds.to_float("mycol1")``
[ "Convert", "colums", "values", "to", "float" ]
python
train
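Illustrative note (not part of the dataset record above): to_float is a thin wrapper around pandas' astype. A minimal pandas sketch of the underlying conversion; the column name and data are invented.

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"mycol1": ["1", "2.5", "3"]})
    df["mycol1"] = df["mycol1"].astype(np.float64)
    print(df["mycol1"].dtype)  # float64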
Locu/chronology
pykronos/pykronos/utils/cache.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/pykronos/pykronos/utils/cache.py#L120-L130
def _bucket_time(self, event_time): """ The seconds since epoch that represent a computed bucket. An event bucket is the time of the earliest possible event for that `bucket_width`. Example: if `bucket_width = timedelta(minutes=10)`, bucket times will be the number of seconds since epoch at 12:00, 12:10, ... on each day. """ event_time = kronos_time_to_epoch_time(event_time) return event_time - (event_time % self._bucket_width)
[ "def", "_bucket_time", "(", "self", ",", "event_time", ")", ":", "event_time", "=", "kronos_time_to_epoch_time", "(", "event_time", ")", "return", "event_time", "-", "(", "event_time", "%", "self", ".", "_bucket_width", ")" ]
The seconds since epoch that represent a computed bucket. An event bucket is the time of the earliest possible event for that `bucket_width`. Example: if `bucket_width = timedelta(minutes=10)`, bucket times will be the number of seconds since epoch at 12:00, 12:10, ... on each day.
[ "The", "seconds", "since", "epoch", "that", "represent", "a", "computed", "bucket", "." ]
python
train
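Illustrative note (not part of the dataset record above): _bucket_time floors an event time to the start of its bucket. The same arithmetic with plain epoch seconds and a 10-minute bucket width; kronos_time_to_epoch_time is skipped since plain epoch seconds are assumed, and the timestamp is arbitrary.

    bucket_width = 10 * 60          # ten minutes, in seconds
    event_time = 1_700_000_123      # arbitrary epoch timestamp
    bucket_time = event_time - (event_time % bucket_width)
    print(bucket_time)              # 1699999800, the enclosing 10-minute boundary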
gawel/irc3
irc3/base.py
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/base.py#L156-L171
def attach_events(self, *events, **kwargs): """Attach one or more events to the bot instance""" reg = self.registry insert = 'insert' in kwargs for e in events: cregexp = e.compile(self.config) regexp = getattr(e.regexp, 're', e.regexp) if regexp not in reg.events[e.iotype]: if insert: reg.events_re[e.iotype].insert(0, (regexp, cregexp)) else: reg.events_re[e.iotype].append((regexp, cregexp)) if insert: reg.events[e.iotype][regexp].insert(0, e) else: reg.events[e.iotype][regexp].append(e)
[ "def", "attach_events", "(", "self", ",", "*", "events", ",", "*", "*", "kwargs", ")", ":", "reg", "=", "self", ".", "registry", "insert", "=", "'insert'", "in", "kwargs", "for", "e", "in", "events", ":", "cregexp", "=", "e", ".", "compile", "(", "self", ".", "config", ")", "regexp", "=", "getattr", "(", "e", ".", "regexp", ",", "'re'", ",", "e", ".", "regexp", ")", "if", "regexp", "not", "in", "reg", ".", "events", "[", "e", ".", "iotype", "]", ":", "if", "insert", ":", "reg", ".", "events_re", "[", "e", ".", "iotype", "]", ".", "insert", "(", "0", ",", "(", "regexp", ",", "cregexp", ")", ")", "else", ":", "reg", ".", "events_re", "[", "e", ".", "iotype", "]", ".", "append", "(", "(", "regexp", ",", "cregexp", ")", ")", "if", "insert", ":", "reg", ".", "events", "[", "e", ".", "iotype", "]", "[", "regexp", "]", ".", "insert", "(", "0", ",", "e", ")", "else", ":", "reg", ".", "events", "[", "e", ".", "iotype", "]", "[", "regexp", "]", ".", "append", "(", "e", ")" ]
Attach one or more events to the bot instance
[ "Attach", "one", "or", "more", "events", "to", "the", "bot", "instance" ]
python
train
google/grr
grr/core/grr_response_core/lib/parsers/config_file.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/config_file.py#L637-L660
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken, knowledge_base): """Parse the mount command output.""" _ = stderr, time_taken, args, knowledge_base # Unused. self.CheckReturn(cmd, return_val) result = rdf_protodict.AttributedDict() for entry in self._field_parser.ParseEntries(stdout): line_str = " ".join(entry) mount_rslt = self.mount_re.match(line_str) if mount_rslt: device, mount_point, fs_type, option_str = mount_rslt.groups() result = rdf_client_fs.Filesystem() result.device = device result.mount_point = mount_point result.type = fs_type # Parse these options as a dict as some items may be key/values. # KeyValue parser uses OrderedDict as the native parser method. Use it. options = KeyValueParser(term=",").ParseToOrderedDict(option_str) # Keys without values get assigned [] by default. Because these keys are # actually true, if declared, change any [] values to True. for k, v in iteritems(options): options[k] = v or [True] result.options = rdf_protodict.AttributedDict(**options) yield result
[ "def", "Parse", "(", "self", ",", "cmd", ",", "args", ",", "stdout", ",", "stderr", ",", "return_val", ",", "time_taken", ",", "knowledge_base", ")", ":", "_", "=", "stderr", ",", "time_taken", ",", "args", ",", "knowledge_base", "# Unused.", "self", ".", "CheckReturn", "(", "cmd", ",", "return_val", ")", "result", "=", "rdf_protodict", ".", "AttributedDict", "(", ")", "for", "entry", "in", "self", ".", "_field_parser", ".", "ParseEntries", "(", "stdout", ")", ":", "line_str", "=", "\" \"", ".", "join", "(", "entry", ")", "mount_rslt", "=", "self", ".", "mount_re", ".", "match", "(", "line_str", ")", "if", "mount_rslt", ":", "device", ",", "mount_point", ",", "fs_type", ",", "option_str", "=", "mount_rslt", ".", "groups", "(", ")", "result", "=", "rdf_client_fs", ".", "Filesystem", "(", ")", "result", ".", "device", "=", "device", "result", ".", "mount_point", "=", "mount_point", "result", ".", "type", "=", "fs_type", "# Parse these options as a dict as some items may be key/values.", "# KeyValue parser uses OrderedDict as the native parser method. Use it.", "options", "=", "KeyValueParser", "(", "term", "=", "\",\"", ")", ".", "ParseToOrderedDict", "(", "option_str", ")", "# Keys without values get assigned [] by default. Because these keys are", "# actually true, if declared, change any [] values to True.", "for", "k", ",", "v", "in", "iteritems", "(", "options", ")", ":", "options", "[", "k", "]", "=", "v", "or", "[", "True", "]", "result", ".", "options", "=", "rdf_protodict", ".", "AttributedDict", "(", "*", "*", "options", ")", "yield", "result" ]
Parse the mount command output.
[ "Parse", "the", "mount", "command", "output", "." ]
python
train
sentinel-hub/eo-learn
core/eolearn/core/utilities.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/utilities.py#L190-L201
def _parse_feature_names(feature_names, new_names): """Helping function of `_parse_features` that parses a collection of feature names.""" if isinstance(feature_names, set): return FeatureParser._parse_names_set(feature_names) if isinstance(feature_names, dict): return FeatureParser._parse_names_dict(feature_names) if isinstance(feature_names, (tuple, list)): return FeatureParser._parse_names_tuple(feature_names, new_names) raise ValueError('Failed to parse {}, expected dictionary, set or tuple'.format(feature_names))
[ "def", "_parse_feature_names", "(", "feature_names", ",", "new_names", ")", ":", "if", "isinstance", "(", "feature_names", ",", "set", ")", ":", "return", "FeatureParser", ".", "_parse_names_set", "(", "feature_names", ")", "if", "isinstance", "(", "feature_names", ",", "dict", ")", ":", "return", "FeatureParser", ".", "_parse_names_dict", "(", "feature_names", ")", "if", "isinstance", "(", "feature_names", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "FeatureParser", ".", "_parse_names_tuple", "(", "feature_names", ",", "new_names", ")", "raise", "ValueError", "(", "'Failed to parse {}, expected dictionary, set or tuple'", ".", "format", "(", "feature_names", ")", ")" ]
Helping function of `_parse_features` that parses a collection of feature names.
[ "Helping", "function", "of", "_parse_features", "that", "parses", "a", "collection", "of", "feature", "names", "." ]
python
train
git-afsantos/bonsai
bonsai/model.py
https://github.com/git-afsantos/bonsai/blob/aa5af3f535b3b506bfc95c107c501fc9c4bcd072/bonsai/model.py#L56-L71
def filter(self, cls, recursive=False): """Retrieves all descendants (including self) that are instances of a given class. Args: cls (class): The class to use as a filter. Kwargs: recursive (bool): Whether to descend recursively down the tree. """ source = self.walk_preorder if recursive else self._children return [ codeobj for codeobj in source() if isinstance(codeobj, cls) ]
[ "def", "filter", "(", "self", ",", "cls", ",", "recursive", "=", "False", ")", ":", "source", "=", "self", ".", "walk_preorder", "if", "recursive", "else", "self", ".", "_children", "return", "[", "codeobj", "for", "codeobj", "in", "source", "(", ")", "if", "isinstance", "(", "codeobj", ",", "cls", ")", "]" ]
Retrieves all descendants (including self) that are instances of a given class. Args: cls (class): The class to use as a filter. Kwargs: recursive (bool): Whether to descend recursively down the tree.
[ "Retrieves", "all", "descendants", "(", "including", "self", ")", "that", "are", "instances", "of", "a", "given", "class", "." ]
python
train
rraadd88/rohan
rohan/dandage/io_sets.py
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_sets.py#L38-L49
def rankwithlist(l,lwith,test=False): """ rank l wrt lwith """ if not (isinstance(l,list) and isinstance(lwith,list)): l,lwith=list(l),list(lwith) from scipy.stats import rankdata if test: print(l,lwith) print(rankdata(l),rankdata(lwith)) print(rankdata(l+lwith)) return rankdata(l+lwith)[:len(l)]
[ "def", "rankwithlist", "(", "l", ",", "lwith", ",", "test", "=", "False", ")", ":", "if", "not", "(", "isinstance", "(", "l", ",", "list", ")", "and", "isinstance", "(", "lwith", ",", "list", ")", ")", ":", "l", ",", "lwith", "=", "list", "(", "l", ")", ",", "list", "(", "lwith", ")", "from", "scipy", ".", "stats", "import", "rankdata", "if", "test", ":", "print", "(", "l", ",", "lwith", ")", "print", "(", "rankdata", "(", "l", ")", ",", "rankdata", "(", "lwith", ")", ")", "print", "(", "rankdata", "(", "l", "+", "lwith", ")", ")", "return", "rankdata", "(", "l", "+", "lwith", ")", "[", ":", "len", "(", "l", ")", "]" ]
rank l wrt lwith
[ "rank", "l", "wrt", "lwith" ]
python
train
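Illustrative note (not part of the dataset record above): rankwithlist ranks the values of l within the combined pool l + lwith. A short sketch of the same call pattern using scipy directly; the sample lists are made up.

    from scipy.stats import rankdata

    l = [0.2, 0.9, 0.5]
    lwith = [0.1, 0.7]
    combined_ranks = rankdata(l + lwith)
    print(combined_ranks[:len(l)])  # ranks of l's values in the combined pool: [2. 5. 3.]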
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1318-L1337
def set_weight(self, weight): """Set weight of each instance. Parameters ---------- weight : list, numpy 1-D array, pandas Series or None Weight to be set for each data point. Returns ------- self : Dataset Dataset with set weight. """ if weight is not None and np.all(weight == 1): weight = None self.weight = weight if self.handle is not None and weight is not None: weight = list_to_1d_numpy(weight, name='weight') self.set_field('weight', weight) return self
[ "def", "set_weight", "(", "self", ",", "weight", ")", ":", "if", "weight", "is", "not", "None", "and", "np", ".", "all", "(", "weight", "==", "1", ")", ":", "weight", "=", "None", "self", ".", "weight", "=", "weight", "if", "self", ".", "handle", "is", "not", "None", "and", "weight", "is", "not", "None", ":", "weight", "=", "list_to_1d_numpy", "(", "weight", ",", "name", "=", "'weight'", ")", "self", ".", "set_field", "(", "'weight'", ",", "weight", ")", "return", "self" ]
Set weight of each instance. Parameters ---------- weight : list, numpy 1-D array, pandas Series or None Weight to be set for each data point. Returns ------- self : Dataset Dataset with set weight.
[ "Set", "weight", "of", "each", "instance", "." ]
python
train
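Illustrative note (not part of the dataset record above): set_weight attaches per-row weights to a LightGBM Dataset. A hedged usage sketch; the feature matrix, labels, weights, and training parameters are synthetic and only meant to show the call.

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 4)
    y = np.random.randint(0, 2, size=100)
    w = np.where(y == 1, 2.0, 1.0)        # up-weight the positive class

    train_set = lgb.Dataset(X, label=y)
    train_set = train_set.set_weight(w)   # returns self, so chaining also works
    booster = lgb.train({"objective": "binary", "verbose": -1}, train_set, num_boost_round=5)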
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L111-L125
def get_vlan_brief_output_vlan_vlan_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief output = ET.SubElement(get_vlan_brief, "output") vlan = ET.SubElement(output, "vlan") vlan_id_key = ET.SubElement(vlan, "vlan-id") vlan_id_key.text = kwargs.pop('vlan_id') vlan_name = ET.SubElement(vlan, "vlan-name") vlan_name.text = kwargs.pop('vlan_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vlan_brief_output_vlan_vlan_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vlan_brief", "=", "ET", ".", "Element", "(", "\"get_vlan_brief\"", ")", "config", "=", "get_vlan_brief", "output", "=", "ET", ".", "SubElement", "(", "get_vlan_brief", ",", "\"output\"", ")", "vlan", "=", "ET", ".", "SubElement", "(", "output", ",", "\"vlan\"", ")", "vlan_id_key", "=", "ET", ".", "SubElement", "(", "vlan", ",", "\"vlan-id\"", ")", "vlan_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'vlan_id'", ")", "vlan_name", "=", "ET", ".", "SubElement", "(", "vlan", ",", "\"vlan-name\"", ")", "vlan_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'vlan_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
sci-bots/svg-model
svg_model/color.py
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/color.py#L7-L46
def hex_color_to_rgba(hex_color, normalize_to=255): ''' Convert a hex-formatted number (i.e., `"#RGB[A]"` or `"#RRGGBB[AA]"`) to an RGBA tuple (i.e., `(<r>, <g>, <b>, <a>)`). Args: hex_color (str) : hex-formatted number (e.g., `"#2fc"`, `"#3c2f8611"`) normalize_to (int, float) : Factor to normalize each channel by Returns: (tuple) : RGBA tuple (i.e., `(<r>, <g>, <b>, <a>)`), where range of each channel in tuple is `[0, normalize_to]`. ''' color_pattern_one_digit = (r'#(?P<R>[\da-fA-F])(?P<G>[\da-fA-F])' r'(?P<B>[\da-fA-F])(?P<A>[\da-fA-F])?') color_pattern_two_digit = (r'#(?P<R>[\da-fA-F]{2})(?P<G>[\da-fA-F]{2})' r'(?P<B>[\da-fA-F]{2})(?P<A>[\da-fA-F]{2})?') # First try to match `#rrggbb[aa]`. match = re.match(color_pattern_two_digit, hex_color) if match: channels = match.groupdict() channel_scale = 255 else: # Try to match `#rgb[a]`. match = re.match(color_pattern_one_digit, hex_color) if match: channels = match.groupdict() channel_scale = 15 else: raise ValueError('Color string must be in format #RGB[A] or ' '#RRGGBB[AA] (i.e., alpha channel is optional)') scale = normalize_to / channel_scale return tuple(type(normalize_to)(int(channels[k], 16) * scale) if channels[k] is not None else None for k in 'RGBA')
[ "def", "hex_color_to_rgba", "(", "hex_color", ",", "normalize_to", "=", "255", ")", ":", "color_pattern_one_digit", "=", "(", "r'#(?P<R>[\\da-fA-F])(?P<G>[\\da-fA-F])'", "r'(?P<B>[\\da-fA-F])(?P<A>[\\da-fA-F])?'", ")", "color_pattern_two_digit", "=", "(", "r'#(?P<R>[\\da-fA-F]{2})(?P<G>[\\da-fA-F]{2})'", "r'(?P<B>[\\da-fA-F]{2})(?P<A>[\\da-fA-F]{2})?'", ")", "# First try to match `#rrggbb[aa]`.", "match", "=", "re", ".", "match", "(", "color_pattern_two_digit", ",", "hex_color", ")", "if", "match", ":", "channels", "=", "match", ".", "groupdict", "(", ")", "channel_scale", "=", "255", "else", ":", "# Try to match `#rgb[a]`.", "match", "=", "re", ".", "match", "(", "color_pattern_one_digit", ",", "hex_color", ")", "if", "match", ":", "channels", "=", "match", ".", "groupdict", "(", ")", "channel_scale", "=", "15", "else", ":", "raise", "ValueError", "(", "'Color string must be in format #RGB[A] or '", "'#RRGGBB[AA] (i.e., alpha channel is optional)'", ")", "scale", "=", "normalize_to", "/", "channel_scale", "return", "tuple", "(", "type", "(", "normalize_to", ")", "(", "int", "(", "channels", "[", "k", "]", ",", "16", ")", "*", "scale", ")", "if", "channels", "[", "k", "]", "is", "not", "None", "else", "None", "for", "k", "in", "'RGBA'", ")" ]
Convert a hex-formatted number (i.e., `"#RGB[A]"` or `"#RRGGBB[AA]"`) to an RGBA tuple (i.e., `(<r>, <g>, <b>, <a>)`). Args: hex_color (str) : hex-formatted number (e.g., `"#2fc"`, `"#3c2f8611"`) normalize_to (int, float) : Factor to normalize each channel by Returns: (tuple) : RGBA tuple (i.e., `(<r>, <g>, <b>, <a>)`), where range of each channel in tuple is `[0, normalize_to]`.
[ "Convert", "a", "hex", "-", "formatted", "number", "(", "i", ".", "e", ".", "#RGB", "[", "A", "]", "or", "#RRGGBB", "[", "AA", "]", ")", "to", "an", "RGBA", "tuple", "(", "i", ".", "e", ".", "(", "<r", ">", "<g", ">", "<b", ">", "<a", ">", ")", ")", "." ]
python
train
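Illustrative note (not part of the dataset record above): hex_color_to_rgba parses #RGB[A] / #RRGGBB[AA] strings with a pair of regexes and rescales each channel by normalize_to / channel_scale. A worked sketch of that per-channel arithmetic, computed by hand rather than by running svg_model; the helper below is a standalone re-implementation of one channel only.

    # Expected behaviour, worked by hand:
    #   hex_color_to_rgba("#3c2f8611")               -> (60, 47, 134, 17)
    #   hex_color_to_rgba("#2fc", normalize_to=1.0)  -> (0.133..., 1.0, 0.8, None)
    def channel(hex_digits, normalize_to=255, channel_scale=255):
        # One channel: parse hex, rescale, cast to the type of normalize_to.
        return type(normalize_to)(int(hex_digits, 16) * (normalize_to / channel_scale))

    print(channel("3c"))                                      # 60
    print(channel("f", normalize_to=1.0, channel_scale=15))   # 1.0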
learningequality/ricecooker
ricecooker/classes/nodes.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/nodes.py#L738-L751
def process_files(self): """ process_files: goes through question fields and replaces image strings Args: None Returns: None """ config.LOGGER.info("\t*** Processing images for exercise: {}".format(self.title)) downloaded = super(ExerciseNode, self).process_files() for question in self.questions: downloaded += question.process_question() self.process_exercise_data() config.LOGGER.info("\t*** Images for {} have been processed".format(self.title)) return downloaded
[ "def", "process_files", "(", "self", ")", ":", "config", ".", "LOGGER", ".", "info", "(", "\"\\t*** Processing images for exercise: {}\"", ".", "format", "(", "self", ".", "title", ")", ")", "downloaded", "=", "super", "(", "ExerciseNode", ",", "self", ")", ".", "process_files", "(", ")", "for", "question", "in", "self", ".", "questions", ":", "downloaded", "+=", "question", ".", "process_question", "(", ")", "self", ".", "process_exercise_data", "(", ")", "config", ".", "LOGGER", ".", "info", "(", "\"\\t*** Images for {} have been processed\"", ".", "format", "(", "self", ".", "title", ")", ")", "return", "downloaded" ]
process_files: goes through question fields and replaces image strings Args: None Returns: None
[ "process_files", ":", "goes", "through", "question", "fields", "and", "replaces", "image", "strings", "Args", ":", "None", "Returns", ":", "None" ]
python
train
lcgong/redbean
redbean/handler_argument.py
https://github.com/lcgong/redbean/blob/45df9ff1e807e742771c752808d7fdac4007c919/redbean/handler_argument.py#L11-L50
def argument_getter_factory(route_spec): """ 根据spec来生成handler函数参数值读取器 """ func_sig = inspect.signature(route_spec.handler_func) param_names = list(func_sig.parameters.keys()) arg_getters = build_argval_getters(route_spec) n_args = len(arg_getters) async def argument_getters(request): errors = None values = {} for i in range(n_args): try: arg_name = param_names[i] values[arg_name] = await arg_getters[i](request) except HTTPException: raise except Exception as exc : if errors is None: errors = [] request.app.logger.error('', exc_info=True) exc_type = type(exc) data = { "name": arg_name, "type": f"{exc_type.__module__}.{exc_type.__qualname__}", "error": str(exc) } errors.append(data) if errors is not None: raise RESTfulArgumentError(errors) return values return argument_getters
[ "def", "argument_getter_factory", "(", "route_spec", ")", ":", "func_sig", "=", "inspect", ".", "signature", "(", "route_spec", ".", "handler_func", ")", "param_names", "=", "list", "(", "func_sig", ".", "parameters", ".", "keys", "(", ")", ")", "arg_getters", "=", "build_argval_getters", "(", "route_spec", ")", "n_args", "=", "len", "(", "arg_getters", ")", "async", "def", "argument_getters", "(", "request", ")", ":", "errors", "=", "None", "values", "=", "{", "}", "for", "i", "in", "range", "(", "n_args", ")", ":", "try", ":", "arg_name", "=", "param_names", "[", "i", "]", "values", "[", "arg_name", "]", "=", "await", "arg_getters", "[", "i", "]", "(", "request", ")", "except", "HTTPException", ":", "raise", "except", "Exception", "as", "exc", ":", "if", "errors", "is", "None", ":", "errors", "=", "[", "]", "request", ".", "app", ".", "logger", ".", "error", "(", "''", ",", "exc_info", "=", "True", ")", "exc_type", "=", "type", "(", "exc", ")", "data", "=", "{", "\"name\"", ":", "arg_name", ",", "\"type\"", ":", "f\"{exc_type.__module__}.{exc_type.__qualname__}\"", ",", "\"error\"", ":", "str", "(", "exc", ")", "}", "errors", ".", "append", "(", "data", ")", "if", "errors", "is", "not", "None", ":", "raise", "RESTfulArgumentError", "(", "errors", ")", "return", "values", "return", "argument_getters" ]
根据spec来生成handler函数参数值读取器
[ "根据spec来生成handler函数参数值读取器" ]
python
train
materialsproject/custodian
custodian/qchem/jobs.py
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/qchem/jobs.py#L132-L270
def opt_with_frequency_flattener(cls, qchem_command, multimode="openmp", input_file="mol.qin", output_file="mol.qout", qclog_file="mol.qclog", max_iterations=10, max_molecule_perturb_scale=0.3, check_connectivity=True, **QCJob_kwargs): """ Optimize a structure and calculate vibrational frequencies to check if the structure is in a true minima. If a frequency is negative, iteratively perturbe the geometry, optimize, and recalculate frequencies until all are positive, aka a true minima has been found. Args: qchem_command (str): Command to run QChem. multimode (str): Parallelization scheme, either openmp or mpi. input_file (str): Name of the QChem input file. output_file (str): Name of the QChem output file. max_iterations (int): Number of perturbation -> optimization -> frequency iterations to perform. Defaults to 10. max_molecule_perturb_scale (float): The maximum scaled perturbation that can be applied to the molecule. Defaults to 0.3. check_connectivity (bool): Whether to check differences in connectivity introduced by structural perturbation. Defaults to True. **QCJob_kwargs: Passthrough kwargs to QCJob. See :class:`custodian.qchem.jobs.QCJob`. """ min_molecule_perturb_scale = 0.1 scale_grid = 10 perturb_scale_grid = ( max_molecule_perturb_scale - min_molecule_perturb_scale ) / scale_grid if not os.path.exists(input_file): raise AssertionError('Input file must be present!') orig_opt_input = QCInput.from_file(input_file) orig_opt_rem = copy.deepcopy(orig_opt_input.rem) orig_freq_rem = copy.deepcopy(orig_opt_input.rem) orig_freq_rem["job_type"] = "freq" first = True reversed_direction = False num_neg_freqs = [] for ii in range(max_iterations): yield (QCJob( qchem_command=qchem_command, multimode=multimode, input_file=input_file, output_file=output_file, qclog_file=qclog_file, suffix=".opt_" + str(ii), backup=first, **QCJob_kwargs)) first = False opt_outdata = QCOutput(output_file + ".opt_" + str(ii)).data if opt_outdata["structure_change"] == "unconnected_fragments" and not opt_outdata["completion"]: print("Unstable molecule broke into unconnected fragments which failed to optimize! Exiting...") break else: freq_QCInput = QCInput( molecule=opt_outdata.get("molecule_from_optimized_geometry"), rem=orig_freq_rem, opt=orig_opt_input.opt, pcm=orig_opt_input.pcm, solvent=orig_opt_input.solvent) freq_QCInput.write_file(input_file) yield (QCJob( qchem_command=qchem_command, multimode=multimode, input_file=input_file, output_file=output_file, qclog_file=qclog_file, suffix=".freq_" + str(ii), backup=first, **QCJob_kwargs)) outdata = QCOutput(output_file + ".freq_" + str(ii)).data errors = outdata.get("errors") if len(errors) != 0: raise AssertionError('No errors should be encountered while flattening frequencies!') if outdata.get('frequencies')[0] > 0.0: print("All frequencies positive!") break else: num_neg_freqs += [sum(1 for freq in outdata.get('frequencies') if freq < 0)] if len(num_neg_freqs) > 1: if num_neg_freqs[-1] == num_neg_freqs[-2] and not reversed_direction: reversed_direction = True elif num_neg_freqs[-1] == num_neg_freqs[-2] and reversed_direction: if len(num_neg_freqs) < 3: raise AssertionError("ERROR: This should only be possible after at least three frequency flattening iterations! Exiting...") else: raise Exception("ERROR: Reversing the perturbation direction still could not flatten any frequencies. Exiting...") elif num_neg_freqs[-1] != num_neg_freqs[-2] and reversed_direction: reversed_direction = False negative_freq_vecs = outdata.get("frequency_mode_vectors")[0] structure_successfully_perturbed = False for molecule_perturb_scale in np.arange( max_molecule_perturb_scale, min_molecule_perturb_scale, -perturb_scale_grid): new_coords = perturb_coordinates( old_coords=outdata.get("initial_geometry"), negative_freq_vecs=negative_freq_vecs, molecule_perturb_scale=molecule_perturb_scale, reversed_direction=reversed_direction) new_molecule = Molecule( species=outdata.get('species'), coords=new_coords, charge=outdata.get('charge'), spin_multiplicity=outdata.get('multiplicity')) if check_connectivity: old_molgraph = MoleculeGraph.with_local_env_strategy(outdata.get("initial_molecule"), OpenBabelNN(), reorder=False, extend_structure=False) new_molgraph = MoleculeGraph.with_local_env_strategy(new_molecule, OpenBabelNN(), reorder=False, extend_structure=False) if old_molgraph.isomorphic_to(new_molgraph): structure_successfully_perturbed = True break if not structure_successfully_perturbed: raise Exception( "ERROR: Unable to perturb coordinates to remove negative frequency without changing the connectivity! Exiting..." ) new_opt_QCInput = QCInput( molecule=new_molecule, rem=orig_opt_rem, opt=orig_opt_input.opt, pcm=orig_opt_input.pcm, solvent=orig_opt_input.solvent) new_opt_QCInput.write_file(input_file)
[ "def", "opt_with_frequency_flattener", "(", "cls", ",", "qchem_command", ",", "multimode", "=", "\"openmp\"", ",", "input_file", "=", "\"mol.qin\"", ",", "output_file", "=", "\"mol.qout\"", ",", "qclog_file", "=", "\"mol.qclog\"", ",", "max_iterations", "=", "10", ",", "max_molecule_perturb_scale", "=", "0.3", ",", "check_connectivity", "=", "True", ",", "*", "*", "QCJob_kwargs", ")", ":", "min_molecule_perturb_scale", "=", "0.1", "scale_grid", "=", "10", "perturb_scale_grid", "=", "(", "max_molecule_perturb_scale", "-", "min_molecule_perturb_scale", ")", "/", "scale_grid", "if", "not", "os", ".", "path", ".", "exists", "(", "input_file", ")", ":", "raise", "AssertionError", "(", "'Input file must be present!'", ")", "orig_opt_input", "=", "QCInput", ".", "from_file", "(", "input_file", ")", "orig_opt_rem", "=", "copy", ".", "deepcopy", "(", "orig_opt_input", ".", "rem", ")", "orig_freq_rem", "=", "copy", ".", "deepcopy", "(", "orig_opt_input", ".", "rem", ")", "orig_freq_rem", "[", "\"job_type\"", "]", "=", "\"freq\"", "first", "=", "True", "reversed_direction", "=", "False", "num_neg_freqs", "=", "[", "]", "for", "ii", "in", "range", "(", "max_iterations", ")", ":", "yield", "(", "QCJob", "(", "qchem_command", "=", "qchem_command", ",", "multimode", "=", "multimode", ",", "input_file", "=", "input_file", ",", "output_file", "=", "output_file", ",", "qclog_file", "=", "qclog_file", ",", "suffix", "=", "\".opt_\"", "+", "str", "(", "ii", ")", ",", "backup", "=", "first", ",", "*", "*", "QCJob_kwargs", ")", ")", "first", "=", "False", "opt_outdata", "=", "QCOutput", "(", "output_file", "+", "\".opt_\"", "+", "str", "(", "ii", ")", ")", ".", "data", "if", "opt_outdata", "[", "\"structure_change\"", "]", "==", "\"unconnected_fragments\"", "and", "not", "opt_outdata", "[", "\"completion\"", "]", ":", "print", "(", "\"Unstable molecule broke into unconnected fragments which failed to optimize! 
Exiting...\"", ")", "break", "else", ":", "freq_QCInput", "=", "QCInput", "(", "molecule", "=", "opt_outdata", ".", "get", "(", "\"molecule_from_optimized_geometry\"", ")", ",", "rem", "=", "orig_freq_rem", ",", "opt", "=", "orig_opt_input", ".", "opt", ",", "pcm", "=", "orig_opt_input", ".", "pcm", ",", "solvent", "=", "orig_opt_input", ".", "solvent", ")", "freq_QCInput", ".", "write_file", "(", "input_file", ")", "yield", "(", "QCJob", "(", "qchem_command", "=", "qchem_command", ",", "multimode", "=", "multimode", ",", "input_file", "=", "input_file", ",", "output_file", "=", "output_file", ",", "qclog_file", "=", "qclog_file", ",", "suffix", "=", "\".freq_\"", "+", "str", "(", "ii", ")", ",", "backup", "=", "first", ",", "*", "*", "QCJob_kwargs", ")", ")", "outdata", "=", "QCOutput", "(", "output_file", "+", "\".freq_\"", "+", "str", "(", "ii", ")", ")", ".", "data", "errors", "=", "outdata", ".", "get", "(", "\"errors\"", ")", "if", "len", "(", "errors", ")", "!=", "0", ":", "raise", "AssertionError", "(", "'No errors should be encountered while flattening frequencies!'", ")", "if", "outdata", ".", "get", "(", "'frequencies'", ")", "[", "0", "]", ">", "0.0", ":", "print", "(", "\"All frequencies positive!\"", ")", "break", "else", ":", "num_neg_freqs", "+=", "[", "sum", "(", "1", "for", "freq", "in", "outdata", ".", "get", "(", "'frequencies'", ")", "if", "freq", "<", "0", ")", "]", "if", "len", "(", "num_neg_freqs", ")", ">", "1", ":", "if", "num_neg_freqs", "[", "-", "1", "]", "==", "num_neg_freqs", "[", "-", "2", "]", "and", "not", "reversed_direction", ":", "reversed_direction", "=", "True", "elif", "num_neg_freqs", "[", "-", "1", "]", "==", "num_neg_freqs", "[", "-", "2", "]", "and", "reversed_direction", ":", "if", "len", "(", "num_neg_freqs", ")", "<", "3", ":", "raise", "AssertionError", "(", "\"ERROR: This should only be possible after at least three frequency flattening iterations! Exiting...\"", ")", "else", ":", "raise", "Exception", "(", "\"ERROR: Reversing the perturbation direction still could not flatten any frequencies. 
Exiting...\"", ")", "elif", "num_neg_freqs", "[", "-", "1", "]", "!=", "num_neg_freqs", "[", "-", "2", "]", "and", "reversed_direction", ":", "reversed_direction", "=", "False", "negative_freq_vecs", "=", "outdata", ".", "get", "(", "\"frequency_mode_vectors\"", ")", "[", "0", "]", "structure_successfully_perturbed", "=", "False", "for", "molecule_perturb_scale", "in", "np", ".", "arange", "(", "max_molecule_perturb_scale", ",", "min_molecule_perturb_scale", ",", "-", "perturb_scale_grid", ")", ":", "new_coords", "=", "perturb_coordinates", "(", "old_coords", "=", "outdata", ".", "get", "(", "\"initial_geometry\"", ")", ",", "negative_freq_vecs", "=", "negative_freq_vecs", ",", "molecule_perturb_scale", "=", "molecule_perturb_scale", ",", "reversed_direction", "=", "reversed_direction", ")", "new_molecule", "=", "Molecule", "(", "species", "=", "outdata", ".", "get", "(", "'species'", ")", ",", "coords", "=", "new_coords", ",", "charge", "=", "outdata", ".", "get", "(", "'charge'", ")", ",", "spin_multiplicity", "=", "outdata", ".", "get", "(", "'multiplicity'", ")", ")", "if", "check_connectivity", ":", "old_molgraph", "=", "MoleculeGraph", ".", "with_local_env_strategy", "(", "outdata", ".", "get", "(", "\"initial_molecule\"", ")", ",", "OpenBabelNN", "(", ")", ",", "reorder", "=", "False", ",", "extend_structure", "=", "False", ")", "new_molgraph", "=", "MoleculeGraph", ".", "with_local_env_strategy", "(", "new_molecule", ",", "OpenBabelNN", "(", ")", ",", "reorder", "=", "False", ",", "extend_structure", "=", "False", ")", "if", "old_molgraph", ".", "isomorphic_to", "(", "new_molgraph", ")", ":", "structure_successfully_perturbed", "=", "True", "break", "if", "not", "structure_successfully_perturbed", ":", "raise", "Exception", "(", "\"ERROR: Unable to perturb coordinates to remove negative frequency without changing the connectivity! Exiting...\"", ")", "new_opt_QCInput", "=", "QCInput", "(", "molecule", "=", "new_molecule", ",", "rem", "=", "orig_opt_rem", ",", "opt", "=", "orig_opt_input", ".", "opt", ",", "pcm", "=", "orig_opt_input", ".", "pcm", ",", "solvent", "=", "orig_opt_input", ".", "solvent", ")", "new_opt_QCInput", ".", "write_file", "(", "input_file", ")" ]
Optimize a structure and calculate vibrational frequencies to check if the structure is in a true minima. If a frequency is negative, iteratively perturbe the geometry, optimize, and recalculate frequencies until all are positive, aka a true minima has been found. Args: qchem_command (str): Command to run QChem. multimode (str): Parallelization scheme, either openmp or mpi. input_file (str): Name of the QChem input file. output_file (str): Name of the QChem output file. max_iterations (int): Number of perturbation -> optimization -> frequency iterations to perform. Defaults to 10. max_molecule_perturb_scale (float): The maximum scaled perturbation that can be applied to the molecule. Defaults to 0.3. check_connectivity (bool): Whether to check differences in connectivity introduced by structural perturbation. Defaults to True. **QCJob_kwargs: Passthrough kwargs to QCJob. See :class:`custodian.qchem.jobs.QCJob`.
[ "Optimize", "a", "structure", "and", "calculate", "vibrational", "frequencies", "to", "check", "if", "the", "structure", "is", "in", "a", "true", "minima", ".", "If", "a", "frequency", "is", "negative", "iteratively", "perturbe", "the", "geometry", "optimize", "and", "recalculate", "frequencies", "until", "all", "are", "positive", "aka", "a", "true", "minima", "has", "been", "found", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py#L176-L192
def new_metadata(name=None, authors=None, license=None, created=None, modified=None, gistid=None): """Create a new metadata node.""" metadata = NotebookNode() if name is not None: metadata.name = unicode(name) if authors is not None: metadata.authors = list(authors) if created is not None: metadata.created = unicode(created) if modified is not None: metadata.modified = unicode(modified) if license is not None: metadata.license = unicode(license) if gistid is not None: metadata.gistid = unicode(gistid) return metadata
[ "def", "new_metadata", "(", "name", "=", "None", ",", "authors", "=", "None", ",", "license", "=", "None", ",", "created", "=", "None", ",", "modified", "=", "None", ",", "gistid", "=", "None", ")", ":", "metadata", "=", "NotebookNode", "(", ")", "if", "name", "is", "not", "None", ":", "metadata", ".", "name", "=", "unicode", "(", "name", ")", "if", "authors", "is", "not", "None", ":", "metadata", ".", "authors", "=", "list", "(", "authors", ")", "if", "created", "is", "not", "None", ":", "metadata", ".", "created", "=", "unicode", "(", "created", ")", "if", "modified", "is", "not", "None", ":", "metadata", ".", "modified", "=", "unicode", "(", "modified", ")", "if", "license", "is", "not", "None", ":", "metadata", ".", "license", "=", "unicode", "(", "license", ")", "if", "gistid", "is", "not", "None", ":", "metadata", ".", "gistid", "=", "unicode", "(", "gistid", ")", "return", "metadata" ]
Create a new metadata node.
[ "Create", "a", "new", "metadata", "node", "." ]
python
test
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/data_layers/alchemy.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/data_layers/alchemy.py#L519-L534
def paginate_query(self, query, paginate_info): """Paginate query according to jsonapi 1.0 :param Query query: sqlalchemy queryset :param dict paginate_info: pagination information :return Query: the paginated query """ if int(paginate_info.get('size', 1)) == 0: return query page_size = int(paginate_info.get('size', 0)) or current_app.config['PAGE_SIZE'] query = query.limit(page_size) if paginate_info.get('number'): query = query.offset((int(paginate_info['number']) - 1) * page_size) return query
[ "def", "paginate_query", "(", "self", ",", "query", ",", "paginate_info", ")", ":", "if", "int", "(", "paginate_info", ".", "get", "(", "'size'", ",", "1", ")", ")", "==", "0", ":", "return", "query", "page_size", "=", "int", "(", "paginate_info", ".", "get", "(", "'size'", ",", "0", ")", ")", "or", "current_app", ".", "config", "[", "'PAGE_SIZE'", "]", "query", "=", "query", ".", "limit", "(", "page_size", ")", "if", "paginate_info", ".", "get", "(", "'number'", ")", ":", "query", "=", "query", ".", "offset", "(", "(", "int", "(", "paginate_info", "[", "'number'", "]", ")", "-", "1", ")", "*", "page_size", ")", "return", "query" ]
Paginate query according to jsonapi 1.0 :param Query query: sqlalchemy queryset :param dict paginate_info: pagination information :return Query: the paginated query
[ "Paginate", "query", "according", "to", "jsonapi", "1", ".", "0" ]
python
train
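The same size/number arithmetic can be sketched against a plain list instead of a SQLAlchemy query; the default page size and sample data here are assumptions:

def paginate(items, size=None, number=None, default_size=30):
    """Apply JSON:API style size/number pagination to a sequence."""
    if size is not None and int(size) == 0:
        return list(items)  # size=0 disables pagination
    page_size = int(size or 0) or default_size
    offset = (int(number or 1) - 1) * page_size
    return list(items)[offset:offset + page_size]

print(paginate(range(100), size=10, number=3))  # items 20 through 29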
buildbot/buildbot
master/buildbot/process/cache.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/process/cache.py#L40-L59
def get_cache(self, cache_name, miss_fn): """ Get an L{AsyncLRUCache} object with the given name. If such an object does not exist, it will be created. Since the cache is permanent, this method can be called only once, e.g., in C{startService}, and it value stored indefinitely. @param cache_name: name of the cache (usually the name of the type of object it stores) @param miss_fn: miss function for the cache; see L{AsyncLRUCache} constructor. @returns: L{AsyncLRUCache} instance """ try: return self._caches[cache_name] except KeyError: max_size = self.config.get(cache_name, self.DEFAULT_CACHE_SIZE) assert max_size >= 1 c = self._caches[cache_name] = lru.AsyncLRUCache(miss_fn, max_size) return c
[ "def", "get_cache", "(", "self", ",", "cache_name", ",", "miss_fn", ")", ":", "try", ":", "return", "self", ".", "_caches", "[", "cache_name", "]", "except", "KeyError", ":", "max_size", "=", "self", ".", "config", ".", "get", "(", "cache_name", ",", "self", ".", "DEFAULT_CACHE_SIZE", ")", "assert", "max_size", ">=", "1", "c", "=", "self", ".", "_caches", "[", "cache_name", "]", "=", "lru", ".", "AsyncLRUCache", "(", "miss_fn", ",", "max_size", ")", "return", "c" ]
Get an L{AsyncLRUCache} object with the given name. If such an object does not exist, it will be created. Since the cache is permanent, this method can be called only once, e.g., in C{startService}, and its value stored indefinitely. @param cache_name: name of the cache (usually the name of the type of object it stores) @param miss_fn: miss function for the cache; see L{AsyncLRUCache} constructor. @returns: L{AsyncLRUCache} instance
[ "Get", "an", "L", "{", "AsyncLRUCache", "}", "object", "with", "the", "given", "name", ".", "If", "such", "an", "object", "does", "not", "exist", "it", "will", "be", "created", ".", "Since", "the", "cache", "is", "permanent", "this", "method", "can", "be", "called", "only", "once", "e", ".", "g", ".", "in", "C", "{", "startService", "}", "and", "it", "value", "stored", "indefinitely", "." ]
python
train
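A synchronous sketch of the same create-on-first-use registry, using functools.lru_cache in place of buildbot's AsyncLRUCache; the class name and sizes are invented for illustration:

import functools

class CacheManager:
    DEFAULT_CACHE_SIZE = 8

    def __init__(self, sizes=None):
        self._caches = {}
        self._sizes = sizes or {}

    def get_cache(self, cache_name, miss_fn):
        """Return the cache registered under cache_name, creating it on first use."""
        if cache_name not in self._caches:
            max_size = self._sizes.get(cache_name, self.DEFAULT_CACHE_SIZE)
            self._caches[cache_name] = functools.lru_cache(maxsize=max_size)(miss_fn)
        return self._caches[cache_name]

caches = CacheManager(sizes={"squares": 4})
squares = caches.get_cache("squares", lambda n: n * n)
print(squares(3), squares(3))  # the second call is served from the cache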
Microsoft/nni
tools/nni_annotation/__init__.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_annotation/__init__.py#L36-L61
def generate_search_space(code_dir): """Generate search space from Python source code. Return a serializable search space object. code_dir: directory path of source files (str) """ search_space = {} if code_dir.endswith(slash): code_dir = code_dir[:-1] for subdir, _, files in os.walk(code_dir): # generate module name from path if subdir == code_dir: package = '' else: assert subdir.startswith(code_dir + slash), subdir prefix_len = len(code_dir) + 1 package = subdir[prefix_len:].replace(slash, '.') + '.' for file_name in files: if file_name.endswith('.py'): path = os.path.join(subdir, file_name) module = package + file_name[:-3] search_space.update(_generate_file_search_space(path, module)) return search_space
[ "def", "generate_search_space", "(", "code_dir", ")", ":", "search_space", "=", "{", "}", "if", "code_dir", ".", "endswith", "(", "slash", ")", ":", "code_dir", "=", "code_dir", "[", ":", "-", "1", "]", "for", "subdir", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "code_dir", ")", ":", "# generate module name from path", "if", "subdir", "==", "code_dir", ":", "package", "=", "''", "else", ":", "assert", "subdir", ".", "startswith", "(", "code_dir", "+", "slash", ")", ",", "subdir", "prefix_len", "=", "len", "(", "code_dir", ")", "+", "1", "package", "=", "subdir", "[", "prefix_len", ":", "]", ".", "replace", "(", "slash", ",", "'.'", ")", "+", "'.'", "for", "file_name", "in", "files", ":", "if", "file_name", ".", "endswith", "(", "'.py'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "subdir", ",", "file_name", ")", "module", "=", "package", "+", "file_name", "[", ":", "-", "3", "]", "search_space", ".", "update", "(", "_generate_file_search_space", "(", "path", ",", "module", ")", ")", "return", "search_space" ]
Generate search space from Python source code. Return a serializable search space object. code_dir: directory path of source files (str)
[ "Generate", "search", "space", "from", "Python", "source", "code", ".", "Return", "a", "serializable", "search", "space", "object", ".", "code_dir", ":", "directory", "path", "of", "source", "files", "(", "str", ")" ]
python
train
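The directory-to-module-name conversion at the core of generate_search_space can be exercised on its own; the paths are made up and POSIX separators are assumed:

def module_name(code_dir, subdir, file_name, sep="/"):
    """Turn a .py file located under code_dir into a dotted module name."""
    if subdir == code_dir:
        package = ""
    else:
        package = subdir[len(code_dir) + 1:].replace(sep, ".") + "."
    return package + file_name[:-3]  # strip the ".py" suffix

print(module_name("/proj", "/proj/models/cnn", "layers.py"))  # models.cnn.layers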
ska-sa/katcp-python
katcp/resource_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L828-L866
def set_sampling_strategy(self, sensor_name, strategy_and_params): """Set the sampling strategy for the named sensor Parameters ---------- sensor_name : str Name of the sensor strategy_and_params : seq of str or str As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy names and parameters are as defined by the KATCP spec. As str contains the same elements in space-separated form. Returns ------- sensor_strategy : tuple (success, info) with success : bool True if setting succeeded for this sensor, else False info : tuple Normalibed sensor strategy and parameters as tuple if success == True else, sys.exc_info() tuple for the error that occured. """ try: strategy_and_params = resource.normalize_strategy_parameters( strategy_and_params) self._strategy_cache[sensor_name] = strategy_and_params reply = yield self._inspecting_client.wrapped_request( 'sensor-sampling', sensor_name, *strategy_and_params) if not reply.succeeded: raise KATCPSensorError('Error setting strategy for sensor {0}: \n' '{1!s}'.format(sensor_name, reply)) sensor_strategy = (True, strategy_and_params) except Exception as e: self._logger.exception('Exception found!') sensor_strategy = (False, str(e)) raise tornado.gen.Return(sensor_strategy)
[ "def", "set_sampling_strategy", "(", "self", ",", "sensor_name", ",", "strategy_and_params", ")", ":", "try", ":", "strategy_and_params", "=", "resource", ".", "normalize_strategy_parameters", "(", "strategy_and_params", ")", "self", ".", "_strategy_cache", "[", "sensor_name", "]", "=", "strategy_and_params", "reply", "=", "yield", "self", ".", "_inspecting_client", ".", "wrapped_request", "(", "'sensor-sampling'", ",", "sensor_name", ",", "*", "strategy_and_params", ")", "if", "not", "reply", ".", "succeeded", ":", "raise", "KATCPSensorError", "(", "'Error setting strategy for sensor {0}: \\n'", "'{1!s}'", ".", "format", "(", "sensor_name", ",", "reply", ")", ")", "sensor_strategy", "=", "(", "True", ",", "strategy_and_params", ")", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "exception", "(", "'Exception found!'", ")", "sensor_strategy", "=", "(", "False", ",", "str", "(", "e", ")", ")", "raise", "tornado", ".", "gen", ".", "Return", "(", "sensor_strategy", ")" ]
Set the sampling strategy for the named sensor Parameters ---------- sensor_name : str Name of the sensor strategy_and_params : seq of str or str As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy names and parameters are as defined by the KATCP spec. As str contains the same elements in space-separated form. Returns ------- sensor_strategy : tuple (success, info) with success : bool True if setting succeeded for this sensor, else False info : tuple Normalized sensor strategy and parameters as tuple if success == True else, sys.exc_info() tuple for the error that occurred.
[ "Set", "the", "sampling", "strategy", "for", "the", "named", "sensor" ]
python
train
tradenity/python-sdk
tradenity/resources/product.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/product.py#L1096-L1116
def create_product(cls, product, **kwargs): """Create Product Create a new Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_product(product, async=True) >>> result = thread.get() :param async bool :param Product product: Attributes of product to create (required) :return: Product If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_product_with_http_info(product, **kwargs) else: (data) = cls._create_product_with_http_info(product, **kwargs) return data
[ "def", "create_product", "(", "cls", ",", "product", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_create_product_with_http_info", "(", "product", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_create_product_with_http_info", "(", "product", ",", "*", "*", "kwargs", ")", "return", "data" ]
Create Product Create a new Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_product(product, async=True) >>> result = thread.get() :param async bool :param Product product: Attributes of product to create (required) :return: Product If the method is called asynchronously, returns the request thread.
[ "Create", "Product" ]
python
train
erikrose/parsimonious
parsimonious/nodes.py
https://github.com/erikrose/parsimonious/blob/12263be5ceca89344905c2c3eb9ac5a603e976e1/parsimonious/nodes.py#L275-L289
def _parse_or_match(self, text, pos, method_name): """Execute a parse or match on the default grammar, followed by a visitation. Raise RuntimeError if there is no default grammar specified. """ if not self.grammar: raise RuntimeError( "The {cls}.{method}() shortcut won't work because {cls} was " "never associated with a specific " "grammar. Fill out its " "`grammar` attribute, and try again.".format( cls=self.__class__.__name__, method=method_name)) return self.visit(getattr(self.grammar, method_name)(text, pos=pos))
[ "def", "_parse_or_match", "(", "self", ",", "text", ",", "pos", ",", "method_name", ")", ":", "if", "not", "self", ".", "grammar", ":", "raise", "RuntimeError", "(", "\"The {cls}.{method}() shortcut won't work because {cls} was \"", "\"never associated with a specific \"", "\"grammar. Fill out its \"", "\"`grammar` attribute, and try again.\"", ".", "format", "(", "cls", "=", "self", ".", "__class__", ".", "__name__", ",", "method", "=", "method_name", ")", ")", "return", "self", ".", "visit", "(", "getattr", "(", "self", ".", "grammar", ",", "method_name", ")", "(", "text", ",", "pos", "=", "pos", ")", ")" ]
Execute a parse or match on the default grammar, followed by a visitation. Raise RuntimeError if there is no default grammar specified.
[ "Execute", "a", "parse", "or", "match", "on", "the", "default", "grammar", "followed", "by", "a", "visitation", "." ]
python
train
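A hedged usage sketch of the parse shortcut that _parse_or_match backs, assuming the parsimonious package is installed; the grammar and visitor are invented for illustration:

from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor

class GreetingVisitor(NodeVisitor):
    # Filling out `grammar` is what makes the .parse()/.match() shortcuts work.
    grammar = Grammar(r'''
        greeting = word ", " word "!"
        word     = ~"[A-Za-z]+"
    ''')

    def visit_word(self, node, visited_children):
        return node.text

    def visit_greeting(self, node, visited_children):
        first, _, second, _ = visited_children
        return (first, second)

    def generic_visit(self, node, visited_children):
        return visited_children or node

print(GreetingVisitor().parse("Hello, World!"))  # ('Hello', 'World')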
hugapi/hug
hug/api.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/api.py#L294-L319
def documentation_404(self, base_url=None): """Returns a smart 404 page that contains documentation for the written API""" base_url = self.base_url if base_url is None else base_url def handle_404(request, response, *args, **kwargs): url_prefix = request.forwarded_uri[:-1] if request.path and request.path != "/": url_prefix = request.forwarded_uri.split(request.path)[0] to_return = OrderedDict() to_return['404'] = ("The API call you tried to make was not defined. " "Here's a definition of the API to help you get going :)") to_return['documentation'] = self.documentation(base_url, self.determine_version(request, False), prefix=url_prefix) if self.output_format == hug.output_format.json: response.data = hug.output_format.json(to_return, indent=4, separators=(',', ': ')) response.content_type = 'application/json; charset=utf-8' else: response.data = self.output_format(to_return, request=request, response=response) response.content_type = self.output_format.content_type response.status = falcon.HTTP_NOT_FOUND handle_404.interface = True return handle_404
[ "def", "documentation_404", "(", "self", ",", "base_url", "=", "None", ")", ":", "base_url", "=", "self", ".", "base_url", "if", "base_url", "is", "None", "else", "base_url", "def", "handle_404", "(", "request", ",", "response", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "url_prefix", "=", "request", ".", "forwarded_uri", "[", ":", "-", "1", "]", "if", "request", ".", "path", "and", "request", ".", "path", "!=", "\"/\"", ":", "url_prefix", "=", "request", ".", "forwarded_uri", ".", "split", "(", "request", ".", "path", ")", "[", "0", "]", "to_return", "=", "OrderedDict", "(", ")", "to_return", "[", "'404'", "]", "=", "(", "\"The API call you tried to make was not defined. \"", "\"Here's a definition of the API to help you get going :)\"", ")", "to_return", "[", "'documentation'", "]", "=", "self", ".", "documentation", "(", "base_url", ",", "self", ".", "determine_version", "(", "request", ",", "False", ")", ",", "prefix", "=", "url_prefix", ")", "if", "self", ".", "output_format", "==", "hug", ".", "output_format", ".", "json", ":", "response", ".", "data", "=", "hug", ".", "output_format", ".", "json", "(", "to_return", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "response", ".", "content_type", "=", "'application/json; charset=utf-8'", "else", ":", "response", ".", "data", "=", "self", ".", "output_format", "(", "to_return", ",", "request", "=", "request", ",", "response", "=", "response", ")", "response", ".", "content_type", "=", "self", ".", "output_format", ".", "content_type", "response", ".", "status", "=", "falcon", ".", "HTTP_NOT_FOUND", "handle_404", ".", "interface", "=", "True", "return", "handle_404" ]
Returns a smart 404 page that contains documentation for the written API
[ "Returns", "a", "smart", "404", "page", "that", "contains", "documentation", "for", "the", "written", "API" ]
python
train
bitcraft/PyTMX
pytmx/pytmx.py
https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L1146-L1187
def parse_xml(self, node): """ Parse an Object from ElementTree xml node :param node: ElementTree xml node :return: self """ def read_points(text): """parse a text string of float tuples and return [(x,...),...] """ return tuple(tuple(map(float, i.split(','))) for i in text.split()) self._set_properties(node) # correctly handle "tile objects" (object with gid set) if self.gid: self.gid = self.parent.register_gid(self.gid) points = None polygon = node.find('polygon') if polygon is not None: points = read_points(polygon.get('points')) self.closed = True polyline = node.find('polyline') if polyline is not None: points = read_points(polyline.get('points')) self.closed = False if points: x1 = x2 = y1 = y2 = 0 for x, y in points: if x < x1: x1 = x if x > x2: x2 = x if y < y1: y1 = y if y > y2: y2 = y self.width = abs(x1) + abs(x2) self.height = abs(y1) + abs(y2) self.points = tuple( [(i[0] + self.x, i[1] + self.y) for i in points]) return self
[ "def", "parse_xml", "(", "self", ",", "node", ")", ":", "def", "read_points", "(", "text", ")", ":", "\"\"\"parse a text string of float tuples and return [(x,...),...]\n \"\"\"", "return", "tuple", "(", "tuple", "(", "map", "(", "float", ",", "i", ".", "split", "(", "','", ")", ")", ")", "for", "i", "in", "text", ".", "split", "(", ")", ")", "self", ".", "_set_properties", "(", "node", ")", "# correctly handle \"tile objects\" (object with gid set)", "if", "self", ".", "gid", ":", "self", ".", "gid", "=", "self", ".", "parent", ".", "register_gid", "(", "self", ".", "gid", ")", "points", "=", "None", "polygon", "=", "node", ".", "find", "(", "'polygon'", ")", "if", "polygon", "is", "not", "None", ":", "points", "=", "read_points", "(", "polygon", ".", "get", "(", "'points'", ")", ")", "self", ".", "closed", "=", "True", "polyline", "=", "node", ".", "find", "(", "'polyline'", ")", "if", "polyline", "is", "not", "None", ":", "points", "=", "read_points", "(", "polyline", ".", "get", "(", "'points'", ")", ")", "self", ".", "closed", "=", "False", "if", "points", ":", "x1", "=", "x2", "=", "y1", "=", "y2", "=", "0", "for", "x", ",", "y", "in", "points", ":", "if", "x", "<", "x1", ":", "x1", "=", "x", "if", "x", ">", "x2", ":", "x2", "=", "x", "if", "y", "<", "y1", ":", "y1", "=", "y", "if", "y", ">", "y2", ":", "y2", "=", "y", "self", ".", "width", "=", "abs", "(", "x1", ")", "+", "abs", "(", "x2", ")", "self", ".", "height", "=", "abs", "(", "y1", ")", "+", "abs", "(", "y2", ")", "self", ".", "points", "=", "tuple", "(", "[", "(", "i", "[", "0", "]", "+", "self", ".", "x", ",", "i", "[", "1", "]", "+", "self", ".", "y", ")", "for", "i", "in", "points", "]", ")", "return", "self" ]
Parse an Object from ElementTree xml node :param node: ElementTree xml node :return: self
[ "Parse", "an", "Object", "from", "ElementTree", "xml", "node" ]
python
train
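The points parsing and extent computation can be tried in isolation; the coordinates below are arbitrary:

def read_points(text):
    """Parse a TMX-style points string such as "-4,0 3,2 0,-5"."""
    return tuple(tuple(map(float, pair.split(","))) for pair in text.split())

def extent_of(points):
    """Return the (width, height) spanned by the points around the origin."""
    x1 = x2 = y1 = y2 = 0
    for x, y in points:
        x1, x2 = min(x1, x), max(x2, x)
        y1, y2 = min(y1, y), max(y2, y)
    return abs(x1) + abs(x2), abs(y1) + abs(y2)

print(extent_of(read_points("-4,0 3,2 0,-5")))  # (7.0, 7.0)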
HewlettPackard/python-hpOneView
hpOneView/resources/servers/server_profiles.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/servers/server_profiles.py#L108-L135
def delete_all(self, filter, timeout=-1, force=False): """ Deletes all Server Profile objects from the appliance that match the provided filter. Filters are supported only for the following profile attributes: name, description, serialnumber, uuid, mactype, wwntype, serialnumbertype, status, and state. Examples: >>> server_profile_client.delete_all(filter="name='Exchange Server'") # Remove all profiles that match the name "Exchange Server" >>> server_profile_client.delete_all(filter="name matches'%25Database%25'") # Remove all profiles that have the word "Database" in its name The filter function here operates similarly to the function defined for GET Server Profiles. It allows for both actual and partial matches of data in the profile. Any requests that use a wildcard match must include a %25 as illustrated in the previous example. This is how you encode that character for transmission to the appliance. Args: filter (dict): Object to delete. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicates whether the server profile was successfully deleted. """ return self._helper.delete_all(filter=filter, force=force, timeout=timeout)
[ "def", "delete_all", "(", "self", ",", "filter", ",", "timeout", "=", "-", "1", ",", "force", "=", "False", ")", ":", "return", "self", ".", "_helper", ".", "delete_all", "(", "filter", "=", "filter", ",", "force", "=", "force", ",", "timeout", "=", "timeout", ")" ]
Deletes all Server Profile objects from the appliance that match the provided filter. Filters are supported only for the following profile attributes: name, description, serialnumber, uuid, mactype, wwntype, serialnumbertype, status, and state. Examples: >>> server_profile_client.delete_all(filter="name='Exchange Server'") # Remove all profiles that match the name "Exchange Server" >>> server_profile_client.delete_all(filter="name matches'%25Database%25'") # Remove all profiles that have the word "Database" in its name The filter function here operates similarly to the function defined for GET Server Profiles. It allows for both actual and partial matches of data in the profile. Any requests that use a wildcard match must include a %25 as illustrated in the previous example. This is how you encode that character for transmission to the appliance. Args: filter (dict): Object to delete. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicates whether the server profile was successfully deleted.
[ "Deletes", "all", "Server", "Profile", "objects", "from", "the", "appliance", "that", "match", "the", "provided", "filter", ".", "Filters", "are", "supported", "only", "for", "the", "following", "profile", "attributes", ":", "name", "description", "serialnumber", "uuid", "mactype", "wwntype", "serialnumbertype", "status", "and", "state", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_ti_batch.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L1292-L1344
def submit(self, poll=True, errors=True, process_files=True, halt_on_error=True): """Submit Batch request to ThreatConnect API. By default this method will submit the job request and data and if the size of the data is below the value **synchronousBatchSaveLimit** set in System Setting it will process the request synchronously and return the batch status. If the size of the batch is greater than the value set the batch job will be queued. Errors are not retrieve automatically and need to be enabled. If any of the submit, poll, or error methods fail the entire submit will halt at the point of failure. The behavior can be changed by setting halt_on_error to False. Each of these methods can also be called on their own for greater control of the submit process. Args: poll (bool, default:True): Poll for status. errors (bool, default:True): Retrieve any batch errors (only if poll is True). process_files (bool, default:True): Send any document or report attachments to the API. halt_on_error (bool, default:True): If True any exception will raise an error. Returns. dict: The Batch Status from the ThreatConnect API. """ batch_data = ( self.submit_create_and_upload(halt_on_error).get('data', {}).get('batchStatus', {}) ) batch_id = batch_data.get('id') if batch_id is not None: self.tcex.log.info('Batch ID: {}'.format(batch_id)) # job hit queue if poll: # poll for status batch_data = ( self.poll(batch_id, halt_on_error=halt_on_error) .get('data', {}) .get('batchStatus') ) if errors: # retrieve errors error_groups = batch_data.get('errorGroupCount', 0) error_indicators = batch_data.get('errorIndicatorCount', 0) if error_groups > 0 or error_indicators > 0: self.tcex.log.debug('retrieving batch errors') batch_data['errors'] = self.errors(batch_id) else: # can't process files if status is unknown (polling must be enabled) process_files = False if process_files: # submit file data after batch job is complete batch_data['uploadStatus'] = self.submit_files(halt_on_error) return batch_data
[ "def", "submit", "(", "self", ",", "poll", "=", "True", ",", "errors", "=", "True", ",", "process_files", "=", "True", ",", "halt_on_error", "=", "True", ")", ":", "batch_data", "=", "(", "self", ".", "submit_create_and_upload", "(", "halt_on_error", ")", ".", "get", "(", "'data'", ",", "{", "}", ")", ".", "get", "(", "'batchStatus'", ",", "{", "}", ")", ")", "batch_id", "=", "batch_data", ".", "get", "(", "'id'", ")", "if", "batch_id", "is", "not", "None", ":", "self", ".", "tcex", ".", "log", ".", "info", "(", "'Batch ID: {}'", ".", "format", "(", "batch_id", ")", ")", "# job hit queue", "if", "poll", ":", "# poll for status", "batch_data", "=", "(", "self", ".", "poll", "(", "batch_id", ",", "halt_on_error", "=", "halt_on_error", ")", ".", "get", "(", "'data'", ",", "{", "}", ")", ".", "get", "(", "'batchStatus'", ")", ")", "if", "errors", ":", "# retrieve errors", "error_groups", "=", "batch_data", ".", "get", "(", "'errorGroupCount'", ",", "0", ")", "error_indicators", "=", "batch_data", ".", "get", "(", "'errorIndicatorCount'", ",", "0", ")", "if", "error_groups", ">", "0", "or", "error_indicators", ">", "0", ":", "self", ".", "tcex", ".", "log", ".", "debug", "(", "'retrieving batch errors'", ")", "batch_data", "[", "'errors'", "]", "=", "self", ".", "errors", "(", "batch_id", ")", "else", ":", "# can't process files if status is unknown (polling must be enabled)", "process_files", "=", "False", "if", "process_files", ":", "# submit file data after batch job is complete", "batch_data", "[", "'uploadStatus'", "]", "=", "self", ".", "submit_files", "(", "halt_on_error", ")", "return", "batch_data" ]
Submit Batch request to ThreatConnect API. By default this method will submit the job request and data and if the size of the data is below the value **synchronousBatchSaveLimit** set in System Setting it will process the request synchronously and return the batch status. If the size of the batch is greater than the value set the batch job will be queued. Errors are not retrieved automatically and need to be enabled. If any of the submit, poll, or error methods fail the entire submit will halt at the point of failure. The behavior can be changed by setting halt_on_error to False. Each of these methods can also be called on their own for greater control of the submit process. Args: poll (bool, default:True): Poll for status. errors (bool, default:True): Retrieve any batch errors (only if poll is True). process_files (bool, default:True): Send any document or report attachments to the API. halt_on_error (bool, default:True): If True any exception will raise an error. Returns: dict: The Batch Status from the ThreatConnect API.
[ "Submit", "Batch", "request", "to", "ThreatConnect", "API", "." ]
python
train
marteinn/AtomicPress
atomicpress/utils/files.py
https://github.com/marteinn/AtomicPress/blob/b8a0ca9c9c327f062833fc4a401a8ac0baccf6d1/atomicpress/utils/files.py#L16-L36
def generate_image_from_url(url=None, timeout=30): """ Downloads and saves a image from url into a file. """ file_name = posixpath.basename(url) img_tmp = NamedTemporaryFile(delete=True) try: response = requests.get(url, timeout=timeout) response.raise_for_status() except Exception as e: # NOQA return None, None img_tmp.write(response.content) img_tmp.flush() image = File(img_tmp) image.seek(0) return file_name, image
[ "def", "generate_image_from_url", "(", "url", "=", "None", ",", "timeout", "=", "30", ")", ":", "file_name", "=", "posixpath", ".", "basename", "(", "url", ")", "img_tmp", "=", "NamedTemporaryFile", "(", "delete", "=", "True", ")", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "timeout", ")", "response", ".", "raise_for_status", "(", ")", "except", "Exception", "as", "e", ":", "# NOQA", "return", "None", ",", "None", "img_tmp", ".", "write", "(", "response", ".", "content", ")", "img_tmp", ".", "flush", "(", ")", "image", "=", "File", "(", "img_tmp", ")", "image", ".", "seek", "(", "0", ")", "return", "file_name", ",", "image" ]
Downloads and saves an image from url into a file.
[ "Downloads", "and", "saves", "a", "image", "from", "url", "into", "a", "file", "." ]
python
train
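A sketch of the same download-to-temporary-file flow, assuming the requests package is available and leaving out the Django File wrapper; the URL in the comment is a placeholder:

import posixpath
from tempfile import NamedTemporaryFile

import requests

def fetch_to_tempfile(url, timeout=30):
    """Download url into a temporary file; return (file_name, handle) or (None, None) on failure."""
    file_name = posixpath.basename(url)
    tmp = NamedTemporaryFile(delete=True)
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
    except requests.RequestException:
        return None, None
    tmp.write(response.content)
    tmp.flush()
    tmp.seek(0)
    return file_name, tmp

# name, handle = fetch_to_tempfile("https://example.com/logo.png")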
obriencj/python-javatools
javatools/distdiff.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/distdiff.py#L304-L363
def collect_impl(self): """ emits change instances based on the delta of the two distribution directories """ ld = self.ldata rd = self.rdata deep = not self.shallow for event, entry in compare(ld, rd): if deep and fnmatches(entry, *JAR_PATTERNS): if event == LEFT: yield DistJarRemoved(ld, rd, entry) elif event == RIGHT: yield DistJarAdded(ld, rd, entry) elif event == DIFF: yield DistJarChange(ld, rd, entry, True) elif event == SAME: yield DistJarChange(ld, rd, entry, False) elif deep and fnmatches(entry, "*.class"): if event == LEFT: yield DistClassRemoved(ld, rd, entry) elif event == RIGHT: yield DistClassAdded(ld, rd, entry) elif event == DIFF: yield DistClassChange(ld, rd, entry, True) elif event == SAME: yield DistClassChange(ld, rd, entry, False) elif deep and fnmatches(entry, *TEXT_PATTERNS): if event == LEFT: yield DistContentRemoved(ld, rd, entry) elif event == RIGHT: yield DistContentAdded(ld, rd, entry) elif event == DIFF: yield DistTextChange(ld, rd, entry) elif event == SAME: yield DistTextChange(ld, rd, entry, False) elif deep and fnmatches(entry, "*/MANIFEST.MF"): if event == LEFT: yield DistContentRemoved(ld, rd, entry) elif event == RIGHT: yield DistContentAdded(ld, rd, entry) elif event == DIFF: yield DistManifestChange(ld, rd, entry, True) elif event == SAME: yield DistManifestChange(ld, rd, entry, False) else: if event == LEFT: yield DistContentRemoved(ld, rd, entry) elif event == RIGHT: yield DistContentAdded(ld, rd, entry) elif event == DIFF: yield DistContentChange(ld, rd, entry, True) elif event == SAME: yield DistContentChange(ld, rd, entry, False)
[ "def", "collect_impl", "(", "self", ")", ":", "ld", "=", "self", ".", "ldata", "rd", "=", "self", ".", "rdata", "deep", "=", "not", "self", ".", "shallow", "for", "event", ",", "entry", "in", "compare", "(", "ld", ",", "rd", ")", ":", "if", "deep", "and", "fnmatches", "(", "entry", ",", "*", "JAR_PATTERNS", ")", ":", "if", "event", "==", "LEFT", ":", "yield", "DistJarRemoved", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "RIGHT", ":", "yield", "DistJarAdded", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "DIFF", ":", "yield", "DistJarChange", "(", "ld", ",", "rd", ",", "entry", ",", "True", ")", "elif", "event", "==", "SAME", ":", "yield", "DistJarChange", "(", "ld", ",", "rd", ",", "entry", ",", "False", ")", "elif", "deep", "and", "fnmatches", "(", "entry", ",", "\"*.class\"", ")", ":", "if", "event", "==", "LEFT", ":", "yield", "DistClassRemoved", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "RIGHT", ":", "yield", "DistClassAdded", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "DIFF", ":", "yield", "DistClassChange", "(", "ld", ",", "rd", ",", "entry", ",", "True", ")", "elif", "event", "==", "SAME", ":", "yield", "DistClassChange", "(", "ld", ",", "rd", ",", "entry", ",", "False", ")", "elif", "deep", "and", "fnmatches", "(", "entry", ",", "*", "TEXT_PATTERNS", ")", ":", "if", "event", "==", "LEFT", ":", "yield", "DistContentRemoved", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "RIGHT", ":", "yield", "DistContentAdded", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "DIFF", ":", "yield", "DistTextChange", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "SAME", ":", "yield", "DistTextChange", "(", "ld", ",", "rd", ",", "entry", ",", "False", ")", "elif", "deep", "and", "fnmatches", "(", "entry", ",", "\"*/MANIFEST.MF\"", ")", ":", "if", "event", "==", "LEFT", ":", "yield", "DistContentRemoved", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "RIGHT", ":", "yield", "DistContentAdded", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "DIFF", ":", "yield", "DistManifestChange", "(", "ld", ",", "rd", ",", "entry", ",", "True", ")", "elif", "event", "==", "SAME", ":", "yield", "DistManifestChange", "(", "ld", ",", "rd", ",", "entry", ",", "False", ")", "else", ":", "if", "event", "==", "LEFT", ":", "yield", "DistContentRemoved", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "RIGHT", ":", "yield", "DistContentAdded", "(", "ld", ",", "rd", ",", "entry", ")", "elif", "event", "==", "DIFF", ":", "yield", "DistContentChange", "(", "ld", ",", "rd", ",", "entry", ",", "True", ")", "elif", "event", "==", "SAME", ":", "yield", "DistContentChange", "(", "ld", ",", "rd", ",", "entry", ",", "False", ")" ]
emits change instances based on the delta of the two distribution directories
[ "emits", "change", "instances", "based", "on", "the", "delta", "of", "the", "two", "distribution", "directories" ]
python
train
tensorflow/probability
tensorflow_probability/python/distributions/von_mises_fisher.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L36-L73
def _bessel_ive(v, z, cache=None): """Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0.""" # TODO(b/67497980): Switch to a more numerically faithful implementation. z = tf.convert_to_tensor(value=z) wrap = lambda result: tf.debugging.check_numerics(result, 'besseli{}'.format(v )) if float(v) >= 2: raise ValueError( 'Evaluating bessel_i by recurrence becomes imprecise for large v') cache = cache or {} safe_z = tf.where(z > 0, z, tf.ones_like(z)) if v in cache: return wrap(cache[v]) if v == 0: cache[v] = tf.math.bessel_i0e(z) elif v == 1: cache[v] = tf.math.bessel_i1e(z) elif v == 0.5: # sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2 sinhe = lambda x: (tf.exp(x - tf.abs(x)) - tf.exp(-x - tf.abs(x))) / 2 cache[v] = ( np.sqrt(2 / np.pi) * sinhe(z) * tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z))) elif v == -0.5: # cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2 coshe = lambda x: (tf.exp(x - tf.abs(x)) + tf.exp(-x - tf.abs(x))) / 2 cache[v] = ( np.sqrt(2 / np.pi) * coshe(z) * tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z))) if v <= 1: return wrap(cache[v]) # Recurrence relation: cache[v] = (_bessel_ive(v - 2, z, cache) - (2 * (v - 1)) * _bessel_ive(v - 1, z, cache) / z) return wrap(cache[v])
[ "def", "_bessel_ive", "(", "v", ",", "z", ",", "cache", "=", "None", ")", ":", "# TODO(b/67497980): Switch to a more numerically faithful implementation.", "z", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "z", ")", "wrap", "=", "lambda", "result", ":", "tf", ".", "debugging", ".", "check_numerics", "(", "result", ",", "'besseli{}'", ".", "format", "(", "v", ")", ")", "if", "float", "(", "v", ")", ">=", "2", ":", "raise", "ValueError", "(", "'Evaluating bessel_i by recurrence becomes imprecise for large v'", ")", "cache", "=", "cache", "or", "{", "}", "safe_z", "=", "tf", ".", "where", "(", "z", ">", "0", ",", "z", ",", "tf", ".", "ones_like", "(", "z", ")", ")", "if", "v", "in", "cache", ":", "return", "wrap", "(", "cache", "[", "v", "]", ")", "if", "v", "==", "0", ":", "cache", "[", "v", "]", "=", "tf", ".", "math", ".", "bessel_i0e", "(", "z", ")", "elif", "v", "==", "1", ":", "cache", "[", "v", "]", "=", "tf", ".", "math", ".", "bessel_i1e", "(", "z", ")", "elif", "v", "==", "0.5", ":", "# sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2", "sinhe", "=", "lambda", "x", ":", "(", "tf", ".", "exp", "(", "x", "-", "tf", ".", "abs", "(", "x", ")", ")", "-", "tf", ".", "exp", "(", "-", "x", "-", "tf", ".", "abs", "(", "x", ")", ")", ")", "/", "2", "cache", "[", "v", "]", "=", "(", "np", ".", "sqrt", "(", "2", "/", "np", ".", "pi", ")", "*", "sinhe", "(", "z", ")", "*", "tf", ".", "where", "(", "z", ">", "0", ",", "tf", ".", "math", ".", "rsqrt", "(", "safe_z", ")", ",", "tf", ".", "ones_like", "(", "safe_z", ")", ")", ")", "elif", "v", "==", "-", "0.5", ":", "# cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2", "coshe", "=", "lambda", "x", ":", "(", "tf", ".", "exp", "(", "x", "-", "tf", ".", "abs", "(", "x", ")", ")", "+", "tf", ".", "exp", "(", "-", "x", "-", "tf", ".", "abs", "(", "x", ")", ")", ")", "/", "2", "cache", "[", "v", "]", "=", "(", "np", ".", "sqrt", "(", "2", "/", "np", ".", "pi", ")", "*", "coshe", "(", "z", ")", "*", "tf", ".", "where", "(", "z", ">", "0", ",", "tf", ".", "math", ".", "rsqrt", "(", "safe_z", ")", ",", "tf", ".", "ones_like", "(", "safe_z", ")", ")", ")", "if", "v", "<=", "1", ":", "return", "wrap", "(", "cache", "[", "v", "]", ")", "# Recurrence relation:", "cache", "[", "v", "]", "=", "(", "_bessel_ive", "(", "v", "-", "2", ",", "z", ",", "cache", ")", "-", "(", "2", "*", "(", "v", "-", "1", ")", ")", "*", "_bessel_ive", "(", "v", "-", "1", ",", "z", ",", "cache", ")", "/", "z", ")", "return", "wrap", "(", "cache", "[", "v", "]", ")" ]
Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0.
[ "Computes", "I_v", "(", "z", ")", "*", "exp", "(", "-", "abs", "(", "z", "))", "using", "a", "recurrence", "relation", "where", "z", ">", "0", "." ]
python
test
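The recurrence can be checked in pure Python for half-integer orders using only the math module; the test value z=2.5 is arbitrary:

import math

def bessel_ive_half(v, z):
    """I_v(z) * exp(-z) for z > 0 and half-integer v, via I_v = I_{v-2} - 2*(v-1)/z * I_{v-1}."""
    if v == -0.5:
        return math.sqrt(2 / (math.pi * z)) * math.cosh(z) * math.exp(-z)
    if v == 0.5:
        return math.sqrt(2 / (math.pi * z)) * math.sinh(z) * math.exp(-z)
    return bessel_ive_half(v - 2, z) - 2 * (v - 1) / z * bessel_ive_half(v - 1, z)

print(bessel_ive_half(1.5, 2.5))  # should agree with scipy.special.ive(1.5, 2.5)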
saltstack/salt
salt/states/chocolatey.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/chocolatey.py#L202-L278
def uninstalled(name, version=None, uninstall_args=None, override_args=False): ''' Uninstalls a package name The name of the package to be uninstalled version Uninstalls a specific version of the package. Defaults to latest version installed. uninstall_args A list of uninstall arguments you want to pass to the uninstallation process i.e product key or feature list override_args Set to true if you want to override the original uninstall arguments ( for the native uninstaller)in the package and use your own. When this is set to False uninstall_args will be appended to the end of the default arguments .. code-block: yaml Removemypackage: chocolatey.uninstalled: - name: mypackage - version: '21.5' ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} # Get list of currently installed packages pre_uninstall = __salt__['chocolatey.list'](local_only=True) # Determine if package is installed if name.lower() in [package.lower() for package in pre_uninstall.keys()]: try: ret['changes'] = { name: '{0} version {1} will be removed'.format( name, pre_uninstall[name][0] ) } except KeyError: ret['changes'] = {name: '{0} will be removed'.format(name)} else: ret['comment'] = 'The package {0} is not installed'.format(name) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'The uninstall was tested' return ret # Uninstall the package result = __salt__['chocolatey.uninstall'](name, version, uninstall_args, override_args) if 'Running chocolatey failed' not in result: ret['result'] = True else: ret['result'] = False if not ret['result']: ret['comment'] = 'Failed to uninstall the package {0}'.format(name) # Get list of installed packages after 'chocolatey.uninstall' post_uninstall = __salt__['chocolatey.list'](local_only=True) ret['changes'] = salt.utils.data.compare_dicts(pre_uninstall, post_uninstall) return ret
[ "def", "uninstalled", "(", "name", ",", "version", "=", "None", ",", "uninstall_args", "=", "None", ",", "override_args", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "# Get list of currently installed packages", "pre_uninstall", "=", "__salt__", "[", "'chocolatey.list'", "]", "(", "local_only", "=", "True", ")", "# Determine if package is installed", "if", "name", ".", "lower", "(", ")", "in", "[", "package", ".", "lower", "(", ")", "for", "package", "in", "pre_uninstall", ".", "keys", "(", ")", "]", ":", "try", ":", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "'{0} version {1} will be removed'", ".", "format", "(", "name", ",", "pre_uninstall", "[", "name", "]", "[", "0", "]", ")", "}", "except", "KeyError", ":", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "'{0} will be removed'", ".", "format", "(", "name", ")", "}", "else", ":", "ret", "[", "'comment'", "]", "=", "'The package {0} is not installed'", ".", "format", "(", "name", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'The uninstall was tested'", "return", "ret", "# Uninstall the package", "result", "=", "__salt__", "[", "'chocolatey.uninstall'", "]", "(", "name", ",", "version", ",", "uninstall_args", ",", "override_args", ")", "if", "'Running chocolatey failed'", "not", "in", "result", ":", "ret", "[", "'result'", "]", "=", "True", "else", ":", "ret", "[", "'result'", "]", "=", "False", "if", "not", "ret", "[", "'result'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Failed to uninstall the package {0}'", ".", "format", "(", "name", ")", "# Get list of installed packages after 'chocolatey.uninstall'", "post_uninstall", "=", "__salt__", "[", "'chocolatey.list'", "]", "(", "local_only", "=", "True", ")", "ret", "[", "'changes'", "]", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "pre_uninstall", ",", "post_uninstall", ")", "return", "ret" ]
Uninstalls a package name The name of the package to be uninstalled version Uninstalls a specific version of the package. Defaults to latest version installed. uninstall_args A list of uninstall arguments you want to pass to the uninstallation process i.e. product key or feature list override_args Set to true if you want to override the original uninstall arguments (for the native uninstaller) in the package and use your own. When this is set to False uninstall_args will be appended to the end of the default arguments .. code-block:: yaml Removemypackage: chocolatey.uninstalled: - name: mypackage - version: '21.5'
[ "Uninstalls", "a", "package" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L3661-L3679
def unassign_item_from_bank(self, item_id, bank_id): """Removes an ``Item`` from a ``Bank``. arg: item_id (osid.id.Id): the ``Id`` of the ``Item`` arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` raise: NotFound - ``item_id`` or ``bank_id`` not found or ``item_id`` not assigned to ``bank_id`` raise: NullArgument - ``item_id`` or ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin mgr = self._get_provider_manager('ASSESSMENT', local=True) lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy) lookup_session.get_bank(bank_id) # to raise NotFound self._unassign_object_from_catalog(item_id, bank_id)
[ "def", "unassign_item_from_bank", "(", "self", ",", "item_id", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_bank_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "lookup_session", ".", "get_bank", "(", "bank_id", ")", "# to raise NotFound", "self", ".", "_unassign_object_from_catalog", "(", "item_id", ",", "bank_id", ")" ]
Removes an ``Item`` from a ``Bank``. arg: item_id (osid.id.Id): the ``Id`` of the ``Item`` arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` raise: NotFound - ``item_id`` or ``bank_id`` not found or ``item_id`` not assigned to ``bank_id`` raise: NullArgument - ``item_id`` or ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
[ "Removes", "an", "Item", "from", "a", "Bank", "." ]
python
train
trp07/messages
messages/_utils.py
https://github.com/trp07/messages/blob/7789ebc960335a59ea5d319fceed3dd349023648/messages/_utils.py#L117-L130
def validate_whatsapp(attr, value): """WhatsApp input validator function.""" if attr in ("from_", "to"): if value is not None and "whatsapp:" in value: value = value.split("whatsapp:+")[-1] check_valid( "WhatsApp", attr, value, validus.isint, "phone number starting with the '+' symbol", ) elif attr in ("attachments"): check_valid("WhatsApp", attr, value, validus.isurl, "url")
[ "def", "validate_whatsapp", "(", "attr", ",", "value", ")", ":", "if", "attr", "in", "(", "\"from_\"", ",", "\"to\"", ")", ":", "if", "value", "is", "not", "None", "and", "\"whatsapp:\"", "in", "value", ":", "value", "=", "value", ".", "split", "(", "\"whatsapp:+\"", ")", "[", "-", "1", "]", "check_valid", "(", "\"WhatsApp\"", ",", "attr", ",", "value", ",", "validus", ".", "isint", ",", "\"phone number starting with the '+' symbol\"", ",", ")", "elif", "attr", "in", "(", "\"attachments\"", ")", ":", "check_valid", "(", "\"WhatsApp\"", ",", "attr", ",", "value", ",", "validus", ".", "isurl", ",", "\"url\"", ")" ]
WhatsApp input validator function.
[ "WhatsApp", "input", "validator", "function", "." ]
python
test
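The prefix handling can be seen on its own; the phone number is fabricated:

def strip_whatsapp_prefix(value):
    """Reduce 'whatsapp:+15551234567' to the bare digits the integer check expects."""
    if value is not None and "whatsapp:" in value:
        value = value.split("whatsapp:+")[-1]
    return value

print(strip_whatsapp_prefix("whatsapp:+15551234567"))  # 15551234567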
sporsh/carnifex
carnifex/ssh/session.py
https://github.com/sporsh/carnifex/blob/82dd3bd2bc134dfb69a78f43171e227f2127060b/carnifex/ssh/session.py#L104-L108
def requestSubsystem(self, subsystem): """Request a subsystem and return a deferred reply. """ data = common.NS(subsystem) return self.sendRequest('subsystem', data, wantReply=True)
[ "def", "requestSubsystem", "(", "self", ",", "subsystem", ")", ":", "data", "=", "common", ".", "NS", "(", "subsystem", ")", "return", "self", ".", "sendRequest", "(", "'subsystem'", ",", "data", ",", "wantReply", "=", "True", ")" ]
Request a subsystem and return a deferred reply.
[ "Request", "a", "subsystem", "and", "return", "a", "deferred", "reply", "." ]
python
train
robehickman/simple-http-file-sync
shttpfs/server.py
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L314-L330
def pull_file(): """ Get a file from the server """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== data_store = versioned_storage(config['repositories'][repository]['path']) file_info = data_store.get_file_info_from_path(request.headers['path']) return success({'file_info_json' : json.dumps(file_info)}, send_from_directory(data_store.get_file_directory_path(file_info['hash']), file_info['hash'][2:]))
[ "def", "pull_file", "(", ")", ":", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "#===", "current_user", "=", "have_authenticated_user", "(", "request", ".", "environ", "[", "'REMOTE_ADDR'", "]", ",", "repository", ",", "session_token", ")", "if", "current_user", "is", "False", ":", "return", "fail", "(", "user_auth_fail_msg", ")", "#===", "data_store", "=", "versioned_storage", "(", "config", "[", "'repositories'", "]", "[", "repository", "]", "[", "'path'", "]", ")", "file_info", "=", "data_store", ".", "get_file_info_from_path", "(", "request", ".", "headers", "[", "'path'", "]", ")", "return", "success", "(", "{", "'file_info_json'", ":", "json", ".", "dumps", "(", "file_info", ")", "}", ",", "send_from_directory", "(", "data_store", ".", "get_file_directory_path", "(", "file_info", "[", "'hash'", "]", ")", ",", "file_info", "[", "'hash'", "]", "[", "2", ":", "]", ")", ")" ]
Get a file from the server
[ "Get", "a", "file", "from", "the", "server" ]
python
train
pandas-dev/pandas
pandas/io/formats/format.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1442-L1461
def _trim_zeros_float(str_floats, na_rep='NaN'): """ Trims zeros, leaving just one before the decimal points if need be. """ trimmed = str_floats def _is_number(x): return (x != na_rep and not x.endswith('inf')) def _cond(values): finite = [x for x in values if _is_number(x)] return (len(finite) > 0 and all(x.endswith('0') for x in finite) and not (any(('e' in x) or ('E' in x) for x in finite))) while _cond(trimmed): trimmed = [x[:-1] if _is_number(x) else x for x in trimmed] # leave one 0 after the decimal points if need be. return [x + "0" if x.endswith('.') and _is_number(x) else x for x in trimmed]
[ "def", "_trim_zeros_float", "(", "str_floats", ",", "na_rep", "=", "'NaN'", ")", ":", "trimmed", "=", "str_floats", "def", "_is_number", "(", "x", ")", ":", "return", "(", "x", "!=", "na_rep", "and", "not", "x", ".", "endswith", "(", "'inf'", ")", ")", "def", "_cond", "(", "values", ")", ":", "finite", "=", "[", "x", "for", "x", "in", "values", "if", "_is_number", "(", "x", ")", "]", "return", "(", "len", "(", "finite", ")", ">", "0", "and", "all", "(", "x", ".", "endswith", "(", "'0'", ")", "for", "x", "in", "finite", ")", "and", "not", "(", "any", "(", "(", "'e'", "in", "x", ")", "or", "(", "'E'", "in", "x", ")", "for", "x", "in", "finite", ")", ")", ")", "while", "_cond", "(", "trimmed", ")", ":", "trimmed", "=", "[", "x", "[", ":", "-", "1", "]", "if", "_is_number", "(", "x", ")", "else", "x", "for", "x", "in", "trimmed", "]", "# leave one 0 after the decimal points if need be.", "return", "[", "x", "+", "\"0\"", "if", "x", ".", "endswith", "(", "'.'", ")", "and", "_is_number", "(", "x", ")", "else", "x", "for", "x", "in", "trimmed", "]" ]
Trims zeros, leaving just one before the decimal points if need be.
[ "Trims", "zeros", "leaving", "just", "one", "before", "the", "decimal", "points", "if", "need", "be", "." ]
python
train
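The trimming loop behaves like this on plain string input; the sample values are made up:

def trim_zeros_float(str_floats, na_rep="NaN"):
    """Drop shared trailing zeros, keeping at least one digit after the decimal point."""
    def is_number(x):
        return x != na_rep and not x.endswith("inf")

    trimmed = list(str_floats)
    while True:
        finite = [x for x in trimmed if is_number(x)]
        if not (finite and all(x.endswith("0") for x in finite)
                and not any("e" in x or "E" in x for x in finite)):
            break
        trimmed = [x[:-1] if is_number(x) else x for x in trimmed]
    return [x + "0" if x.endswith(".") and is_number(x) else x for x in trimmed]

print(trim_zeros_float(["1.500", "2.000", "NaN"]))  # ['1.5', '2.0', 'NaN']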
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L216-L234
def try_get_dn_part(subject, oid=None): """ Tries to extracts the OID from the X500 name. :param subject: :param oid: :return: """ try: if subject is None: return None if oid is None: return None for sub in subject: if oid is not None and sub.oid == oid: return sub.value except: pass return None
[ "def", "try_get_dn_part", "(", "subject", ",", "oid", "=", "None", ")", ":", "try", ":", "if", "subject", "is", "None", ":", "return", "None", "if", "oid", "is", "None", ":", "return", "None", "for", "sub", "in", "subject", ":", "if", "oid", "is", "not", "None", "and", "sub", ".", "oid", "==", "oid", ":", "return", "sub", ".", "value", "except", ":", "pass", "return", "None" ]
Tries to extract the OID from the X500 name. :param subject: :param oid: :return:
[ "Tries", "to", "extracts", "the", "OID", "from", "the", "X500", "name", ".", ":", "param", "subject", ":", ":", "param", "oid", ":", ":", "return", ":" ]
python
train
sassoo/goldman
goldman/utils/responder_helpers.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/utils/responder_helpers.py#L317-L334
def to_rest_models(models, includes=None): """ Convert the models into a dict for serialization models should be an array of single model objects that will each be serialized. :return: dict """ props = {} props['data'] = [] for model in models: props['data'].append(_to_rest(model, includes=includes)) props['included'] = _to_rest_includes(models, includes=includes) return props
[ "def", "to_rest_models", "(", "models", ",", "includes", "=", "None", ")", ":", "props", "=", "{", "}", "props", "[", "'data'", "]", "=", "[", "]", "for", "model", "in", "models", ":", "props", "[", "'data'", "]", ".", "append", "(", "_to_rest", "(", "model", ",", "includes", "=", "includes", ")", ")", "props", "[", "'included'", "]", "=", "_to_rest_includes", "(", "models", ",", "includes", "=", "includes", ")", "return", "props" ]
Convert the models into a dict for serialization models should be an array of single model objects that will each be serialized. :return: dict
[ "Convert", "the", "models", "into", "a", "dict", "for", "serialization" ]
python
train
shaunduncan/giphypop
giphypop.py
https://github.com/shaunduncan/giphypop/blob/21e7f51c4f000ae24be3805b7eeec52bcce3d390/giphypop.py#L509-L516
def translate(term=None, phrase=None, api_key=GIPHY_PUBLIC_KEY, strict=False, rating=None): """ Shorthand for creating a Giphy api wrapper with the given api key and then calling the translate method. """ return Giphy(api_key=api_key, strict=strict).translate( term=term, phrase=phrase, rating=rating)
[ "def", "translate", "(", "term", "=", "None", ",", "phrase", "=", "None", ",", "api_key", "=", "GIPHY_PUBLIC_KEY", ",", "strict", "=", "False", ",", "rating", "=", "None", ")", ":", "return", "Giphy", "(", "api_key", "=", "api_key", ",", "strict", "=", "strict", ")", ".", "translate", "(", "term", "=", "term", ",", "phrase", "=", "phrase", ",", "rating", "=", "rating", ")" ]
Shorthand for creating a Giphy api wrapper with the given api key and then calling the translate method.
[ "Shorthand", "for", "creating", "a", "Giphy", "api", "wrapper", "with", "the", "given", "api", "key", "and", "then", "calling", "the", "translate", "method", "." ]
python
test
VIVelev/PyDojoML
dojo/base/model.py
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/base/model.py#L85-L106
def fit(self, X, y): """Fits the given model to the data and labels provided. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. Returns: -------- self : instance of the model itself (`self`) """ X = np.array(X, dtype=np.float32) y = np.array(y, dtype=np.float32) assert X.shape[0] == y.shape[0] return X, y
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "X", "=", "np", ".", "array", "(", "X", ",", "dtype", "=", "np", ".", "float32", ")", "y", "=", "np", ".", "array", "(", "y", ",", "dtype", "=", "np", ".", "float32", ")", "assert", "X", ".", "shape", "[", "0", "]", "==", "y", ".", "shape", "[", "0", "]", "return", "X", ",", "y" ]
Fits the given model to the data and labels provided. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. Returns: -------- self : instance of the model itself (`self`)
[ "Fits", "the", "given", "model", "to", "the", "data", "and", "labels", "provided", ".", "Parameters", ":", "-----------", "X", ":", "matrix", "shape", "(", "n_samples", "n_features", ")", "The", "samples", "the", "train", "data", "." ]
python
train
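The array coercion and shape check can be reproduced directly, assuming NumPy is installed; the toy data is invented:

import numpy as np

def coerce_xy(X, y):
    """Cast inputs to float32 arrays and require matching sample counts."""
    X = np.array(X, dtype=np.float32)
    y = np.array(y, dtype=np.float32)
    assert X.shape[0] == y.shape[0], "X and y must have the same number of samples"
    return X, y

X, y = coerce_xy([[1, 2], [3, 4]], [0, 1])
print(X.dtype, X.shape, y.shape)  # float32 (2, 2) (2,)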
HewlettPackard/python-hpOneView
hpOneView/resources/settings/firmware_drivers.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/settings/firmware_drivers.py#L73-L92
def get_by(self, field, value): """ Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to filter the list of resources returned. The search is case-insensitive. Args: field: Field name to filter. value: Value to filter. Returns: list: List of firmware baseline resources. """ firmwares = self.get_all() matches = [] for item in firmwares: if item.get(field) == value: matches.append(item) return matches
[ "def", "get_by", "(", "self", ",", "field", ",", "value", ")", ":", "firmwares", "=", "self", ".", "get_all", "(", ")", "matches", "=", "[", "]", "for", "item", "in", "firmwares", ":", "if", "item", ".", "get", "(", "field", ")", "==", "value", ":", "matches", ".", "append", "(", "item", ")", "return", "matches" ]
Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to filter the list of resources returned. The search is case-insensitive. Args: field: Field name to filter. value: Value to filter. Returns: list: List of firmware baseline resources.
[ "Gets", "the", "list", "of", "firmware", "baseline", "resources", "managed", "by", "the", "appliance", ".", "Optional", "parameters", "can", "be", "used", "to", "filter", "the", "list", "of", "resources", "returned", "." ]
python
train
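Stripped of the appliance call, the field/value filtering is a plain list comprehension; the sample records are fictional:

def get_by(items, field, value):
    """Return every dict in items whose field equals value."""
    return [item for item in items if item.get(field) == value]

firmwares = [{"name": "SPP-2023", "state": "Created"},
             {"name": "SPP-2024", "state": "Active"}]
print(get_by(firmwares, "state", "Active"))  # [{'name': 'SPP-2024', 'state': 'Active'}]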
dwavesystems/dwave-system
dwave/embedding/polynomialembedder.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/polynomialembedder.py#L1064-L1098
def largestNativeBiClique(self, chain_imbalance=0, max_chain_length=None): """Returns a native embedding for the complete bipartite graph :math:`K_{n,m}` for `n <= m`; where `n` is as large as possible and `m` is as large as possible subject to `n`. The native embedding of a complete bipartite graph is a set of horizontally-aligned qubits connected in lines together with an equal-sized set of vertically-aligned qubits connected in lines. INPUTS: chain_imbalance: how big of a difference to allow between the chain lengths on the two sides of the bipartition. If ``None``, then we allow an arbitrary imbalance. (default: ``0``) max_chain_length: longest chain length to consider or None if chain lengths are allowed to be unbounded. (default: ``None``) OUTPUT: embedding (tuple): a tuple of two lists containing lists of qubits. If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and ``B_side`` are chains of qubits. These lists of qubits are arranged so that >>> [zip(chain,chain[1:]) for chain in A_side] and >>> [zip(chain,chain[1:]) for chain in B_side] are lists of valid couplers. """ def f(x): return x.largestNativeBiClique(chain_imbalance=chain_imbalance, max_chain_length=max_chain_length) objective = self._objective_bestscore emb = self._map_to_processors(f, objective) return self._translate_partitioned(emb)
[ "def", "largestNativeBiClique", "(", "self", ",", "chain_imbalance", "=", "0", ",", "max_chain_length", "=", "None", ")", ":", "def", "f", "(", "x", ")", ":", "return", "x", ".", "largestNativeBiClique", "(", "chain_imbalance", "=", "chain_imbalance", ",", "max_chain_length", "=", "max_chain_length", ")", "objective", "=", "self", ".", "_objective_bestscore", "emb", "=", "self", ".", "_map_to_processors", "(", "f", ",", "objective", ")", "return", "self", ".", "_translate_partitioned", "(", "emb", ")" ]
Returns a native embedding for the complete bipartite graph :math:`K_{n,m}` for `n <= m`; where `n` is as large as possible and `m` is as large as possible subject to `n`. The native embedding of a complete bipartite graph is a set of horizontally-aligned qubits connected in lines together with an equal-sized set of vertically-aligned qubits connected in lines. INPUTS: chain_imbalance: how big of a difference to allow between the chain lengths on the two sides of the bipartition. If ``None``, then we allow an arbitrary imbalance. (default: ``0``) max_chain_length: longest chain length to consider or None if chain lengths are allowed to be unbounded. (default: ``None``) OUTPUT: embedding (tuple): a tuple of two lists containing lists of qubits. If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and ``B_side`` are chains of qubits. These lists of qubits are arranged so that >>> [zip(chain,chain[1:]) for chain in A_side] and >>> [zip(chain,chain[1:]) for chain in B_side] are lists of valid couplers.
[ "Returns", "a", "native", "embedding", "for", "the", "complete", "bipartite", "graph", ":", "math", ":", "K_", "{", "n", "m", "}", "for", "n", "<", "=", "m", ";", "where", "n", "is", "as", "large", "as", "possible", "and", "m", "is", "as", "large", "as", "possible", "subject", "to", "n", ".", "The", "native", "embedding", "of", "a", "complete", "bipartite", "graph", "is", "a", "set", "of", "horizontally", "-", "aligned", "qubits", "connected", "in", "lines", "together", "with", "an", "equal", "-", "sized", "set", "of", "vertically", "-", "aligned", "qubits", "connected", "in", "lines", "." ]
python
train
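The docstring above notes that each returned chain is ordered so that zipping a chain against itself shifted by one yields valid couplers; a small self-contained sketch of that check, using invented qubit indices rather than a real Chimera graph:

# Hypothetical embedding in the (A_side, B_side) shape described above.
A_side = [[0, 4, 8], [1, 5, 9]]   # horizontally-aligned chains (made-up qubit ids)
B_side = [[2, 6], [3, 7]]         # vertically-aligned chains (made-up qubit ids)

# Consecutive qubits inside each chain are the couplers the docstring refers to.
a_couplers = [list(zip(chain, chain[1:])) for chain in A_side]
b_couplers = [list(zip(chain, chain[1:])) for chain in B_side]
print(a_couplers)  # [[(0, 4), (4, 8)], [(1, 5), (5, 9)]]
print(b_couplers)  # [[(2, 6)], [(3, 7)]]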
not-na/peng3d
peng3d/gui/menus.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/gui/menus.py#L151-L167
def add_label_main(self,label_main):
    """
    Adds the main label of the dialog.

    This widget can be triggered by setting the label ``label_main`` to a string.

    This widget will be centered on the screen.
    """
    # Main Label
    self.wlabel_main = text.Label("label_main",self,self.window,self.peng,
                                  pos=lambda sw,sh, bw,bh: (sw/2-bw/2,sh/2-bh/2),
                                  size=[0,0],
                                  label=label_main,
                                  #multiline=True, # TODO: implement multine dialog
                                  )
    self.wlabel_main.size = lambda sw,sh: (sw,self.wlabel_main._label.font_size)
    self.addWidget(self.wlabel_main)
[ "def", "add_label_main", "(", "self", ",", "label_main", ")", ":", "# Main Label", "self", ".", "wlabel_main", "=", "text", ".", "Label", "(", "\"label_main\"", ",", "self", ",", "self", ".", "window", ",", "self", ".", "peng", ",", "pos", "=", "lambda", "sw", ",", "sh", ",", "bw", ",", "bh", ":", "(", "sw", "/", "2", "-", "bw", "/", "2", ",", "sh", "/", "2", "-", "bh", "/", "2", ")", ",", "size", "=", "[", "0", ",", "0", "]", ",", "label", "=", "label_main", ",", "#multiline=True, # TODO: implement multine dialog", ")", "self", ".", "wlabel_main", ".", "size", "=", "lambda", "sw", ",", "sh", ":", "(", "sw", ",", "self", ".", "wlabel_main", ".", "_label", ".", "font_size", ")", "self", ".", "addWidget", "(", "self", ".", "wlabel_main", ")" ]
Adds the main label of the dialog. This widget can be triggered by setting the label ``label_main`` to a string. This widget will be centered on the screen.
[ "Adds", "the", "main", "label", "of", "the", "dialog", ".", "This", "widget", "can", "be", "triggered", "by", "setting", "the", "label", "label_main", "to", "a", "string", ".", "This", "widget", "will", "be", "centered", "on", "the", "screen", "." ]
python
test
SKA-ScienceDataProcessor/integration-prototype
sip/platform/logging/sip_logging/sip_logging.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/platform/logging/sip_logging/sip_logging.py#L119-L123
def set_log_level(logger_name: str, log_level: str, propagate: bool = False):
    """Set the log level of the specified logger."""
    log = logging.getLogger(logger_name)
    log.propagate = propagate
    log.setLevel(log_level)
[ "def", "set_log_level", "(", "logger_name", ":", "str", ",", "log_level", ":", "str", ",", "propagate", ":", "bool", "=", "False", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "logger_name", ")", "log", ".", "propagate", "=", "propagate", "log", ".", "setLevel", "(", "log_level", ")" ]
Set the log level of the specified logger.
[ "Set", "the", "log", "level", "of", "the", "specified", "logger", "." ]
python
train
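A runnable sketch of using this kind of helper with the standard logging module; the logger name and level below are arbitrary examples, not values from the SIP codebase:

import logging

def set_log_level(logger_name, log_level, propagate=False):
    # Same three steps as the helper above: look up the logger,
    # set propagation, set the level.
    log = logging.getLogger(logger_name)
    log.propagate = propagate
    log.setLevel(log_level)

logging.basicConfig(level=logging.WARNING)
set_log_level("sip.example", "DEBUG", propagate=True)  # string level names are accepted
logging.getLogger("sip.example").debug("now visible at DEBUG")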
LinuxChristian/pyW215
pyW215/pyW215.py
https://github.com/LinuxChristian/pyW215/blob/63e50b8ee11bc38ed66554f9b92429b552dda550/pyW215/pyW215.py#L285-L296
def state(self, value):
    """Set device state.

    :type value: str
    :param value: Future state (either ON or OFF)
    """
    if value.upper() == ON:
        return self.SOAPAction('SetSocketSettings', 'SetSocketSettingsResult', self.controlParameters("1", "true"))
    elif value.upper() == OFF:
        return self.SOAPAction('SetSocketSettings', 'SetSocketSettingsResult', self.controlParameters("1", "false"))
    else:
        raise TypeError("State %s is not valid." % str(value))
[ "def", "state", "(", "self", ",", "value", ")", ":", "if", "value", ".", "upper", "(", ")", "==", "ON", ":", "return", "self", ".", "SOAPAction", "(", "'SetSocketSettings'", ",", "'SetSocketSettingsResult'", ",", "self", ".", "controlParameters", "(", "\"1\"", ",", "\"true\"", ")", ")", "elif", "value", ".", "upper", "(", ")", "==", "OFF", ":", "return", "self", ".", "SOAPAction", "(", "'SetSocketSettings'", ",", "'SetSocketSettingsResult'", ",", "self", ".", "controlParameters", "(", "\"1\"", ",", "\"false\"", ")", ")", "else", ":", "raise", "TypeError", "(", "\"State %s is not valid.\"", "%", "str", "(", "value", ")", ")" ]
Set device state. :type value: str :param value: Future state (either ON or OFF)
[ "Set", "device", "state", "." ]
python
train
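A standalone sketch of the ON/OFF validation pattern in the state setter above; the SOAP call is replaced by a print, and the ON/OFF constants are assumed to be the strings 'ON' and 'OFF':

ON = "ON"    # assumed module-level constants, mirroring the setter above
OFF = "OFF"

def set_state(value):
    # Normalise the requested state and reject anything that is not ON/OFF.
    if value.upper() == ON:
        print("would send SetSocketSettings(state=true)")
    elif value.upper() == OFF:
        print("would send SetSocketSettings(state=false)")
    else:
        raise TypeError("State %s is not valid." % str(value))

set_state("on")      # accepted, case-insensitive
set_state("OFF")     # accepted
# set_state("idle")  # would raise TypeError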
jasonrbriggs/stomp.py
stomp/listener.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/listener.py#L384-L395
def on_error(self, headers, body):
    """
    Increment the error count. See :py:meth:`ConnectionListener.on_error`

    :param dict headers: headers in the message
    :param body: the message content
    """
    if log.isEnabledFor(logging.DEBUG):
        log.debug("received an error %s [%s]", body, headers)
    else:
        log.info("received an error %s", body)
    self.errors += 1
[ "def", "on_error", "(", "self", ",", "headers", ",", "body", ")", ":", "if", "log", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "log", ".", "debug", "(", "\"received an error %s [%s]\"", ",", "body", ",", "headers", ")", "else", ":", "log", ".", "info", "(", "\"received an error %s\"", ",", "body", ")", "self", ".", "errors", "+=", "1" ]
Increment the error count. See :py:meth:`ConnectionListener.on_error` :param dict headers: headers in the message :param body: the message content
[ "Increment", "the", "error", "count", ".", "See", ":", "py", ":", "meth", ":", "ConnectionListener", ".", "on_error" ]
python
train
quantmind/pulsar
examples/proxyserver/manage.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/examples/proxyserver/manage.py#L157-L184
def pre_request(self, response, exc=None): """Start the tunnel. This is a callback fired once a connection with upstream server is established. """ if response.request.method == 'CONNECT': self.start_response( '200 Connection established', [('content-length', '0')] ) # send empty byte so that headers are sent self.future.set_result([b'']) # proxy - server connection upstream = response.connection # client - proxy connection dostream = self.connection # Upgrade downstream connection dostream.upgrade(partial(StreamTunnel.create, upstream)) # # upstream upgrade upstream.upgrade(partial(StreamTunnel.create, dostream)) response.fire_event('post_request') # abort the event raise AbortEvent else: response.event('data_processed').bind(self.data_processed) response.event('post_request').bind(self.post_request)
[ "def", "pre_request", "(", "self", ",", "response", ",", "exc", "=", "None", ")", ":", "if", "response", ".", "request", ".", "method", "==", "'CONNECT'", ":", "self", ".", "start_response", "(", "'200 Connection established'", ",", "[", "(", "'content-length'", ",", "'0'", ")", "]", ")", "# send empty byte so that headers are sent", "self", ".", "future", ".", "set_result", "(", "[", "b''", "]", ")", "# proxy - server connection", "upstream", "=", "response", ".", "connection", "# client - proxy connection", "dostream", "=", "self", ".", "connection", "# Upgrade downstream connection", "dostream", ".", "upgrade", "(", "partial", "(", "StreamTunnel", ".", "create", ",", "upstream", ")", ")", "#", "# upstream upgrade", "upstream", ".", "upgrade", "(", "partial", "(", "StreamTunnel", ".", "create", ",", "dostream", ")", ")", "response", ".", "fire_event", "(", "'post_request'", ")", "# abort the event", "raise", "AbortEvent", "else", ":", "response", ".", "event", "(", "'data_processed'", ")", ".", "bind", "(", "self", ".", "data_processed", ")", "response", ".", "event", "(", "'post_request'", ")", ".", "bind", "(", "self", ".", "post_request", ")" ]
Start the tunnel. This is a callback fired once a connection with upstream server is established.
[ "Start", "the", "tunnel", "." ]
python
train
dcwatson/bbcode
bbcode.py
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L347-L370
def _tag_extent(self, data, start):
    """
    Finds the extent of a tag, accounting for option quoting and new tags
    starting before the current one closes.
    Returns (found_close, end_pos) where valid is False if another tag
    started before this one closed.
    """
    in_quote = False
    quotable = False
    lto = len(self.tag_opener)
    ltc = len(self.tag_closer)
    for i in xrange(start + 1, len(data)):
        ch = data[i]
        if ch == '=':
            quotable = True
        if ch in ('"', "'"):
            if quotable and not in_quote:
                in_quote = ch
            elif in_quote == ch:
                in_quote = False
                quotable = False
        if not in_quote and data[i:i + lto] == self.tag_opener:
            return i, False
        if not in_quote and data[i:i + ltc] == self.tag_closer:
            return i + ltc, True
    return len(data), False
[ "def", "_tag_extent", "(", "self", ",", "data", ",", "start", ")", ":", "in_quote", "=", "False", "quotable", "=", "False", "lto", "=", "len", "(", "self", ".", "tag_opener", ")", "ltc", "=", "len", "(", "self", ".", "tag_closer", ")", "for", "i", "in", "xrange", "(", "start", "+", "1", ",", "len", "(", "data", ")", ")", ":", "ch", "=", "data", "[", "i", "]", "if", "ch", "==", "'='", ":", "quotable", "=", "True", "if", "ch", "in", "(", "'\"'", ",", "\"'\"", ")", ":", "if", "quotable", "and", "not", "in_quote", ":", "in_quote", "=", "ch", "elif", "in_quote", "==", "ch", ":", "in_quote", "=", "False", "quotable", "=", "False", "if", "not", "in_quote", "and", "data", "[", "i", ":", "i", "+", "lto", "]", "==", "self", ".", "tag_opener", ":", "return", "i", ",", "False", "if", "not", "in_quote", "and", "data", "[", "i", ":", "i", "+", "ltc", "]", "==", "self", ".", "tag_closer", ":", "return", "i", "+", "ltc", ",", "True", "return", "len", "(", "data", ")", ",", "False" ]
Finds the extent of a tag, accounting for option quoting and new tags starting before the current one closes. Returns (found_close, end_pos) where valid is False if another tag started before this one closed.
[ "Finds", "the", "extent", "of", "a", "tag", "accounting", "for", "option", "quoting", "and", "new", "tags", "starting", "before", "the", "current", "one", "closes", ".", "Returns", "(", "found_close", "end_pos", ")", "where", "valid", "is", "False", "if", "another", "tag", "started", "before", "this", "one", "closed", "." ]
python
train
cs50/python-cs50
src/cs50/cs50.py
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/cs50.py#L47-L67
def formatException(type, value, tb):
    """
    Format traceback, darkening entries from global site-packages directories
    and user-specific site-packages directory.

    https://stackoverflow.com/a/46071447/5156190
    """

    # Absolute paths to site-packages
    packages = tuple(join(abspath(p), "") for p in sys.path[1:])

    # Highlight lines not referring to files in site-packages
    lines = []
    for line in format_exception(type, value, tb):
        matches = re.search(r"^ File \"([^\"]+)\", line \d+, in .+", line)
        if matches and matches.group(1).startswith(packages):
            lines += line
        else:
            matches = re.search(r"^(\s*)(.*?)(\s*)$", line, re.DOTALL)
            lines.append(matches.group(1) + colored(matches.group(2), "yellow") + matches.group(3))
    return "".join(lines).rstrip()
[ "def", "formatException", "(", "type", ",", "value", ",", "tb", ")", ":", "# Absolute paths to site-packages", "packages", "=", "tuple", "(", "join", "(", "abspath", "(", "p", ")", ",", "\"\"", ")", "for", "p", "in", "sys", ".", "path", "[", "1", ":", "]", ")", "# Highlight lines not referring to files in site-packages", "lines", "=", "[", "]", "for", "line", "in", "format_exception", "(", "type", ",", "value", ",", "tb", ")", ":", "matches", "=", "re", ".", "search", "(", "r\"^ File \\\"([^\\\"]+)\\\", line \\d+, in .+\"", ",", "line", ")", "if", "matches", "and", "matches", ".", "group", "(", "1", ")", ".", "startswith", "(", "packages", ")", ":", "lines", "+=", "line", "else", ":", "matches", "=", "re", ".", "search", "(", "r\"^(\\s*)(.*?)(\\s*)$\"", ",", "line", ",", "re", ".", "DOTALL", ")", "lines", ".", "append", "(", "matches", ".", "group", "(", "1", ")", "+", "colored", "(", "matches", ".", "group", "(", "2", ")", ",", "\"yellow\"", ")", "+", "matches", ".", "group", "(", "3", ")", ")", "return", "\"\"", ".", "join", "(", "lines", ")", ".", "rstrip", "(", ")" ]
Format traceback, darkening entries from global site-packages directories and user-specific site-packages directory. https://stackoverflow.com/a/46071447/5156190
[ "Format", "traceback", "darkening", "entries", "from", "global", "site", "-", "packages", "directories", "and", "user", "-", "specific", "site", "-", "packages", "directory", "." ]
python
train
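A reduced, runnable sketch of the same idea: format an exception and pick out the 'File ...' frames with a slightly more forgiving variant of the regex above, without the colouring or site-packages dimming:

import re
from traceback import format_exception

try:
    1 / 0
except ZeroDivisionError as exc:
    lines = format_exception(type(exc), exc, exc.__traceback__)

# More forgiving variant of the frame-matching regex used by formatException above.
frame = re.compile(r"^\s*File \"([^\"]+)\", line \d+, in .+")
for line in lines:
    match = frame.search(line)
    if match:
        print("frame from:", match.group(1))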
nerdvegas/rez
src/rez/vendor/colorama/ansitowin32.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/colorama/ansitowin32.py#L131-L143
def write_and_convert(self, text):
    '''
    Write the given text to our wrapped stream, stripping any ANSI
    sequences from the text, and optionally converting them into win32
    calls.
    '''
    cursor = 0
    for match in self.ANSI_RE.finditer(text):
        start, end = match.span()
        self.write_plain_text(text, cursor, start)
        self.convert_ansi(*match.groups())
        cursor = end
    self.write_plain_text(text, cursor, len(text))
[ "def", "write_and_convert", "(", "self", ",", "text", ")", ":", "cursor", "=", "0", "for", "match", "in", "self", ".", "ANSI_RE", ".", "finditer", "(", "text", ")", ":", "start", ",", "end", "=", "match", ".", "span", "(", ")", "self", ".", "write_plain_text", "(", "text", ",", "cursor", ",", "start", ")", "self", ".", "convert_ansi", "(", "*", "match", ".", "groups", "(", ")", ")", "cursor", "=", "end", "self", ".", "write_plain_text", "(", "text", ",", "cursor", ",", "len", "(", "text", ")", ")" ]
Write the given text to our wrapped stream, stripping any ANSI sequences from the text, and optionally converting them into win32 calls.
[ "Write", "the", "given", "text", "to", "our", "wrapped", "stream", "stripping", "any", "ANSI", "sequences", "from", "the", "text", "and", "optionally", "converting", "them", "into", "win32", "calls", "." ]
python
train
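A self-contained sketch of the same scan-and-split loop: walk the ANSI escape matches in a string and handle the plain text between them. The regex below is a simplified CSI pattern, not colorama's exact ANSI_RE, and the 'conversion' is just a print:

import re

# Simplified CSI pattern; colorama's real ANSI_RE is more general.
ANSI_RE = re.compile(r"\033\[((?:\d|;)*)([a-zA-Z])")

def write_and_convert(text):
    cursor = 0
    for match in ANSI_RE.finditer(text):
        start, end = match.span()
        print("plain:", repr(text[cursor:start]))      # write_plain_text equivalent
        print("ansi params/command:", match.groups())  # convert_ansi equivalent
        cursor = end
    print("plain:", repr(text[cursor:]))

write_and_convert("\033[31mred\033[0m normal")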
neuroticnerd/armory
armory/utils/boolean.py
https://github.com/neuroticnerd/armory/blob/d37c5ca1dbdd60dddb968e35f0bbe4bc1299dca1/armory/utils/boolean.py#L21-L50
def boolean(value, boolmap=_BOOL_MAP):
    """
    Convert value to <type bool>.

    Uses the boolean mapping dict to attempt to determine the
    conversion of the given value. If the value is not found in the
    mapping, it falls back to the built-in Python bool conversion.
    Optionally, a custom mapping dict can be passed to use for the
    value lookups.

    The default mapping dict allows quick and easy conversion of some
    common string values that have logical meaning as being true or
    false. This alternative allows one to consider a separate mapping
    when converting values to a boolean that are not what Python would
    inherently consider the value as. This is particularly useful for
    environment variables which are almost always retrieved as strings
    instead of whatever their inherent data type actually is. Because
    of how Python treats strings, this means that an environment
    variable which has been given the value of ``"False"`` ends up
    evaluating to ``True`` even though that was probably not the
    intention.
    """
    if boolmap == _BOOL_MAP and isinstance(value, str):
        result = boolmap.get(value.lower())
    else:
        result = boolmap.get(value)
    if result is None:
        result = bool(value)
    return result
[ "def", "boolean", "(", "value", ",", "boolmap", "=", "_BOOL_MAP", ")", ":", "if", "boolmap", "==", "_BOOL_MAP", "and", "isinstance", "(", "value", ",", "str", ")", ":", "result", "=", "boolmap", ".", "get", "(", "value", ".", "lower", "(", ")", ")", "else", ":", "result", "=", "boolmap", ".", "get", "(", "value", ")", "if", "result", "is", "None", ":", "result", "=", "bool", "(", "value", ")", "return", "result" ]
Convert value to <type bool>. Uses the boolean mapping dict to attempt to determine the conversion of the given value. If the value is not found in the mapping, it falls back to the built-in Python bool conversion. Optionally, a custom mapping dict can be passed to use for the value lookups. The default mapping dict allows quick and easy conversion of some common string values that have logical meaning as being true or false. This alternative allows one to consider a separate mapping when converting values to a boolean that are not what Python would inherently consider the value as. This is particularly useful for environment variables which are almost always retrieved as strings instead of whatever their inherent data type actually is. Because of how Python treats strings, this means that an environment variable which has been given the value of ``"False"`` ends up evaluating to ``True`` even though that was probably not the intention.
[ "Convert", "value", "to", "<type", "bool", ">", "." ]
python
train
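A runnable sketch of the mapping-first conversion described above, using a small made-up boolmap (the real _BOOL_MAP in armory is larger):

# Illustrative subset of a boolean mapping; the real _BOOL_MAP has more entries.
_BOOL_MAP = {"true": True, "false": False, "yes": True, "no": False,
             "1": True, "0": False, "": False}

def boolean(value, boolmap=_BOOL_MAP):
    # Strings are looked up case-insensitively; anything unknown falls back to bool().
    if boolmap == _BOOL_MAP and isinstance(value, str):
        result = boolmap.get(value.lower())
    else:
        result = boolmap.get(value)
    if result is None:
        result = bool(value)
    return result

print(boolean("False"))   # False, unlike bool("False") which is True
print(boolean("YES"))     # True
print(boolean(0))         # False via the bool() fallback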
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1007-L1025
def console_map_ascii_code_to_font(
    asciiCode: int, fontCharX: int, fontCharY: int
) -> None:
    """Set a character code to new coordinates on the tile-set.

    `asciiCode` must be within the bounds created during the initialization of
    the loaded tile-set.  For example, you can't use 255 here unless you have
    a 256 tile tile-set loaded.  This applies to all functions in this group.

    Args:
        asciiCode (int): The character code to change.
        fontCharX (int): The X tile coordinate on the loaded tileset.
            0 is the leftmost tile.
        fontCharY (int): The Y tile coordinate on the loaded tileset.
            0 is the topmost tile.
    """
    lib.TCOD_console_map_ascii_code_to_font(
        _int(asciiCode), fontCharX, fontCharY
    )
[ "def", "console_map_ascii_code_to_font", "(", "asciiCode", ":", "int", ",", "fontCharX", ":", "int", ",", "fontCharY", ":", "int", ")", "->", "None", ":", "lib", ".", "TCOD_console_map_ascii_code_to_font", "(", "_int", "(", "asciiCode", ")", ",", "fontCharX", ",", "fontCharY", ")" ]
Set a character code to new coordinates on the tile-set. `asciiCode` must be within the bounds created during the initialization of the loaded tile-set. For example, you can't use 255 here unless you have a 256 tile tile-set loaded. This applies to all functions in this group. Args: asciiCode (int): The character code to change. fontCharX (int): The X tile coordinate on the loaded tileset. 0 is the leftmost tile. fontCharY (int): The Y tile coordinate on the loaded tileset. 0 is the topmost tile.
[ "Set", "a", "character", "code", "to", "new", "coordinates", "on", "the", "tile", "-", "set", "." ]
python
train
arista-eosplus/pyeapi
pyeapi/api/varp.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/varp.py#L66-L93
def get(self): """Returns the current VARP configuration The Varp resource returns the following: * mac_address (str): The virtual-router mac address * interfaces (dict): A list of the interfaces that have a virtual-router address configured. Return: A Python dictionary object of key/value pairs that represents the current configuration of the node. If the specified interface does not exist then None is returned:: { "mac_address": "aa:bb:cc:dd:ee:ff", "interfaces": { "Vlan100": { "addresses": [ "1.1.1.1", "2.2.2.2"] }, "Vlan200": [...] } } """ resource = dict() resource.update(self._parse_mac_address()) resource.update(self._parse_interfaces()) return resource
[ "def", "get", "(", "self", ")", ":", "resource", "=", "dict", "(", ")", "resource", ".", "update", "(", "self", ".", "_parse_mac_address", "(", ")", ")", "resource", ".", "update", "(", "self", ".", "_parse_interfaces", "(", ")", ")", "return", "resource" ]
Returns the current VARP configuration The Varp resource returns the following: * mac_address (str): The virtual-router mac address * interfaces (dict): A list of the interfaces that have a virtual-router address configured. Return: A Python dictionary object of key/value pairs that represents the current configuration of the node. If the specified interface does not exist then None is returned:: { "mac_address": "aa:bb:cc:dd:ee:ff", "interfaces": { "Vlan100": { "addresses": [ "1.1.1.1", "2.2.2.2"] }, "Vlan200": [...] } }
[ "Returns", "the", "current", "VARP", "configuration" ]
python
train
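The return value documented above is plain nested dicts, so callers typically just walk it; a sketch with a hard-coded response in that shape (the addresses and interface names are invented, and no switch connection is involved):

# Hypothetical value in the shape documented by Varp.get() above.
varp = {
    "mac_address": "aa:bb:cc:dd:ee:ff",
    "interfaces": {
        "Vlan100": {"addresses": ["1.1.1.1", "2.2.2.2"]},
        "Vlan200": {"addresses": ["3.3.3.3"]},
    },
}

print("virtual-router mac:", varp["mac_address"])
for name, config in sorted(varp["interfaces"].items()):
    print(name, "->", ", ".join(config["addresses"]))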
secdev/scapy
scapy/layers/tls/tools.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/tools.py#L65-L76
def _tls_mac_add(alg, c, write_seq_num):
    """
    Compute the MAC using provided MAC alg instance over TLSCiphertext c using
    current write sequence number write_seq_num. Computed MAC is then appended
    to c.data and c.len is updated to reflect that change. It is the caller
    responsibility to increment the sequence number after the operation. The
    function has no return value.
    """
    write_seq_num = struct.pack("!Q", write_seq_num)
    h = alg.digest(write_seq_num + bytes(c))
    c.data += h
    c.len += alg.hash_len
[ "def", "_tls_mac_add", "(", "alg", ",", "c", ",", "write_seq_num", ")", ":", "write_seq_num", "=", "struct", ".", "pack", "(", "\"!Q\"", ",", "write_seq_num", ")", "h", "=", "alg", ".", "digest", "(", "write_seq_num", "+", "bytes", "(", "c", ")", ")", "c", ".", "data", "+=", "h", "c", ".", "len", "+=", "alg", ".", "hash_len" ]
Compute the MAC using provided MAC alg instance over TLSCiphertext c using current write sequence number write_seq_num. Computed MAC is then appended to c.data and c.len is updated to reflect that change. It is the caller responsibility to increment the sequence number after the operation. The function has no return value.
[ "Compute", "the", "MAC", "using", "provided", "MAC", "alg", "instance", "over", "TLSCiphertext", "c", "using", "current", "write", "sequence", "number", "write_seq_num", ".", "Computed", "MAC", "is", "then", "appended", "to", "c", ".", "data", "and", "c", ".", "len", "is", "updated", "to", "reflect", "that", "change", ".", "It", "is", "the", "caller", "responsibility", "to", "increment", "the", "sequence", "number", "after", "the", "operation", ".", "The", "function", "has", "no", "return", "value", "." ]
python
train
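A self-contained sketch of the MAC-then-append step: pack the 64-bit write sequence number, MAC it together with the record bytes, and append the tag. HMAC-SHA256 stands in for the cipher suite's MAC algorithm, and a plain bytes object stands in for the TLSCiphertext:

import hashlib
import hmac
import struct

def mac_add(key, record_bytes, write_seq_num):
    # 64-bit big-endian sequence number, exactly like struct.pack("!Q", ...) above.
    seq = struct.pack("!Q", write_seq_num)
    tag = hmac.new(key, seq + record_bytes, hashlib.sha256).digest()
    # The real helper appends to c.data and bumps c.len; here we just concatenate.
    return record_bytes + tag

protected = mac_add(b"0" * 32, b"\x17\x03\x03\x00\x05hello", write_seq_num=1)
print(len(protected), "bytes including a", hashlib.sha256().digest_size, "byte tag")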
user-cont/conu
conu/apidefs/filesystem.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/apidefs/filesystem.py#L146-L158
def directory_is_present(self, directory_path):
    """
    check if directory 'directory_path' is present, raise IOError if it's not a directory

    :param directory_path: str, directory to check
    :return: True if directory exists, False if directory does not exist
    """
    p = self.p(directory_path)
    if not os.path.exists(p):
        return False
    if not os.path.isdir(p):
        raise IOError("%s is not a directory" % directory_path)
    return True
[ "def", "directory_is_present", "(", "self", ",", "directory_path", ")", ":", "p", "=", "self", ".", "p", "(", "directory_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "return", "False", "if", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "raise", "IOError", "(", "\"%s is not a directory\"", "%", "directory_path", ")", "return", "True" ]
check if directory 'directory_path' is present, raise IOError if it's not a directory :param directory_path: str, directory to check :return: True if directory exists, False if directory does not exist
[ "check", "if", "directory", "directory_path", "is", "present", "raise", "IOError", "if", "it", "s", "not", "a", "directory" ]
python
train
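A minimal standalone version of the same check against the local filesystem; there is no container here, so the path is used directly instead of going through self.p():

import os
import tempfile

def directory_is_present(path):
    # False when missing, True when it is a directory, IOError otherwise.
    if not os.path.exists(path):
        return False
    if not os.path.isdir(path):
        raise IOError("%s is not a directory" % path)
    return True

with tempfile.TemporaryDirectory() as tmp:
    print(directory_is_present(tmp))                        # True
    print(directory_is_present(os.path.join(tmp, "nope")))  # False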
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/configeditor.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L103-L133
def setData(self, index, value, role=QtCore.Qt.EditRole): """Reimplemented from QtCore.QAbstractItemModel You can only set the value. :param index: the index to edit, column should be 1. :type index: :class:`PySide.QtCore.QModelIndex` :param value: the new value for the configobj :type value: object :param role: Optional - the ItemDataRole. Default is QtCore.Qt.EditRole :type role: QtCore.Qt.ItemDataRole :returns: True if index was edited, False if index could not be edited. :rtype: bool :raises: None """ if index.isValid(): if role == QtCore.Qt.EditRole: if index.column() == 1: p = index.internalPointer() k = self.get_key(p, index.row()) # we could just set the value # BUT for listvalues etc it will not work strval = self._val_to_str(value) # _handle_value will parse it correctly # comments gets lost (parsedval, comment) = self._conf._handle_value(strval) p[k] = parsedval self.dataChanged.emit(index, index) return True return False
[ "def", "setData", "(", "self", ",", "index", ",", "value", ",", "role", "=", "QtCore", ".", "Qt", ".", "EditRole", ")", ":", "if", "index", ".", "isValid", "(", ")", ":", "if", "role", "==", "QtCore", ".", "Qt", ".", "EditRole", ":", "if", "index", ".", "column", "(", ")", "==", "1", ":", "p", "=", "index", ".", "internalPointer", "(", ")", "k", "=", "self", ".", "get_key", "(", "p", ",", "index", ".", "row", "(", ")", ")", "# we could just set the value", "# BUT for listvalues etc it will not work", "strval", "=", "self", ".", "_val_to_str", "(", "value", ")", "# _handle_value will parse it correctly", "# comments gets lost", "(", "parsedval", ",", "comment", ")", "=", "self", ".", "_conf", ".", "_handle_value", "(", "strval", ")", "p", "[", "k", "]", "=", "parsedval", "self", ".", "dataChanged", ".", "emit", "(", "index", ",", "index", ")", "return", "True", "return", "False" ]
Reimplemented from QtCore.QAbstractItemModel You can only set the value. :param index: the index to edit, column should be 1. :type index: :class:`PySide.QtCore.QModelIndex` :param value: the new value for the configobj :type value: object :param role: Optional - the ItemDataRole. Default is QtCore.Qt.EditRole :type role: QtCore.Qt.ItemDataRole :returns: True if index was edited, False if index could not be edited. :rtype: bool :raises: None
[ "Reimplemented", "from", "QtCore", ".", "QAbstractItemModel" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/trax/trainer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trainer.py#L65-L81
def _setup_gin():
    """Setup gin configuration."""
    # Imports for configurables
    # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable
    from tensor2tensor.trax import models as _trax_models
    from tensor2tensor.trax import optimizers as _trax_opt
    # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable

    configs = FLAGS.config or []
    # Override with --dataset and --model
    if FLAGS.dataset:
        configs.append("inputs.dataset_name='%s'" % FLAGS.dataset)
    if FLAGS.data_dir:
        configs.append("inputs.data_dir='%s'" % FLAGS.data_dir)
    if FLAGS.model:
        configs.append("[email protected].%s" % FLAGS.model)
    gin.parse_config_files_and_bindings(FLAGS.config_file, configs)
[ "def", "_setup_gin", "(", ")", ":", "# Imports for configurables", "# pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable", "from", "tensor2tensor", ".", "trax", "import", "models", "as", "_trax_models", "from", "tensor2tensor", ".", "trax", "import", "optimizers", "as", "_trax_opt", "# pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable", "configs", "=", "FLAGS", ".", "config", "or", "[", "]", "# Override with --dataset and --model", "if", "FLAGS", ".", "dataset", ":", "configs", ".", "append", "(", "\"inputs.dataset_name='%s'\"", "%", "FLAGS", ".", "dataset", ")", "if", "FLAGS", ".", "data_dir", ":", "configs", ".", "append", "(", "\"inputs.data_dir='%s'\"", "%", "FLAGS", ".", "data_dir", ")", "if", "FLAGS", ".", "model", ":", "configs", ".", "append", "(", "\"[email protected].%s\"", "%", "FLAGS", ".", "model", ")", "gin", ".", "parse_config_files_and_bindings", "(", "FLAGS", ".", "config_file", ",", "configs", ")" ]
Setup gin configuration.
[ "Setup", "gin", "configuration", "." ]
python
train
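The interesting part above is how flag values become gin binding strings before gin.parse_config_files_and_bindings is called; a sketch of just that string-building step, with hard-coded stand-ins for FLAGS:

# Stand-ins for the absl FLAGS used by _setup_gin above (values are invented).
flag_dataset = "wmt14_en_de"
flag_data_dir = "/tmp/t2t_data"
flag_model = "TransformerLM"

configs = []
if flag_dataset:
    configs.append("inputs.dataset_name='%s'" % flag_dataset)
if flag_data_dir:
    configs.append("inputs.data_dir='%s'" % flag_data_dir)
if flag_model:
    configs.append("[email protected].%s" % flag_model)

print(configs)
# These binding strings would then be handed to
# gin.parse_config_files_and_bindings(config_files, configs).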
pantsbuild/pants
src/python/pants/util/dirutil.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/dirutil.py#L475-L493
def touch(path, times=None):
    """Equivalent of unix `touch path`.

    :API: public

    :path: The file to touch.
    :times Either a tuple of (atime, mtime) or else a single time to use for both.  If not
           specified both atime and mtime are updated to the current time.
    """
    if times:
        if len(times) > 2:
            raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '
                             'to use for both.')

        if len(times) == 1:
            times = (times, times)

    with safe_open(path, 'a'):
        os.utime(path, times)
[ "def", "touch", "(", "path", ",", "times", "=", "None", ")", ":", "if", "times", ":", "if", "len", "(", "times", ")", ">", "2", ":", "raise", "ValueError", "(", "'times must either be a tuple of (atime, mtime) or else a single time value '", "'to use for both.'", ")", "if", "len", "(", "times", ")", "==", "1", ":", "times", "=", "(", "times", ",", "times", ")", "with", "safe_open", "(", "path", ",", "'a'", ")", ":", "os", ".", "utime", "(", "path", ",", "times", ")" ]
Equivalent of unix `touch path`. :API: public :path: The file to touch. :times Either a tuple of (atime, mtime) or else a single time to use for both. If not specified both atime and mtime are updated to the current time.
[ "Equivalent", "of", "unix", "touch", "path", "." ]
python
train
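A runnable sketch of the unix-touch behaviour described above, using a temporary directory; safe_open is replaced by a plain open, and the tuple-length validation is omitted:

import os
import tempfile

def touch(path, times=None):
    # Open for append to create the file if needed, then update (atime, mtime).
    with open(path, "a"):
        os.utime(path, times)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "stamp")
    touch(path)                    # create with "now" timestamps
    touch(path, (0, 0))            # explicit (atime, mtime)
    print(os.path.getmtime(path))  # 0.0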
gwpy/gwpy
gwpy/io/gwf.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L305-L322
def _iter_channels(framefile):
    """Yields the name and type of each channel in a GWF file TOC

    **Requires:** |LDAStools.frameCPP|_

    Parameters
    ----------
    framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
        path of GWF file, or open file stream, to read
    """
    from LDAStools import frameCPP
    if not isinstance(framefile, frameCPP.IFrameFStream):
        framefile = open_gwf(framefile, 'r')
    toc = framefile.GetTOC()
    for typename in ('Sim', 'Proc', 'ADC'):
        typen = typename.lower()
        for name in getattr(toc, 'Get{0}'.format(typename))():
            yield name, typen
[ "def", "_iter_channels", "(", "framefile", ")", ":", "from", "LDAStools", "import", "frameCPP", "if", "not", "isinstance", "(", "framefile", ",", "frameCPP", ".", "IFrameFStream", ")", ":", "framefile", "=", "open_gwf", "(", "framefile", ",", "'r'", ")", "toc", "=", "framefile", ".", "GetTOC", "(", ")", "for", "typename", "in", "(", "'Sim'", ",", "'Proc'", ",", "'ADC'", ")", ":", "typen", "=", "typename", ".", "lower", "(", ")", "for", "name", "in", "getattr", "(", "toc", ",", "'Get{0}'", ".", "format", "(", "typename", ")", ")", "(", ")", ":", "yield", "name", ",", "typen" ]
Yields the name and type of each channel in a GWF file TOC **Requires:** |LDAStools.frameCPP|_ Parameters ---------- framefile : `str`, `LDAStools.frameCPP.IFrameFStream` path of GWF file, or open file stream, to read
[ "Yields", "the", "name", "and", "type", "of", "each", "channel", "in", "a", "GWF", "file", "TOC" ]
python
train
azogue/dataweb
dataweb/requestweb/__init__.py
https://github.com/azogue/dataweb/blob/085035855df7cef0fe7725bbe9a706832344d946/dataweb/requestweb/__init__.py#L99-L227
def get_data_en_intervalo(d0=None, df=None, date_fmt=DATE_FMT, usar_multithread=USAR_MULTITHREAD, max_threads_requests=MAX_THREADS_REQUESTS, timeout=TIMEOUT, num_retries=NUM_RETRIES, func_procesa_data_dia=None, func_url_data_dia=None, max_act_exec=None, verbose=True, data_extra_request=None): """ Obtiene los datos en bruto de la red realizando múltiples requests al tiempo Procesa los datos en bruto obtenidos de la red convirtiendo a Pandas DataFrame """ def _date(dia_string): if dia_string is None: return dt.date.today() elif type(dia_string) is pd.Timestamp: return dia_string.to_datetime().date() elif type(dia_string) is not dt.date: return dt.datetime.strptime(dia_string, date_fmt).date() else: return dia_string def _procesa_merge_datos_dias(lista_m, dict_data_merge): def _merge_datos_dias(key_tarea_merge, dict_merge_dias): dict_merge_dias[key_tarea_merge] = merge_data(dict_merge_dias[key_tarea_merge]) if num_dias > 1 and usar_multithread: lista_grupos = list() grupos_dias = [lista_m[i:i + DIAS_MERGE_MAX] for i in np.arange(0, num_dias, DIAS_MERGE_MAX)] for grupo in grupos_dias: lista_dfs = list() for key_g in grupo: lista_dfs.append(dict_data_merge[key_g]) lista_grupos.append(lista_dfs) keys_grupos = np.arange(len(lista_grupos)) dict_merge = dict(zip(keys_grupos, lista_grupos)) procesa_tareas_paralelo(keys_grupos, dict_merge, _merge_datos_dias, '\nMERGE DATAFRAMES DE DATOS WEB DIARIOS (%lu GRUPOS)', usar_multithread, MAX_THREADS_MERGE, verbose=verbose) dict_merge_final = {0: [dict_merge[k] for k in dict_merge.keys()]} _merge_datos_dias(0, dict_merge_final) return dict_merge_final[0] else: return merge_data(list(dict_data_merge.values())) def _hay_errores_en_datos_obtenidos(dict_data_obtenida): keys = list(sorted(dict_data_obtenida.keys())) data_es_none = [dict_data_obtenida[k] is None for k in keys] error = False if any(data_es_none): df_err = pd.DataFrame({'key': keys, 'is_bad': data_es_none}) df_err['date'] = df_err['key'].apply(lambda x: pd.Timestamp(x)) df_err['delta'] = (df_err['date'] - df_err['date'].shift(1)).fillna(3600 * 24) df_g = df_err[~df_err['is_bad']].copy() df_g['delta_g'] = (df_g['date'] - df_g['date'].shift(1)).fillna(3600 * 24) # print(df_err) # print(df_err['delta'].describe()) # print(df_g['delta_g'].describe()) if df_g['delta_g'].max() < pd.Timedelta(2, 'D'): bad_days = df_err[df_err['is_bad']]['key'].tolist() if verbose: print('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days)) logging.error('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days)) error = False else: if verbose: print('NO HAY NINGUNA TAREA REALIZADA!') logging.error('NO HAY NINGUNA TAREA REALIZADA!') bad_days = df_err['key'].tolist() error = True for k in bad_days: dict_data_obtenida.pop(k) return error def _obtiene_request(url, key, headers=None, p_req=None, json_r=False, **kwargs_r): if type(url) is list: results = [request_data_url(u, headers, num_retries, timeout, p_req, json_r, **kwargs_r) for u in url] dict_data[key] = list(zip(*results)) else: stat_response = request_data_url(url, headers, num_retries, timeout, p_req, json_r, **kwargs_r) dict_data[key] = stat_response def _obtiene_data_dia(key, dict_data_responses): url = func_url_data_dia(key) extra = dict_data_responses[key] if type(dict_data_responses[key]) is dict else {} headers = extra.pop('headers', None) json_req = extra.pop('json_req', False) params_request = extra.pop('params_request', None) try: count_process, ok = 0, -1 while count_process < num_retries and ok != 0: _obtiene_request(url, key, 
headers, params_request, json_req, **extra) data_import, ok = func_procesa_data_dia(key, dict_data_responses[key][1]) if ok == 0: dict_data_responses[key] = data_import elif ok == -2: # Código de salida temprana: count_process = num_retries count_process += 1 if ok != 0: dict_data_responses[key] = None except Exception as e: if verbose: print('PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'.format(e, key, url)) logging.error('PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'.format(e, key, url)) dict_data_responses[key] = None tic_ini = time.time() lista_dias = [dia.strftime(date_fmt) for dia in pd.date_range(_date(d0), _date(df))] if max_act_exec: # BORRAR. Es para limitar el nº de días adquiridos de golpe. lista_dias = lista_dias[:max_act_exec] num_dias = len(lista_dias) if data_extra_request is None: dict_data = dict(zip(lista_dias, np.zeros(num_dias))) else: dict_data = dict(zip(lista_dias, [data_extra_request.copy() for _ in range(num_dias)])) # IMPORTA DATOS Y LOS PROCESA procesa_tareas_paralelo(lista_dias, dict_data, _obtiene_data_dia, '\nPROCESADO DE DATOS WEB DE %lu DÍAS', usar_multithread, max_threads_requests, verbose=verbose) hay_errores = _hay_errores_en_datos_obtenidos(dict_data) # MERGE DATOS # print(len(lista_dias), len(dict_data.keys())) if not hay_errores and num_dias > 0: # data_merge = _procesa_merge_datos_dias(lista_dias, dict_data) data_merge = _procesa_merge_datos_dias(list(sorted(dict_data.keys())), dict_data) str_resumen_import = '\n%lu días importados [Proceso Total %.2f seg, %.4f seg/día]' \ % (num_dias, time.time() - tic_ini, (time.time() - tic_ini) / float(num_dias)) return data_merge, hay_errores, str_resumen_import else: return None, hay_errores, 'ERROR IMPORTANDO!!'
[ "def", "get_data_en_intervalo", "(", "d0", "=", "None", ",", "df", "=", "None", ",", "date_fmt", "=", "DATE_FMT", ",", "usar_multithread", "=", "USAR_MULTITHREAD", ",", "max_threads_requests", "=", "MAX_THREADS_REQUESTS", ",", "timeout", "=", "TIMEOUT", ",", "num_retries", "=", "NUM_RETRIES", ",", "func_procesa_data_dia", "=", "None", ",", "func_url_data_dia", "=", "None", ",", "max_act_exec", "=", "None", ",", "verbose", "=", "True", ",", "data_extra_request", "=", "None", ")", ":", "def", "_date", "(", "dia_string", ")", ":", "if", "dia_string", "is", "None", ":", "return", "dt", ".", "date", ".", "today", "(", ")", "elif", "type", "(", "dia_string", ")", "is", "pd", ".", "Timestamp", ":", "return", "dia_string", ".", "to_datetime", "(", ")", ".", "date", "(", ")", "elif", "type", "(", "dia_string", ")", "is", "not", "dt", ".", "date", ":", "return", "dt", ".", "datetime", ".", "strptime", "(", "dia_string", ",", "date_fmt", ")", ".", "date", "(", ")", "else", ":", "return", "dia_string", "def", "_procesa_merge_datos_dias", "(", "lista_m", ",", "dict_data_merge", ")", ":", "def", "_merge_datos_dias", "(", "key_tarea_merge", ",", "dict_merge_dias", ")", ":", "dict_merge_dias", "[", "key_tarea_merge", "]", "=", "merge_data", "(", "dict_merge_dias", "[", "key_tarea_merge", "]", ")", "if", "num_dias", ">", "1", "and", "usar_multithread", ":", "lista_grupos", "=", "list", "(", ")", "grupos_dias", "=", "[", "lista_m", "[", "i", ":", "i", "+", "DIAS_MERGE_MAX", "]", "for", "i", "in", "np", ".", "arange", "(", "0", ",", "num_dias", ",", "DIAS_MERGE_MAX", ")", "]", "for", "grupo", "in", "grupos_dias", ":", "lista_dfs", "=", "list", "(", ")", "for", "key_g", "in", "grupo", ":", "lista_dfs", ".", "append", "(", "dict_data_merge", "[", "key_g", "]", ")", "lista_grupos", ".", "append", "(", "lista_dfs", ")", "keys_grupos", "=", "np", ".", "arange", "(", "len", "(", "lista_grupos", ")", ")", "dict_merge", "=", "dict", "(", "zip", "(", "keys_grupos", ",", "lista_grupos", ")", ")", "procesa_tareas_paralelo", "(", "keys_grupos", ",", "dict_merge", ",", "_merge_datos_dias", ",", "'\\nMERGE DATAFRAMES DE DATOS WEB DIARIOS (%lu GRUPOS)'", ",", "usar_multithread", ",", "MAX_THREADS_MERGE", ",", "verbose", "=", "verbose", ")", "dict_merge_final", "=", "{", "0", ":", "[", "dict_merge", "[", "k", "]", "for", "k", "in", "dict_merge", ".", "keys", "(", ")", "]", "}", "_merge_datos_dias", "(", "0", ",", "dict_merge_final", ")", "return", "dict_merge_final", "[", "0", "]", "else", ":", "return", "merge_data", "(", "list", "(", "dict_data_merge", ".", "values", "(", ")", ")", ")", "def", "_hay_errores_en_datos_obtenidos", "(", "dict_data_obtenida", ")", ":", "keys", "=", "list", "(", "sorted", "(", "dict_data_obtenida", ".", "keys", "(", ")", ")", ")", "data_es_none", "=", "[", "dict_data_obtenida", "[", "k", "]", "is", "None", "for", "k", "in", "keys", "]", "error", "=", "False", "if", "any", "(", "data_es_none", ")", ":", "df_err", "=", "pd", ".", "DataFrame", "(", "{", "'key'", ":", "keys", ",", "'is_bad'", ":", "data_es_none", "}", ")", "df_err", "[", "'date'", "]", "=", "df_err", "[", "'key'", "]", ".", "apply", "(", "lambda", "x", ":", "pd", ".", "Timestamp", "(", "x", ")", ")", "df_err", "[", "'delta'", "]", "=", "(", "df_err", "[", "'date'", "]", "-", "df_err", "[", "'date'", "]", ".", "shift", "(", "1", ")", ")", ".", "fillna", "(", "3600", "*", "24", ")", "df_g", "=", "df_err", "[", "~", "df_err", "[", "'is_bad'", "]", "]", ".", "copy", "(", ")", "df_g", "[", "'delta_g'", "]", "=", "(", "df_g", "[", 
"'date'", "]", "-", "df_g", "[", "'date'", "]", ".", "shift", "(", "1", ")", ")", ".", "fillna", "(", "3600", "*", "24", ")", "# print(df_err)", "# print(df_err['delta'].describe())", "# print(df_g['delta_g'].describe())", "if", "df_g", "[", "'delta_g'", "]", ".", "max", "(", ")", "<", "pd", ".", "Timedelta", "(", "2", ",", "'D'", ")", ":", "bad_days", "=", "df_err", "[", "df_err", "[", "'is_bad'", "]", "]", "[", "'key'", "]", ".", "tolist", "(", ")", "if", "verbose", ":", "print", "(", "'HAY TAREAS NO REALIZADAS ({}):\\n{}'", ".", "format", "(", "len", "(", "bad_days", ")", ",", "bad_days", ")", ")", "logging", ".", "error", "(", "'HAY TAREAS NO REALIZADAS ({}):\\n{}'", ".", "format", "(", "len", "(", "bad_days", ")", ",", "bad_days", ")", ")", "error", "=", "False", "else", ":", "if", "verbose", ":", "print", "(", "'NO HAY NINGUNA TAREA REALIZADA!'", ")", "logging", ".", "error", "(", "'NO HAY NINGUNA TAREA REALIZADA!'", ")", "bad_days", "=", "df_err", "[", "'key'", "]", ".", "tolist", "(", ")", "error", "=", "True", "for", "k", "in", "bad_days", ":", "dict_data_obtenida", ".", "pop", "(", "k", ")", "return", "error", "def", "_obtiene_request", "(", "url", ",", "key", ",", "headers", "=", "None", ",", "p_req", "=", "None", ",", "json_r", "=", "False", ",", "*", "*", "kwargs_r", ")", ":", "if", "type", "(", "url", ")", "is", "list", ":", "results", "=", "[", "request_data_url", "(", "u", ",", "headers", ",", "num_retries", ",", "timeout", ",", "p_req", ",", "json_r", ",", "*", "*", "kwargs_r", ")", "for", "u", "in", "url", "]", "dict_data", "[", "key", "]", "=", "list", "(", "zip", "(", "*", "results", ")", ")", "else", ":", "stat_response", "=", "request_data_url", "(", "url", ",", "headers", ",", "num_retries", ",", "timeout", ",", "p_req", ",", "json_r", ",", "*", "*", "kwargs_r", ")", "dict_data", "[", "key", "]", "=", "stat_response", "def", "_obtiene_data_dia", "(", "key", ",", "dict_data_responses", ")", ":", "url", "=", "func_url_data_dia", "(", "key", ")", "extra", "=", "dict_data_responses", "[", "key", "]", "if", "type", "(", "dict_data_responses", "[", "key", "]", ")", "is", "dict", "else", "{", "}", "headers", "=", "extra", ".", "pop", "(", "'headers'", ",", "None", ")", "json_req", "=", "extra", ".", "pop", "(", "'json_req'", ",", "False", ")", "params_request", "=", "extra", ".", "pop", "(", "'params_request'", ",", "None", ")", "try", ":", "count_process", ",", "ok", "=", "0", ",", "-", "1", "while", "count_process", "<", "num_retries", "and", "ok", "!=", "0", ":", "_obtiene_request", "(", "url", ",", "key", ",", "headers", ",", "params_request", ",", "json_req", ",", "*", "*", "extra", ")", "data_import", ",", "ok", "=", "func_procesa_data_dia", "(", "key", ",", "dict_data_responses", "[", "key", "]", "[", "1", "]", ")", "if", "ok", "==", "0", ":", "dict_data_responses", "[", "key", "]", "=", "data_import", "elif", "ok", "==", "-", "2", ":", "# Código de salida temprana:", "count_process", "=", "num_retries", "count_process", "+=", "1", "if", "ok", "!=", "0", ":", "dict_data_responses", "[", "key", "]", "=", "None", "except", "Exception", "as", "e", ":", "if", "verbose", ":", "print", "(", "'PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'", ".", "format", "(", "e", ",", "key", ",", "url", ")", ")", "logging", ".", "error", "(", "'PROCESANDO DATA!???? 
(Exception: {}; KEY: {}; URL: {})'", ".", "format", "(", "e", ",", "key", ",", "url", ")", ")", "dict_data_responses", "[", "key", "]", "=", "None", "tic_ini", "=", "time", ".", "time", "(", ")", "lista_dias", "=", "[", "dia", ".", "strftime", "(", "date_fmt", ")", "for", "dia", "in", "pd", ".", "date_range", "(", "_date", "(", "d0", ")", ",", "_date", "(", "df", ")", ")", "]", "if", "max_act_exec", ":", "# BORRAR. Es para limitar el nº de días adquiridos de golpe.", "lista_dias", "=", "lista_dias", "[", ":", "max_act_exec", "]", "num_dias", "=", "len", "(", "lista_dias", ")", "if", "data_extra_request", "is", "None", ":", "dict_data", "=", "dict", "(", "zip", "(", "lista_dias", ",", "np", ".", "zeros", "(", "num_dias", ")", ")", ")", "else", ":", "dict_data", "=", "dict", "(", "zip", "(", "lista_dias", ",", "[", "data_extra_request", ".", "copy", "(", ")", "for", "_", "in", "range", "(", "num_dias", ")", "]", ")", ")", "# IMPORTA DATOS Y LOS PROCESA", "procesa_tareas_paralelo", "(", "lista_dias", ",", "dict_data", ",", "_obtiene_data_dia", ",", "'\\nPROCESADO DE DATOS WEB DE %lu DÍAS',", "", "usar_multithread", ",", "max_threads_requests", ",", "verbose", "=", "verbose", ")", "hay_errores", "=", "_hay_errores_en_datos_obtenidos", "(", "dict_data", ")", "# MERGE DATOS", "# print(len(lista_dias), len(dict_data.keys()))", "if", "not", "hay_errores", "and", "num_dias", ">", "0", ":", "# data_merge = _procesa_merge_datos_dias(lista_dias, dict_data)", "data_merge", "=", "_procesa_merge_datos_dias", "(", "list", "(", "sorted", "(", "dict_data", ".", "keys", "(", ")", ")", ")", ",", "dict_data", ")", "str_resumen_import", "=", "'\\n%lu días importados [Proceso Total %.2f seg, %.4f seg/día]' \\", "%", "(", "num_dias", ",", "time", ".", "time", "(", ")", "-", "tic_ini", ",", "(", "time", ".", "time", "(", ")", "-", "tic_ini", ")", "/", "float", "(", "num_dias", ")", ")", "return", "data_merge", ",", "hay_errores", ",", "str_resumen_import", "else", ":", "return", "None", ",", "hay_errores", ",", "'ERROR IMPORTANDO!!'" ]
Obtiene los datos en bruto de la red realizando múltiples requests al tiempo Procesa los datos en bruto obtenidos de la red convirtiendo a Pandas DataFrame
[ "Obtiene", "los", "datos", "en", "bruto", "de", "la", "red", "realizando", "múltiples", "requests", "al", "tiempo", "Procesa", "los", "datos", "en", "bruto", "obtenidos", "de", "la", "red", "convirtiendo", "a", "Pandas", "DataFrame" ]
python
train
blockchain-certificates/cert-core
cert_core/cert_store/certificate_store.py
https://github.com/blockchain-certificates/cert-core/blob/b6a6c29bc87ccd9d2cf15a2c4301f986cfb31453/cert_core/cert_store/certificate_store.py#L72-L94
def get_certificate(self, certificate_uid):
    """
    Returns certificate as byte array. We need this for v1 certs, which compute a binary hash.
    Raises KeyError if not found
    :param certificate_uid:
    :return:
    """
    logging.debug('Retrieving certificate for uid=%s', certificate_uid)
    is_v1_uid = model.is_v1_uid(certificate_uid)
    if not is_v1_uid:
        return super(V1AwareCertificateStore, self).get_certificate(certificate_uid)

    # else it's V1.1 (if not valid, it will throw)
    certificate = self._find_certificate_metadata(uid=certificate_uid)
    if certificate:
        certificate_bytes = self._get_certificate_raw(certificate_uid)
        certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
        return model.to_certificate_model(certificate_json, certificate['txid'], certificate_bytes)

    message = 'Certificate metadata not found for certificate uid=%s' % certificate_uid
    logging.error(message)
    raise KeyError(message)
[ "def", "get_certificate", "(", "self", ",", "certificate_uid", ")", ":", "logging", ".", "debug", "(", "'Retrieving certificate for uid=%s'", ",", "certificate_uid", ")", "is_v1_uid", "=", "model", ".", "is_v1_uid", "(", "certificate_uid", ")", "if", "not", "is_v1_uid", ":", "return", "super", "(", "V1AwareCertificateStore", ",", "self", ")", ".", "get_certificate", "(", "certificate_uid", ")", "# else it's V1.1 (if not valid, it will throw)", "certificate", "=", "self", ".", "_find_certificate_metadata", "(", "uid", "=", "certificate_uid", ")", "if", "certificate", ":", "certificate_bytes", "=", "self", ".", "_get_certificate_raw", "(", "certificate_uid", ")", "certificate_json", "=", "helpers", ".", "certificate_bytes_to_json", "(", "certificate_bytes", ")", "return", "model", ".", "to_certificate_model", "(", "certificate_json", ",", "certificate", "[", "'txid'", "]", ",", "certificate_bytes", ")", "message", "=", "'Certificate metadata not found for certificate uid=%s'", "%", "certificate_uid", "logging", ".", "error", "(", "message", ")", "raise", "KeyError", "(", "message", ")" ]
Returns certificate as byte array. We need this for v1 certs, which compute a binary hash. Raises KeyError if not found :param certificate_uid: :return:
[ "Returns", "certificate", "as", "byte", "array", ".", "We", "need", "this", "for", "v1", "certs", "which", "compute", "a", "binary", "hash", ".", "Raises", "KeyError", "if", "not", "found", ":", "param", "certificate_uid", ":", ":", "return", ":" ]
python
train
ofek/bit
bit/transaction.py
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/transaction.py#L206-L370
def select_coins(target, fee, output_size, min_change, *, absolute_fee=False, consolidate=False, unspents): ''' Implementation of Branch-and-Bound coin selection defined in Erhart's Master's thesis An Evaluation of Coin Selection Strategies here: http://murch.one/wp-content/uploads/2016/11/erhardt2016coinselection.pdf :param target: The total amount of the outputs in a transaction for which we try to select the inputs to spend. :type target: ``int`` :param fee: The number of satoshi per byte for the fee of the transaction. :type fee: ``int`` :param output_size: A list containing as int the sizes of each output. :type output_size: ``list`` of ``int` :param min_change: The minimum amount of satoshis allowed for the return/change address if there is no perfect match. :type min_change: ``int`` :param absolute_fee: Whether or not the parameter ``fee`` should be repurposed to denote the exact fee amount. :type absolute_fee: ``bool`` :param consolidate: Whether or not the Branch-and-Bound process for finding a perfect match should be skipped and all unspents used directly. :type consolidate: ``bool`` :param unspents: The UTXOs to use as inputs. :type unspents: ``list`` of :class:`~bit.network.meta.Unspent` :raises InsufficientFunds: If ``unspents`` does not contain enough balance to allow spending matching the target. ''' # The maximum number of tries for Branch-and-Bound: BNB_TRIES = 1000000 # COST_OF_OVERHEAD excludes the return address of output_size (last element). COST_OF_OVERHEAD = (8 + sum(output_size[:-1]) + 1) * fee def branch_and_bound(d, selected_coins, effective_value, target, fee, sorted_unspents): # pragma: no cover nonlocal COST_OF_OVERHEAD, BNB_TRIES BNB_TRIES -= 1 COST_PER_INPUT = 148 * fee # Just typical estimate values COST_PER_OUTPUT = 34 * fee # The target we want to match includes cost of overhead for transaction target_to_match = target + COST_OF_OVERHEAD # Allowing to pay fee for a whole input and output is rationally # correct, but increases the fee-rate dramatically for only few inputs. 
match_range = COST_PER_INPUT + COST_PER_OUTPUT # We could allow to spend up to X% more on the fees if we can find a # perfect match: # match_range += int(0.1 * fee * sum(u.vsize for u in selected_coins)) # Check for solution and cut criteria: if effective_value > target_to_match + match_range: return [] elif effective_value >= target_to_match: return selected_coins elif BNB_TRIES <= 0: return [] elif d >= len(sorted_unspents): return [] else: # Randomly explore next branch: binary_random = randint(0, 1) if binary_random: # Explore inclusion branch first, else omission branch: effective_value_new = effective_value + \ sorted_unspents[d].amount - fee * sorted_unspents[d].vsize with_this = branch_and_bound( d + 1, selected_coins + [sorted_unspents[d]], effective_value_new, target, fee, sorted_unspents ) if with_this != []: return with_this else: without_this = branch_and_bound( d + 1, selected_coins, effective_value, target, fee, sorted_unspents ) return without_this else: # As above but explore omission branch first: without_this = branch_and_bound( d + 1, selected_coins, effective_value, target, fee, sorted_unspents ) if without_this != []: return without_this else: effective_value_new = effective_value + \ sorted_unspents[d].amount - fee * sorted_unspents[d].vsize with_this = branch_and_bound( d + 1, selected_coins + [sorted_unspents[d]], effective_value_new, target, fee, sorted_unspents ) return with_this sorted_unspents = sorted(unspents, key=lambda u: u.amount, reverse=True) selected_coins = [] if not consolidate: # Trying to find a perfect match using Branch-and-Bound: selected_coins = branch_and_bound( d=0, selected_coins=[], effective_value=0, target=target, fee=fee, sorted_unspents=sorted_unspents ) remaining = 0 # Fallback: If no match, Single Random Draw with return address: if selected_coins == []: unspents = unspents.copy() # Since we have no information on the user's spending habit it is # best practice to randomly select UTXOs until we have enough. if not consolidate: # To have a deterministic way of inserting inputs when # consolidating, we only shuffle the unspents otherwise. shuffle(unspents) while unspents: selected_coins.append(unspents.pop(0)) estimated_fee = estimate_tx_fee( sum(u.vsize for u in selected_coins), len(selected_coins), sum(output_size), len(output_size), fee ) estimated_fee = fee if absolute_fee else estimated_fee remaining = sum(u.amount for u in selected_coins) - target - estimated_fee if remaining >= min_change and (not consolidate or len(unspents) == 0): break else: raise InsufficientFunds('Balance {} is less than {} (including ' 'fee).'.format(sum( u.amount for u in selected_coins), target + min_change + estimated_fee)) return selected_coins, remaining
[ "def", "select_coins", "(", "target", ",", "fee", ",", "output_size", ",", "min_change", ",", "*", ",", "absolute_fee", "=", "False", ",", "consolidate", "=", "False", ",", "unspents", ")", ":", "# The maximum number of tries for Branch-and-Bound:", "BNB_TRIES", "=", "1000000", "# COST_OF_OVERHEAD excludes the return address of output_size (last element).", "COST_OF_OVERHEAD", "=", "(", "8", "+", "sum", "(", "output_size", "[", ":", "-", "1", "]", ")", "+", "1", ")", "*", "fee", "def", "branch_and_bound", "(", "d", ",", "selected_coins", ",", "effective_value", ",", "target", ",", "fee", ",", "sorted_unspents", ")", ":", "# pragma: no cover", "nonlocal", "COST_OF_OVERHEAD", ",", "BNB_TRIES", "BNB_TRIES", "-=", "1", "COST_PER_INPUT", "=", "148", "*", "fee", "# Just typical estimate values", "COST_PER_OUTPUT", "=", "34", "*", "fee", "# The target we want to match includes cost of overhead for transaction", "target_to_match", "=", "target", "+", "COST_OF_OVERHEAD", "# Allowing to pay fee for a whole input and output is rationally", "# correct, but increases the fee-rate dramatically for only few inputs.", "match_range", "=", "COST_PER_INPUT", "+", "COST_PER_OUTPUT", "# We could allow to spend up to X% more on the fees if we can find a", "# perfect match:", "# match_range += int(0.1 * fee * sum(u.vsize for u in selected_coins))", "# Check for solution and cut criteria:", "if", "effective_value", ">", "target_to_match", "+", "match_range", ":", "return", "[", "]", "elif", "effective_value", ">=", "target_to_match", ":", "return", "selected_coins", "elif", "BNB_TRIES", "<=", "0", ":", "return", "[", "]", "elif", "d", ">=", "len", "(", "sorted_unspents", ")", ":", "return", "[", "]", "else", ":", "# Randomly explore next branch:", "binary_random", "=", "randint", "(", "0", ",", "1", ")", "if", "binary_random", ":", "# Explore inclusion branch first, else omission branch:", "effective_value_new", "=", "effective_value", "+", "sorted_unspents", "[", "d", "]", ".", "amount", "-", "fee", "*", "sorted_unspents", "[", "d", "]", ".", "vsize", "with_this", "=", "branch_and_bound", "(", "d", "+", "1", ",", "selected_coins", "+", "[", "sorted_unspents", "[", "d", "]", "]", ",", "effective_value_new", ",", "target", ",", "fee", ",", "sorted_unspents", ")", "if", "with_this", "!=", "[", "]", ":", "return", "with_this", "else", ":", "without_this", "=", "branch_and_bound", "(", "d", "+", "1", ",", "selected_coins", ",", "effective_value", ",", "target", ",", "fee", ",", "sorted_unspents", ")", "return", "without_this", "else", ":", "# As above but explore omission branch first:", "without_this", "=", "branch_and_bound", "(", "d", "+", "1", ",", "selected_coins", ",", "effective_value", ",", "target", ",", "fee", ",", "sorted_unspents", ")", "if", "without_this", "!=", "[", "]", ":", "return", "without_this", "else", ":", "effective_value_new", "=", "effective_value", "+", "sorted_unspents", "[", "d", "]", ".", "amount", "-", "fee", "*", "sorted_unspents", "[", "d", "]", ".", "vsize", "with_this", "=", "branch_and_bound", "(", "d", "+", "1", ",", "selected_coins", "+", "[", "sorted_unspents", "[", "d", "]", "]", ",", "effective_value_new", ",", "target", ",", "fee", ",", "sorted_unspents", ")", "return", "with_this", "sorted_unspents", "=", "sorted", "(", "unspents", ",", "key", "=", "lambda", "u", ":", "u", ".", "amount", ",", "reverse", "=", "True", ")", "selected_coins", "=", "[", "]", "if", "not", "consolidate", ":", "# Trying to find a perfect match using Branch-and-Bound:", "selected_coins", "=", 
"branch_and_bound", "(", "d", "=", "0", ",", "selected_coins", "=", "[", "]", ",", "effective_value", "=", "0", ",", "target", "=", "target", ",", "fee", "=", "fee", ",", "sorted_unspents", "=", "sorted_unspents", ")", "remaining", "=", "0", "# Fallback: If no match, Single Random Draw with return address:", "if", "selected_coins", "==", "[", "]", ":", "unspents", "=", "unspents", ".", "copy", "(", ")", "# Since we have no information on the user's spending habit it is", "# best practice to randomly select UTXOs until we have enough.", "if", "not", "consolidate", ":", "# To have a deterministic way of inserting inputs when", "# consolidating, we only shuffle the unspents otherwise.", "shuffle", "(", "unspents", ")", "while", "unspents", ":", "selected_coins", ".", "append", "(", "unspents", ".", "pop", "(", "0", ")", ")", "estimated_fee", "=", "estimate_tx_fee", "(", "sum", "(", "u", ".", "vsize", "for", "u", "in", "selected_coins", ")", ",", "len", "(", "selected_coins", ")", ",", "sum", "(", "output_size", ")", ",", "len", "(", "output_size", ")", ",", "fee", ")", "estimated_fee", "=", "fee", "if", "absolute_fee", "else", "estimated_fee", "remaining", "=", "sum", "(", "u", ".", "amount", "for", "u", "in", "selected_coins", ")", "-", "target", "-", "estimated_fee", "if", "remaining", ">=", "min_change", "and", "(", "not", "consolidate", "or", "len", "(", "unspents", ")", "==", "0", ")", ":", "break", "else", ":", "raise", "InsufficientFunds", "(", "'Balance {} is less than {} (including '", "'fee).'", ".", "format", "(", "sum", "(", "u", ".", "amount", "for", "u", "in", "selected_coins", ")", ",", "target", "+", "min_change", "+", "estimated_fee", ")", ")", "return", "selected_coins", ",", "remaining" ]
Implementation of Branch-and-Bound coin selection defined in Erhart's Master's thesis An Evaluation of Coin Selection Strategies here: http://murch.one/wp-content/uploads/2016/11/erhardt2016coinselection.pdf :param target: The total amount of the outputs in a transaction for which we try to select the inputs to spend. :type target: ``int`` :param fee: The number of satoshi per byte for the fee of the transaction. :type fee: ``int`` :param output_size: A list containing as int the sizes of each output. :type output_size: ``list`` of ``int` :param min_change: The minimum amount of satoshis allowed for the return/change address if there is no perfect match. :type min_change: ``int`` :param absolute_fee: Whether or not the parameter ``fee`` should be repurposed to denote the exact fee amount. :type absolute_fee: ``bool`` :param consolidate: Whether or not the Branch-and-Bound process for finding a perfect match should be skipped and all unspents used directly. :type consolidate: ``bool`` :param unspents: The UTXOs to use as inputs. :type unspents: ``list`` of :class:`~bit.network.meta.Unspent` :raises InsufficientFunds: If ``unspents`` does not contain enough balance to allow spending matching the target.
[ "Implementation", "of", "Branch", "-", "and", "-", "Bound", "coin", "selection", "defined", "in", "Erhart", "s", "Master", "s", "thesis", "An", "Evaluation", "of", "Coin", "Selection", "Strategies", "here", ":", "http", ":", "//", "murch", ".", "one", "/", "wp", "-", "content", "/", "uploads", "/", "2016", "/", "11", "/", "erhardt2016coinselection", ".", "pdf" ]
python
train
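A minimal, self-contained sketch of the single-random-draw fallback used by select_coins above, with a namedtuple standing in for bit's Unspent class; the fee formula and the example amounts are simplifying assumptions, not the library's estimate_tx_fee:

    from collections import namedtuple
    from random import shuffle

    # Stand-in for bit.network.meta.Unspent; only the two fields used above.
    Utxo = namedtuple('Utxo', ['amount', 'vsize'])

    def single_random_draw(utxos, target, fee_per_byte, min_change=5430):
        """Randomly pop UTXOs until target + estimated fee + change is covered."""
        pool = list(utxos)
        shuffle(pool)
        selected = []
        while pool:
            selected.append(pool.pop(0))
            # Simplified fee estimate: overhead + input sizes + two 34-byte outputs.
            estimated_fee = fee_per_byte * (10 + sum(u.vsize for u in selected) + 2 * 34)
            remaining = sum(u.amount for u in selected) - target - estimated_fee
            if remaining >= min_change:
                return selected, remaining
        raise ValueError('insufficient funds')

    coins, change = single_random_draw(
        [Utxo(50_000, 148), Utxo(120_000, 148), Utxo(8_000, 148)],
        target=100_000, fee_per_byte=5)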
slok/prometheus-python
prometheus/collectors.py
https://github.com/slok/prometheus-python/blob/51c6de3cdcd4e36eae6e1643b136f486b57a18cd/prometheus/collectors.py#L71-L88
def get_all(self): """ Returns a list populated by tuples of 2 elements, first one is a dict with all the labels and the second elemnt is the value of the metric itself """ with mutex: items = self.values.items() result = [] for k, v in items: # Check if is a single value dict (custom empty key) if not k or k == MetricDict.EMPTY_KEY: key = None else: key = decoder.decode(k) result.append((key, self.get(k))) return result
[ "def", "get_all", "(", "self", ")", ":", "with", "mutex", ":", "items", "=", "self", ".", "values", ".", "items", "(", ")", "result", "=", "[", "]", "for", "k", ",", "v", "in", "items", ":", "# Check if is a single value dict (custom empty key)", "if", "not", "k", "or", "k", "==", "MetricDict", ".", "EMPTY_KEY", ":", "key", "=", "None", "else", ":", "key", "=", "decoder", ".", "decode", "(", "k", ")", "result", ".", "append", "(", "(", "key", ",", "self", ".", "get", "(", "k", ")", ")", ")", "return", "result" ]
Returns a list populated by tuples of 2 elements, first one is a dict with all the labels and the second element is the value of the metric itself
[ "Returns", "a", "list", "populated", "by", "tuples", "of", "2", "elements", "first", "one", "is", "a", "dict", "with", "all", "the", "labels", "and", "the", "second", "elemnt", "is", "the", "value", "of", "the", "metric", "itself" ]
python
train
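The return shape of get_all above can be shown with plain data; the label dicts and counter values here are invented for illustration:

    # Hypothetical result of collector.get_all(): a list of (labels, value) tuples.
    samples = [
        ({'host': 'a.example.org', 'handler': '/'}, 42),
        ({'host': 'b.example.org', 'handler': '/metrics'}, 7),
        (None, 3),  # single-value metric stored under the empty key
    ]
    for labels, value in samples:
        print(labels, value)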
jmoiron/micromongo
micromongo/models.py
https://github.com/jmoiron/micromongo/blob/0d7dd1396e2f25ece6648619ccff32345bc306a1/micromongo/models.py#L23-L36
def connect(*args, **kwargs): """Connect to the database. Passes arguments along to ``pymongo.connection.Connection`` unmodified. The Connection returned by this proxy method will be used by micromongo for all of its queries. Micromongo will alter the behavior of this conneciton object in some subtle ways; if you want a clean one, call ``micromongo.clean_connection`` after connecting.""" global __connection, __connection_args __connection_args = (args, dict(kwargs)) # inject our class_router kwargs['class_router'] = class_router __connection = Connection(*args, **kwargs) return __connection
[ "def", "connect", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "global", "__connection", ",", "__connection_args", "__connection_args", "=", "(", "args", ",", "dict", "(", "kwargs", ")", ")", "# inject our class_router", "kwargs", "[", "'class_router'", "]", "=", "class_router", "__connection", "=", "Connection", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "__connection" ]
Connect to the database. Passes arguments along to ``pymongo.connection.Connection`` unmodified. The Connection returned by this proxy method will be used by micromongo for all of its queries. Micromongo will alter the behavior of this connection object in some subtle ways; if you want a clean one, call ``micromongo.clean_connection`` after connecting.
[ "Connect", "to", "the", "database", ".", "Passes", "arguments", "along", "to", "pymongo", ".", "connection", ".", "Connection", "unmodified", "." ]
python
train
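A hedged usage sketch for connect above; the host, port, and database name are placeholders, and the package-level import is assumed from the docstring's reference to micromongo.clean_connection:

    import micromongo

    # Arguments pass straight through to pymongo's Connection.
    conn = micromongo.connect('localhost', 27017)
    db = conn['example_db']   # ordinary pymongo item access still works
    # Queries made through this connection are routed through micromongo's class_router.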
romanz/trezor-agent
libagent/gpg/decode.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L18-L40
def parse_subpackets(s): """See https://tools.ietf.org/html/rfc4880#section-5.2.3.1 for details.""" subpackets = [] total_size = s.readfmt('>H') data = s.read(total_size) s = util.Reader(io.BytesIO(data)) while True: try: first = s.readfmt('B') except EOFError: break if first < 192: subpacket_len = first elif first < 255: subpacket_len = ((first - 192) << 8) + s.readfmt('B') + 192 else: # first == 255 subpacket_len = s.readfmt('>L') subpackets.append(s.read(subpacket_len)) return subpackets
[ "def", "parse_subpackets", "(", "s", ")", ":", "subpackets", "=", "[", "]", "total_size", "=", "s", ".", "readfmt", "(", "'>H'", ")", "data", "=", "s", ".", "read", "(", "total_size", ")", "s", "=", "util", ".", "Reader", "(", "io", ".", "BytesIO", "(", "data", ")", ")", "while", "True", ":", "try", ":", "first", "=", "s", ".", "readfmt", "(", "'B'", ")", "except", "EOFError", ":", "break", "if", "first", "<", "192", ":", "subpacket_len", "=", "first", "elif", "first", "<", "255", ":", "subpacket_len", "=", "(", "(", "first", "-", "192", ")", "<<", "8", ")", "+", "s", ".", "readfmt", "(", "'B'", ")", "+", "192", "else", ":", "# first == 255", "subpacket_len", "=", "s", ".", "readfmt", "(", "'>L'", ")", "subpackets", ".", "append", "(", "s", ".", "read", "(", "subpacket_len", ")", ")", "return", "subpackets" ]
See https://tools.ietf.org/html/rfc4880#section-5.2.3.1 for details.
[ "See", "https", ":", "//", "tools", ".", "ietf", ".", "org", "/", "html", "/", "rfc4880#section", "-", "5", ".", "2", ".", "3", ".", "1", "for", "details", "." ]
python
train
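The subpacket length prefix decoded above follows RFC 4880 section 5.2.3.1; a worked example of just that length rule, independent of the util.Reader wrapper used in the real code:

    import io
    import struct

    def read_subpacket_len(stream):
        """Decode an OpenPGP subpacket length prefix (RFC 4880, 5.2.3.1)."""
        first = stream.read(1)[0]
        if first < 192:
            return first                                  # one-octet length
        if first < 255:
            second = stream.read(1)[0]
            return ((first - 192) << 8) + second + 192    # two-octet length
        return struct.unpack('>L', stream.read(4))[0]     # five-octet length

    assert read_subpacket_len(io.BytesIO(bytes([0x20]))) == 32
    # 0xC0 0x00 -> ((192 - 192) << 8) + 0 + 192 = 192
    assert read_subpacket_len(io.BytesIO(bytes([0xC0, 0x00]))) == 192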
diging/tethne
tethne/plot/__init__.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/plot/__init__.py#L100-L262
def plot_sigma(corpus, sigma, nodes=None, **kwargs): """ Plot sigma values for the ``topn`` most influential nodes. Parameters ---------- G : :class:`.GraphCollection` corpus : :class:`.Corpus` feature : str Name of a featureset in `corpus`. topn : int or float {0.-1.} (default: 20) Number (int) or percentage (float) of top-occurring features to return. If ``flist`` is provided, this parameter is ignored. sort_by : str (default: 'max') Criterion for selecting ``topn`` nodes. perslice : bool (default: False) If True, loads ``topn`` features per slice. Otherwise, loads ``topn`` features overall. If ``flist`` is provided, this parameter is ignored. flist : list List of nodes. If provided, ``topn`` and ``perslice`` are ignored. fig : :class:`matplotlib.figure.Figure` (default: None) You may provide a Figure instance if you wish. Otherwise, a new figure is generated. Returns ------- fig : :class:`matplotlib.figure.Figure` G : :class:`.GraphCollection` A co-citation graph collection, updated with ``sigma`` node attributes. Examples -------- Assuming that you have a :class:`.Corpus` (``G``) sliced by ``'date'`` and a co-citation :class:`.GraphCollection` (``corpus``)... .. code-block:: python >>> from tethne.analyze.cocitation import plot_sigma >>> fig,G = plot_sigma(G, corpus, topn=5, perslice=True) >>> fig.savefig('~/sigma_plot.png') In this figure, the top 5 most sigma-influential nodes in each slice are shown. Red bands indicate periods in which each paper was influential; opacity indicates the intensity of sigma (normalized by the highest value in the plot). The period prior to the first instance of each node is grayed out. .. figure:: _static/images/sigma_plot.png :width: 600 :align: center """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches except ImportError: raise RuntimeError('This method requires the package matplotlib.') if nodes == 'all': nodes = sigma.keys() # Display parameters. color = kwargs.get('color', 'red') years = sorted(corpus.indices['date'].keys()) width = years[1] - years[0] # Get width based on slices. height = 1.0 sort_by = kwargs.get('sort_by', 'max') perslice = kwargs.get('perslice', False) topn = kwargs.get('topn', 20) if not nodes: # Get only the topn most significant papers. include = [] if sort_by == 'max': if perslice: # Get topn per slice. vals = {} norm_by = 0. # Organize values in a way that makes selection easier. for node, history in sigma.iteritems(): years, values = history if max(values) == 0.: continue for year, val in zip(years, values): if year not in vals: vals[year] = {} vals[year][node] = val # Get the maximum values for each slice. for year, node_values in vals.iteritems(): indices = argsort(node_values.values())[-topn:][::-1] include += [node_values.keys()[i] for i in indices] max_value = max(node_values.values()) if max_value > norm_by: norm_by = max_value else: # Get topn overall. maxes = [max(v[1]) for v in sigma.values() ] indices = argsort(maxes)[-topn:][::-1] include = [sigma.keys()[i] for i in indices] norm_by = max(maxes) # Nodes to include. nodes = [node for node, values in sigma.iteritems() if max(values[1]) > 0 and node in include] # if fig is None: # Create a new Figure instance. fig = plt.figure(figsize=(10, len(nodes)/4.)) # Plot! f = 1 # Current subplot. axes = {} # Earliest year for which we have values. 
x_min = min([min(years) for years, values in sigma.values()]) for node in nodes: x_order = argsort(sigma[node][0]) x = sorted(sigma[node][0]) y = [sigma[node][1][i]/norm_by for i in x_order] ax = fig.add_subplot(len(nodes), 1, f) f+=1 ax.set_yticks([]) ax.set_xbound(x_min, max(years)+1) # Only show xticks on the bottom subplot. if not f == len(nodes) + 1: ax.set_xticklabels([]) # Block out years until first occurrence of feature. rect = mpatches.Rectangle((x_min, 0), x[0] - x_min, height, fill=True, linewidth=0.0) rect.set_facecolor('black') rect.set_alpha(0.1) ax.add_patch(rect) # Add a rectangle for each year, shaded according to burstness state. for d in xrange(min(x), max(x)): try: # May not have values for all years. i = x.index(d) except ValueError: continue xy = (d, 0.) state = y[i] rect = mpatches.Rectangle(xy, width, height, fill=True, linewidth=0.0) rect.set_facecolor(color) rect.set_alpha(state + 0.1) ax.add_patch(rect) ax.set_ylabel(node, rotation=0, horizontalalignment='right', verticalalignment='center') plt.subplots_adjust(left=0.5) fig.tight_layout(h_pad=0.25) plt.show()
[ "def", "plot_sigma", "(", "corpus", ",", "sigma", ",", "nodes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "matplotlib", ".", "patches", "as", "mpatches", "except", "ImportError", ":", "raise", "RuntimeError", "(", "'This method requires the package matplotlib.'", ")", "if", "nodes", "==", "'all'", ":", "nodes", "=", "sigma", ".", "keys", "(", ")", "# Display parameters.", "color", "=", "kwargs", ".", "get", "(", "'color'", ",", "'red'", ")", "years", "=", "sorted", "(", "corpus", ".", "indices", "[", "'date'", "]", ".", "keys", "(", ")", ")", "width", "=", "years", "[", "1", "]", "-", "years", "[", "0", "]", "# Get width based on slices.", "height", "=", "1.0", "sort_by", "=", "kwargs", ".", "get", "(", "'sort_by'", ",", "'max'", ")", "perslice", "=", "kwargs", ".", "get", "(", "'perslice'", ",", "False", ")", "topn", "=", "kwargs", ".", "get", "(", "'topn'", ",", "20", ")", "if", "not", "nodes", ":", "# Get only the topn most significant papers.", "include", "=", "[", "]", "if", "sort_by", "==", "'max'", ":", "if", "perslice", ":", "# Get topn per slice.", "vals", "=", "{", "}", "norm_by", "=", "0.", "# Organize values in a way that makes selection easier.", "for", "node", ",", "history", "in", "sigma", ".", "iteritems", "(", ")", ":", "years", ",", "values", "=", "history", "if", "max", "(", "values", ")", "==", "0.", ":", "continue", "for", "year", ",", "val", "in", "zip", "(", "years", ",", "values", ")", ":", "if", "year", "not", "in", "vals", ":", "vals", "[", "year", "]", "=", "{", "}", "vals", "[", "year", "]", "[", "node", "]", "=", "val", "# Get the maximum values for each slice.", "for", "year", ",", "node_values", "in", "vals", ".", "iteritems", "(", ")", ":", "indices", "=", "argsort", "(", "node_values", ".", "values", "(", ")", ")", "[", "-", "topn", ":", "]", "[", ":", ":", "-", "1", "]", "include", "+=", "[", "node_values", ".", "keys", "(", ")", "[", "i", "]", "for", "i", "in", "indices", "]", "max_value", "=", "max", "(", "node_values", ".", "values", "(", ")", ")", "if", "max_value", ">", "norm_by", ":", "norm_by", "=", "max_value", "else", ":", "# Get topn overall.", "maxes", "=", "[", "max", "(", "v", "[", "1", "]", ")", "for", "v", "in", "sigma", ".", "values", "(", ")", "]", "indices", "=", "argsort", "(", "maxes", ")", "[", "-", "topn", ":", "]", "[", ":", ":", "-", "1", "]", "include", "=", "[", "sigma", ".", "keys", "(", ")", "[", "i", "]", "for", "i", "in", "indices", "]", "norm_by", "=", "max", "(", "maxes", ")", "# Nodes to include.", "nodes", "=", "[", "node", "for", "node", ",", "values", "in", "sigma", ".", "iteritems", "(", ")", "if", "max", "(", "values", "[", "1", "]", ")", ">", "0", "and", "node", "in", "include", "]", "# if fig is None: # Create a new Figure instance.", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "len", "(", "nodes", ")", "/", "4.", ")", ")", "# Plot!", "f", "=", "1", "# Current subplot.", "axes", "=", "{", "}", "# Earliest year for which we have values.", "x_min", "=", "min", "(", "[", "min", "(", "years", ")", "for", "years", ",", "values", "in", "sigma", ".", "values", "(", ")", "]", ")", "for", "node", "in", "nodes", ":", "x_order", "=", "argsort", "(", "sigma", "[", "node", "]", "[", "0", "]", ")", "x", "=", "sorted", "(", "sigma", "[", "node", "]", "[", "0", "]", ")", "y", "=", "[", "sigma", "[", "node", "]", "[", "1", "]", "[", "i", "]", "/", "norm_by", "for", "i", "in", "x_order", "]", "ax", "=", "fig", ".", 
"add_subplot", "(", "len", "(", "nodes", ")", ",", "1", ",", "f", ")", "f", "+=", "1", "ax", ".", "set_yticks", "(", "[", "]", ")", "ax", ".", "set_xbound", "(", "x_min", ",", "max", "(", "years", ")", "+", "1", ")", "# Only show xticks on the bottom subplot.", "if", "not", "f", "==", "len", "(", "nodes", ")", "+", "1", ":", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "# Block out years until first occurrence of feature.", "rect", "=", "mpatches", ".", "Rectangle", "(", "(", "x_min", ",", "0", ")", ",", "x", "[", "0", "]", "-", "x_min", ",", "height", ",", "fill", "=", "True", ",", "linewidth", "=", "0.0", ")", "rect", ".", "set_facecolor", "(", "'black'", ")", "rect", ".", "set_alpha", "(", "0.1", ")", "ax", ".", "add_patch", "(", "rect", ")", "# Add a rectangle for each year, shaded according to burstness state.", "for", "d", "in", "xrange", "(", "min", "(", "x", ")", ",", "max", "(", "x", ")", ")", ":", "try", ":", "# May not have values for all years.", "i", "=", "x", ".", "index", "(", "d", ")", "except", "ValueError", ":", "continue", "xy", "=", "(", "d", ",", "0.", ")", "state", "=", "y", "[", "i", "]", "rect", "=", "mpatches", ".", "Rectangle", "(", "xy", ",", "width", ",", "height", ",", "fill", "=", "True", ",", "linewidth", "=", "0.0", ")", "rect", ".", "set_facecolor", "(", "color", ")", "rect", ".", "set_alpha", "(", "state", "+", "0.1", ")", "ax", ".", "add_patch", "(", "rect", ")", "ax", ".", "set_ylabel", "(", "node", ",", "rotation", "=", "0", ",", "horizontalalignment", "=", "'right'", ",", "verticalalignment", "=", "'center'", ")", "plt", ".", "subplots_adjust", "(", "left", "=", "0.5", ")", "fig", ".", "tight_layout", "(", "h_pad", "=", "0.25", ")", "plt", ".", "show", "(", ")" ]
Plot sigma values for the ``topn`` most influential nodes. Parameters ---------- G : :class:`.GraphCollection` corpus : :class:`.Corpus` feature : str Name of a featureset in `corpus`. topn : int or float {0.-1.} (default: 20) Number (int) or percentage (float) of top-occurring features to return. If ``flist`` is provided, this parameter is ignored. sort_by : str (default: 'max') Criterion for selecting ``topn`` nodes. perslice : bool (default: False) If True, loads ``topn`` features per slice. Otherwise, loads ``topn`` features overall. If ``flist`` is provided, this parameter is ignored. flist : list List of nodes. If provided, ``topn`` and ``perslice`` are ignored. fig : :class:`matplotlib.figure.Figure` (default: None) You may provide a Figure instance if you wish. Otherwise, a new figure is generated. Returns ------- fig : :class:`matplotlib.figure.Figure` G : :class:`.GraphCollection` A co-citation graph collection, updated with ``sigma`` node attributes. Examples -------- Assuming that you have a :class:`.Corpus` (``G``) sliced by ``'date'`` and a co-citation :class:`.GraphCollection` (``corpus``)... .. code-block:: python >>> from tethne.analyze.cocitation import plot_sigma >>> fig,G = plot_sigma(G, corpus, topn=5, perslice=True) >>> fig.savefig('~/sigma_plot.png') In this figure, the top 5 most sigma-influential nodes in each slice are shown. Red bands indicate periods in which each paper was influential; opacity indicates the intensity of sigma (normalized by the highest value in the plot). The period prior to the first instance of each node is grayed out. .. figure:: _static/images/sigma_plot.png :width: 600 :align: center
[ "Plot", "sigma", "values", "for", "the", "topn", "most", "influential", "nodes", "." ]
python
train
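A hedged illustration of the inputs plot_sigma above expects, using an invented sigma mapping and a throwaway stand-in exposing only the corpus.indices['date'] attribute the function reads:

    from types import SimpleNamespace

    # sigma maps each node to a (years, values) pair; the numbers are invented.
    sigma = {
        'SMITH 1990 J THING': ([1991, 1992, 1993], [0.0, 0.8, 0.3]),
        'DOE 2001 J STUFF':   ([1992, 1993], [0.5, 0.9]),
    }

    # Minimal stand-in for a tethne Corpus, not the real class.
    corpus = SimpleNamespace(indices={'date': {1991: [], 1992: [], 1993: []}})

    # plot_sigma(corpus, sigma, nodes='all')  # would draw one shaded band per node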
python-openxml/python-docx
docx/parts/image.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/parts/image.py#L50-L60
def filename(self): """ Filename from which this image part was originally created. A generic name, e.g. 'image.png', is substituted if no name is available, for example when the image was loaded from an unnamed stream. In that case a default extension is applied based on the detected MIME type of the image. """ if self._image is not None: return self._image.filename return 'image.%s' % self.partname.ext
[ "def", "filename", "(", "self", ")", ":", "if", "self", ".", "_image", "is", "not", "None", ":", "return", "self", ".", "_image", ".", "filename", "return", "'image.%s'", "%", "self", ".", "partname", ".", "ext" ]
Filename from which this image part was originally created. A generic name, e.g. 'image.png', is substituted if no name is available, for example when the image was loaded from an unnamed stream. In that case a default extension is applied based on the detected MIME type of the image.
[ "Filename", "from", "which", "this", "image", "part", "was", "originally", "created", ".", "A", "generic", "name", "e", ".", "g", ".", "image", ".", "png", "is", "substituted", "if", "no", "name", "is", "available", "for", "example", "when", "the", "image", "was", "loaded", "from", "an", "unnamed", "stream", ".", "In", "that", "case", "a", "default", "extension", "is", "applied", "based", "on", "the", "detected", "MIME", "type", "of", "the", "image", "." ]
python
train
Varkal/chuda
chuda/shell.py
https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/shell.py#L174-L203
def wait_for(self, pattern, timeout=None): """ Block until a pattern have been found in stdout and stderr Args: pattern(:class:`~re.Pattern`): The pattern to search timeout(int): Maximum number of second to wait. If None, wait infinitely Raises: TimeoutError: When timeout is reach """ should_continue = True if self.block: raise TypeError(NON_BLOCKING_ERROR_MESSAGE) def stop(signum, frame): # pylint: disable=W0613 nonlocal should_continue if should_continue: raise TimeoutError() if timeout: signal.signal(signal.SIGALRM, stop) signal.alarm(timeout) while should_continue: output = self.poll_output() + self.poll_error() filtered = [line for line in output if re.match(pattern, line)] if filtered: should_continue = False
[ "def", "wait_for", "(", "self", ",", "pattern", ",", "timeout", "=", "None", ")", ":", "should_continue", "=", "True", "if", "self", ".", "block", ":", "raise", "TypeError", "(", "NON_BLOCKING_ERROR_MESSAGE", ")", "def", "stop", "(", "signum", ",", "frame", ")", ":", "# pylint: disable=W0613", "nonlocal", "should_continue", "if", "should_continue", ":", "raise", "TimeoutError", "(", ")", "if", "timeout", ":", "signal", ".", "signal", "(", "signal", ".", "SIGALRM", ",", "stop", ")", "signal", ".", "alarm", "(", "timeout", ")", "while", "should_continue", ":", "output", "=", "self", ".", "poll_output", "(", ")", "+", "self", ".", "poll_error", "(", ")", "filtered", "=", "[", "line", "for", "line", "in", "output", "if", "re", ".", "match", "(", "pattern", ",", "line", ")", "]", "if", "filtered", ":", "should_continue", "=", "False" ]
Block until a pattern have been found in stdout and stderr Args: pattern(:class:`~re.Pattern`): The pattern to search timeout(int): Maximum number of second to wait. If None, wait infinitely Raises: TimeoutError: When timeout is reach
[ "Block", "until", "a", "pattern", "have", "been", "found", "in", "stdout", "and", "stderr" ]
python
train
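The timeout in wait_for above hinges on SIGALRM; a self-contained sketch of that pattern outside chuda (POSIX-only, with a plain callable standing in for poll_output/poll_error):

    import re
    import signal
    import time

    def wait_for_pattern(poll, pattern, timeout=None):
        """Poll a callable for lines until one matches, or raise TimeoutError."""
        running = True

        def stop(signum, frame):
            nonlocal running
            if running:
                raise TimeoutError()

        if timeout:
            signal.signal(signal.SIGALRM, stop)
            signal.alarm(timeout)
        while running:
            if any(re.match(pattern, line) for line in poll()):
                running = False
            else:
                time.sleep(0.1)
        signal.alarm(0)  # cancel any pending alarm

    lines = iter([[], [], ['server started on port 8000']])
    wait_for_pattern(lambda: next(lines), r'server started', timeout=5)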
freakboy3742/pyxero
examples/public_oauth_flow/runserver.py
https://github.com/freakboy3742/pyxero/blob/5566f17fa06ed1f2fb9426c112951a72276b0f9a/examples/public_oauth_flow/runserver.py#L54-L126
def do_GET(self): """ Handle GET request """ consumer_key = os.environ.get('XERO_CONSUMER_KEY') consumer_secret = os.environ.get('XERO_CONSUMER_SECRET') if consumer_key is None or consumer_secret is None: raise KeyError( 'Please define both XERO_CONSUMER_KEY and XERO_CONSUMER_SECRET environment variables') print("Serving path: {}".format(self.path)) path = urlparse(self.path) if path.path == '/do-auth': credentials = PublicCredentials( consumer_key, consumer_secret, callback_uri='http://localhost:8000/oauth') # Save generated credentials details to persistent storage for key, value in credentials.state.items(): OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value}) # Redirect to Xero at url provided by credentials generation self.redirect_response(credentials.url) return elif path.path == '/oauth': params = dict(parse_qsl(path.query)) if 'oauth_token' not in params or 'oauth_verifier' not in params or 'org' not in params: self.send_error(500, message='Missing parameters required.') return stored_values = OAUTH_PERSISTENT_SERVER_STORAGE credentials = PublicCredentials(**stored_values) try: credentials.verify(params['oauth_verifier']) # Resave our verified credentials for key, value in credentials.state.items(): OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value}) except XeroException as e: self.send_error(500, message='{}: {}'.format(e.__class__, e.message)) return # Once verified, api can be invoked with xero = Xero(credentials) self.redirect_response('/verified') return elif path.path == '/verified': stored_values = OAUTH_PERSISTENT_SERVER_STORAGE credentials = PublicCredentials(**stored_values) try: xero = Xero(credentials) except XeroException as e: self.send_error(500, message='{}: {}'.format(e.__class__, e.message)) return page_body = 'Your contacts:<br><br>' contacts = xero.contacts.all() if contacts: page_body += '<br>'.join([str(contact) for contact in contacts]) else: page_body += 'No contacts' self.page_response(title='Xero Contacts', body=page_body) return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
[ "def", "do_GET", "(", "self", ")", ":", "consumer_key", "=", "os", ".", "environ", ".", "get", "(", "'XERO_CONSUMER_KEY'", ")", "consumer_secret", "=", "os", ".", "environ", ".", "get", "(", "'XERO_CONSUMER_SECRET'", ")", "if", "consumer_key", "is", "None", "or", "consumer_secret", "is", "None", ":", "raise", "KeyError", "(", "'Please define both XERO_CONSUMER_KEY and XERO_CONSUMER_SECRET environment variables'", ")", "print", "(", "\"Serving path: {}\"", ".", "format", "(", "self", ".", "path", ")", ")", "path", "=", "urlparse", "(", "self", ".", "path", ")", "if", "path", ".", "path", "==", "'/do-auth'", ":", "credentials", "=", "PublicCredentials", "(", "consumer_key", ",", "consumer_secret", ",", "callback_uri", "=", "'http://localhost:8000/oauth'", ")", "# Save generated credentials details to persistent storage", "for", "key", ",", "value", "in", "credentials", ".", "state", ".", "items", "(", ")", ":", "OAUTH_PERSISTENT_SERVER_STORAGE", ".", "update", "(", "{", "key", ":", "value", "}", ")", "# Redirect to Xero at url provided by credentials generation", "self", ".", "redirect_response", "(", "credentials", ".", "url", ")", "return", "elif", "path", ".", "path", "==", "'/oauth'", ":", "params", "=", "dict", "(", "parse_qsl", "(", "path", ".", "query", ")", ")", "if", "'oauth_token'", "not", "in", "params", "or", "'oauth_verifier'", "not", "in", "params", "or", "'org'", "not", "in", "params", ":", "self", ".", "send_error", "(", "500", ",", "message", "=", "'Missing parameters required.'", ")", "return", "stored_values", "=", "OAUTH_PERSISTENT_SERVER_STORAGE", "credentials", "=", "PublicCredentials", "(", "*", "*", "stored_values", ")", "try", ":", "credentials", ".", "verify", "(", "params", "[", "'oauth_verifier'", "]", ")", "# Resave our verified credentials", "for", "key", ",", "value", "in", "credentials", ".", "state", ".", "items", "(", ")", ":", "OAUTH_PERSISTENT_SERVER_STORAGE", ".", "update", "(", "{", "key", ":", "value", "}", ")", "except", "XeroException", "as", "e", ":", "self", ".", "send_error", "(", "500", ",", "message", "=", "'{}: {}'", ".", "format", "(", "e", ".", "__class__", ",", "e", ".", "message", ")", ")", "return", "# Once verified, api can be invoked with xero = Xero(credentials)", "self", ".", "redirect_response", "(", "'/verified'", ")", "return", "elif", "path", ".", "path", "==", "'/verified'", ":", "stored_values", "=", "OAUTH_PERSISTENT_SERVER_STORAGE", "credentials", "=", "PublicCredentials", "(", "*", "*", "stored_values", ")", "try", ":", "xero", "=", "Xero", "(", "credentials", ")", "except", "XeroException", "as", "e", ":", "self", ".", "send_error", "(", "500", ",", "message", "=", "'{}: {}'", ".", "format", "(", "e", ".", "__class__", ",", "e", ".", "message", ")", ")", "return", "page_body", "=", "'Your contacts:<br><br>'", "contacts", "=", "xero", ".", "contacts", ".", "all", "(", ")", "if", "contacts", ":", "page_body", "+=", "'<br>'", ".", "join", "(", "[", "str", "(", "contact", ")", "for", "contact", "in", "contacts", "]", ")", "else", ":", "page_body", "+=", "'No contacts'", "self", ".", "page_response", "(", "title", "=", "'Xero Contacts'", ",", "body", "=", "page_body", ")", "return", "SimpleHTTPServer", ".", "SimpleHTTPRequestHandler", ".", "do_GET", "(", "self", ")" ]
Handle GET request
[ "Handle", "GET", "request" ]
python
train
awslabs/serverless-application-model
samtranslator/model/__init__.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/__init__.py#L189-L213
def _generate_resource_dict(self): """Generates the resource dict for this Resource, the value associated with the logical id in a CloudFormation template's Resources section. :returns: the resource dict for this Resource :rtype: dict """ resource_dict = {} resource_dict['Type'] = self.resource_type if self.depends_on: resource_dict['DependsOn'] = self.depends_on resource_dict.update(self.resource_attributes) properties_dict = {} for name in self.property_types: value = getattr(self, name) if value is not None: properties_dict[name] = value resource_dict['Properties'] = properties_dict return resource_dict
[ "def", "_generate_resource_dict", "(", "self", ")", ":", "resource_dict", "=", "{", "}", "resource_dict", "[", "'Type'", "]", "=", "self", ".", "resource_type", "if", "self", ".", "depends_on", ":", "resource_dict", "[", "'DependsOn'", "]", "=", "self", ".", "depends_on", "resource_dict", ".", "update", "(", "self", ".", "resource_attributes", ")", "properties_dict", "=", "{", "}", "for", "name", "in", "self", ".", "property_types", ":", "value", "=", "getattr", "(", "self", ",", "name", ")", "if", "value", "is", "not", "None", ":", "properties_dict", "[", "name", "]", "=", "value", "resource_dict", "[", "'Properties'", "]", "=", "properties_dict", "return", "resource_dict" ]
Generates the resource dict for this Resource, the value associated with the logical id in a CloudFormation template's Resources section. :returns: the resource dict for this Resource :rtype: dict
[ "Generates", "the", "resource", "dict", "for", "this", "Resource", "the", "value", "associated", "with", "the", "logical", "id", "in", "a", "CloudFormation", "template", "s", "Resources", "section", "." ]
python
train
JohnDoee/thomas
thomas/outputs/http.py
https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/outputs/http.py#L278-L311
def makeProducer(self, request, fileForReading): """ Make a L{StaticProducer} that will produce the body of this response. This method will also set the response code and Content-* headers. @param request: The L{Request} object. @param fileForReading: The file object containing the resource. @return: A L{StaticProducer}. Calling C{.start()} on this will begin producing the response. """ byteRange = request.getHeader(b'range') if byteRange is None or not self.getFileSize(): self._setContentHeaders(request) request.setResponseCode(http.OK) return NoRangeStaticProducer(request, fileForReading) try: parsedRanges = self._parseRangeHeader(byteRange) except ValueError: logger.warning("Ignoring malformed Range header %r" % (byteRange,)) self._setContentHeaders(request) request.setResponseCode(http.OK) return NoRangeStaticProducer(request, fileForReading) if len(parsedRanges) == 1: offset, size = self._doSingleRangeRequest( request, parsedRanges[0]) self._setContentHeaders(request, size) return SingleRangeStaticProducer( request, fileForReading, offset, size) else: rangeInfo = self._doMultipleRangeRequest(request, parsedRanges) return MultipleRangeStaticProducer( request, fileForReading, rangeInfo)
[ "def", "makeProducer", "(", "self", ",", "request", ",", "fileForReading", ")", ":", "byteRange", "=", "request", ".", "getHeader", "(", "b'range'", ")", "if", "byteRange", "is", "None", "or", "not", "self", ".", "getFileSize", "(", ")", ":", "self", ".", "_setContentHeaders", "(", "request", ")", "request", ".", "setResponseCode", "(", "http", ".", "OK", ")", "return", "NoRangeStaticProducer", "(", "request", ",", "fileForReading", ")", "try", ":", "parsedRanges", "=", "self", ".", "_parseRangeHeader", "(", "byteRange", ")", "except", "ValueError", ":", "logger", ".", "warning", "(", "\"Ignoring malformed Range header %r\"", "%", "(", "byteRange", ",", ")", ")", "self", ".", "_setContentHeaders", "(", "request", ")", "request", ".", "setResponseCode", "(", "http", ".", "OK", ")", "return", "NoRangeStaticProducer", "(", "request", ",", "fileForReading", ")", "if", "len", "(", "parsedRanges", ")", "==", "1", ":", "offset", ",", "size", "=", "self", ".", "_doSingleRangeRequest", "(", "request", ",", "parsedRanges", "[", "0", "]", ")", "self", ".", "_setContentHeaders", "(", "request", ",", "size", ")", "return", "SingleRangeStaticProducer", "(", "request", ",", "fileForReading", ",", "offset", ",", "size", ")", "else", ":", "rangeInfo", "=", "self", ".", "_doMultipleRangeRequest", "(", "request", ",", "parsedRanges", ")", "return", "MultipleRangeStaticProducer", "(", "request", ",", "fileForReading", ",", "rangeInfo", ")" ]
Make a L{StaticProducer} that will produce the body of this response. This method will also set the response code and Content-* headers. @param request: The L{Request} object. @param fileForReading: The file object containing the resource. @return: A L{StaticProducer}. Calling C{.start()} on this will begin producing the response.
[ "Make", "a", "L", "{", "StaticProducer", "}", "that", "will", "produce", "the", "body", "of", "this", "response", "." ]
python
train
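The single-range branch of makeProducer above ultimately turns a bytes=start-end header into an offset and a size; a standalone sketch of that arithmetic (not twisted's actual parser, and suffix ranges like bytes=-500 are ignored here):

    def single_range_to_offset_size(range_header, file_size):
        """Turn 'bytes=start-end' into (offset, size), clamped to the file."""
        unit, _, spec = range_header.partition('=')
        assert unit == 'bytes'
        start_s, _, end_s = spec.partition('-')
        start = int(start_s)
        end = int(end_s) if end_s else file_size - 1
        end = min(end, file_size - 1)
        return start, max(0, end - start + 1)

    assert single_range_to_offset_size('bytes=0-499', 10_000) == (0, 500)
    assert single_range_to_offset_size('bytes=9500-', 10_000) == (9500, 500)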
robehickman/simple-http-file-sync
shttpfs/versioned_storage.py
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/versioned_storage.py#L113-L118
def have_active_commit(self): """ Checks if there is an active commit owned by the specified user """ commit_state = sfs.file_or_default(sfs.cpjoin(self.base_path, 'active_commit'), None) if commit_state != None: return True return False
[ "def", "have_active_commit", "(", "self", ")", ":", "commit_state", "=", "sfs", ".", "file_or_default", "(", "sfs", ".", "cpjoin", "(", "self", ".", "base_path", ",", "'active_commit'", ")", ",", "None", ")", "if", "commit_state", "!=", "None", ":", "return", "True", "return", "False" ]
Checks if there is an active commit owned by the specified user
[ "Checks", "if", "there", "is", "an", "active", "commit", "owned", "by", "the", "specified", "user" ]
python
train
Phelimb/ga4gh-mongo
ga4ghmongo/schema/models/variants.py
https://github.com/Phelimb/ga4gh-mongo/blob/5f5a3e1922be0e0d13af1874fad6eed5418ee761/ga4ghmongo/schema/models/variants.py#L292-L308
def is_deletion(reference_bases, alternate_bases): """ Return whether or not the INDEL is a deletion """ # if multiple alts, it is unclear if we have a transition if len(alternate_bases) > 1: return False if is_indel(reference_bases, alternate_bases): # just one alt allele alt_allele = alternate_bases[0] if alt_allele is None: return True if len(reference_bases) > len(alt_allele): return True else: return False else: return False
[ "def", "is_deletion", "(", "reference_bases", ",", "alternate_bases", ")", ":", "# if multiple alts, it is unclear if we have a transition", "if", "len", "(", "alternate_bases", ")", ">", "1", ":", "return", "False", "if", "is_indel", "(", "reference_bases", ",", "alternate_bases", ")", ":", "# just one alt allele", "alt_allele", "=", "alternate_bases", "[", "0", "]", "if", "alt_allele", "is", "None", ":", "return", "True", "if", "len", "(", "reference_bases", ")", ">", "len", "(", "alt_allele", ")", ":", "return", "True", "else", ":", "return", "False", "else", ":", "return", "False" ]
Return whether or not the INDEL is a deletion
[ "Return", "whether", "or", "not", "the", "INDEL", "is", "a", "deletion" ]
python
train
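A few concrete calls tracing the logic of is_deletion above, assuming is_indel (not shown in this record) classifies these length-changing ref/alt pairs as indels:

    assert is_deletion('ATG', ['A'])            # ref longer than the single alt: deletion
    assert not is_deletion('A', ['ATG'])        # insertion, not a deletion
    assert not is_deletion('ATG', ['A', 'AT'])  # multiple alt alleles are never classified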
ghukill/pyfc4
pyfc4/plugins/pcdm/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/plugins/pcdm/models.py#L232-L274
def _post_create(self, auto_refresh=False): ''' resource.create() hook ''' # set PCDM triple as Object self.add_triple(self.rdf.prefixes.rdf.type, self.rdf.prefixes.pcdm.Object) self.update(auto_refresh=auto_refresh) # create /files child resource files_child = PCDMFilesContainer( self.repo, '%s/files' % self.uri_as_string(), membershipResource=self.uri, hasMemberRelation=self.rdf.prefixes.pcdm.hasFile) files_child.create(specify_uri=True) # create /members child resource members_child = PCDMMembersContainer( self.repo, '%s/members' % self.uri_as_string(), membershipResource=self.uri, hasMemberRelation=self.rdf.prefixes.pcdm.hasMember, insertedContentRelation=self.rdf.prefixes.ore.proxyFor) members_child.create(specify_uri=True) # create /related child resource related_child = PCDMRelatedContainer( self.repo, '%s/related' % self.uri_as_string(), membershipResource=self.uri, hasMemberRelation=self.rdf.prefixes.ore.aggregates, insertedContentRelation=self.rdf.prefixes.ore.proxyFor) related_child.create(specify_uri=True) # create /associated child resource associated_child = PCDMAssociatedContainer( self.repo, '%s/associated' % self.uri_as_string(), membershipResource=self.uri, hasMemberRelation=self.rdf.prefixes.pcdm.hasRelatedFile) associated_child.create(specify_uri=True)
[ "def", "_post_create", "(", "self", ",", "auto_refresh", "=", "False", ")", ":", "# set PCDM triple as Object", "self", ".", "add_triple", "(", "self", ".", "rdf", ".", "prefixes", ".", "rdf", ".", "type", ",", "self", ".", "rdf", ".", "prefixes", ".", "pcdm", ".", "Object", ")", "self", ".", "update", "(", "auto_refresh", "=", "auto_refresh", ")", "# create /files child resource", "files_child", "=", "PCDMFilesContainer", "(", "self", ".", "repo", ",", "'%s/files'", "%", "self", ".", "uri_as_string", "(", ")", ",", "membershipResource", "=", "self", ".", "uri", ",", "hasMemberRelation", "=", "self", ".", "rdf", ".", "prefixes", ".", "pcdm", ".", "hasFile", ")", "files_child", ".", "create", "(", "specify_uri", "=", "True", ")", "# create /members child resource", "members_child", "=", "PCDMMembersContainer", "(", "self", ".", "repo", ",", "'%s/members'", "%", "self", ".", "uri_as_string", "(", ")", ",", "membershipResource", "=", "self", ".", "uri", ",", "hasMemberRelation", "=", "self", ".", "rdf", ".", "prefixes", ".", "pcdm", ".", "hasMember", ",", "insertedContentRelation", "=", "self", ".", "rdf", ".", "prefixes", ".", "ore", ".", "proxyFor", ")", "members_child", ".", "create", "(", "specify_uri", "=", "True", ")", "# create /related child resource", "related_child", "=", "PCDMRelatedContainer", "(", "self", ".", "repo", ",", "'%s/related'", "%", "self", ".", "uri_as_string", "(", ")", ",", "membershipResource", "=", "self", ".", "uri", ",", "hasMemberRelation", "=", "self", ".", "rdf", ".", "prefixes", ".", "ore", ".", "aggregates", ",", "insertedContentRelation", "=", "self", ".", "rdf", ".", "prefixes", ".", "ore", ".", "proxyFor", ")", "related_child", ".", "create", "(", "specify_uri", "=", "True", ")", "# create /associated child resource", "associated_child", "=", "PCDMAssociatedContainer", "(", "self", ".", "repo", ",", "'%s/associated'", "%", "self", ".", "uri_as_string", "(", ")", ",", "membershipResource", "=", "self", ".", "uri", ",", "hasMemberRelation", "=", "self", ".", "rdf", ".", "prefixes", ".", "pcdm", ".", "hasRelatedFile", ")", "associated_child", ".", "create", "(", "specify_uri", "=", "True", ")" ]
resource.create() hook
[ "resource", ".", "create", "()", "hook" ]
python
train
chrisspen/burlap
burlap/vm.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vm.py#L597-L628
def delete(name=None, group=None, release=None, except_release=None, dryrun=1, verbose=1): """ Permanently erase one or more VM instances from existence. """ verbose = int(verbose) if env.vm_type == EC2: conn = get_ec2_connection() instances = list_instances( name=name, group=group, release=release, except_release=except_release, ) for instance_name, instance_data in instances.items(): public_dns_name = instance_data['public_dns_name'] print('\nDeleting %s (%s)...' \ % (instance_name, instance_data['id'])) if not get_dryrun(): conn.terminate_instances(instance_ids=[instance_data['id']]) # Clear host key on localhost. known_hosts = os.path.expanduser('~/.ssh/known_hosts') cmd = 'ssh-keygen -f "%s" -R %s' % (known_hosts, public_dns_name) local_or_dryrun(cmd) else: raise NotImplementedError
[ "def", "delete", "(", "name", "=", "None", ",", "group", "=", "None", ",", "release", "=", "None", ",", "except_release", "=", "None", ",", "dryrun", "=", "1", ",", "verbose", "=", "1", ")", ":", "verbose", "=", "int", "(", "verbose", ")", "if", "env", ".", "vm_type", "==", "EC2", ":", "conn", "=", "get_ec2_connection", "(", ")", "instances", "=", "list_instances", "(", "name", "=", "name", ",", "group", "=", "group", ",", "release", "=", "release", ",", "except_release", "=", "except_release", ",", ")", "for", "instance_name", ",", "instance_data", "in", "instances", ".", "items", "(", ")", ":", "public_dns_name", "=", "instance_data", "[", "'public_dns_name'", "]", "print", "(", "'\\nDeleting %s (%s)...'", "%", "(", "instance_name", ",", "instance_data", "[", "'id'", "]", ")", ")", "if", "not", "get_dryrun", "(", ")", ":", "conn", ".", "terminate_instances", "(", "instance_ids", "=", "[", "instance_data", "[", "'id'", "]", "]", ")", "# Clear host key on localhost.", "known_hosts", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.ssh/known_hosts'", ")", "cmd", "=", "'ssh-keygen -f \"%s\" -R %s'", "%", "(", "known_hosts", ",", "public_dns_name", ")", "local_or_dryrun", "(", "cmd", ")", "else", ":", "raise", "NotImplementedError" ]
Permanently erase one or more VM instances from existence.
[ "Permanently", "erase", "one", "or", "more", "VM", "instances", "from", "existence", "." ]
python
valid
apache/incubator-mxnet
plugin/opencv/opencv.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L29-L49
def imdecode(str_img, flag=1): """Decode image from str buffer. Wrapper for cv2.imdecode that uses mx.nd.NDArray Parameters ---------- str_img : str str buffer read from image file flag : int same as flag for cv2.imdecode Returns ------- img : NDArray decoded image in (width, height, channels) with BGR color channel order """ hdl = NDArrayHandle() check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img), mx_uint(len(str_img)), flag, ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
[ "def", "imdecode", "(", "str_img", ",", "flag", "=", "1", ")", ":", "hdl", "=", "NDArrayHandle", "(", ")", "check_call", "(", "_LIB", ".", "MXCVImdecode", "(", "ctypes", ".", "c_char_p", "(", "str_img", ")", ",", "mx_uint", "(", "len", "(", "str_img", ")", ")", ",", "flag", ",", "ctypes", ".", "byref", "(", "hdl", ")", ")", ")", "return", "mx", ".", "nd", ".", "NDArray", "(", "hdl", ")" ]
Decode image from str buffer. Wrapper for cv2.imdecode that uses mx.nd.NDArray Parameters ---------- str_img : str str buffer read from image file flag : int same as flag for cv2.imdecode Returns ------- img : NDArray decoded image in (width, height, channels) with BGR color channel order
[ "Decode", "image", "from", "str", "buffer", ".", "Wrapper", "for", "cv2", ".", "imdecode", "that", "uses", "mx", ".", "nd", ".", "NDArray" ]
python
train
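A hedged usage sketch for imdecode above; it assumes MXNet was built with this opencv plugin available under the import name used below, and that a JPEG exists at the placeholder path:

    import opencv as cv   # hypothetical import name for the plugin module

    with open('example.jpg', 'rb') as f:   # placeholder file name
        buf = f.read()

    img = cv.imdecode(buf)   # flag=1 decodes a 3-channel BGR image
    print(img.shape)         # an mx.nd.NDArray; see the docstring above for axis order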
astropy/astropy-helpers
astropy_helpers/distutils_helpers.py
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/distutils_helpers.py#L226-L254
def get_distutils_display_options(): """ Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or -- """ short_display_opts = set('-' + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set('--' + o[0] for o in Distribution.display_options) # Include -h and --help which are not explicitly listed in # Distribution.display_options (as they are handled by optparse) short_display_opts.add('-h') long_display_opts.add('--help') # This isn't the greatest approach to hardcode these commands. # However, there doesn't seem to be a good way to determine # whether build *will be* run as part of the command at this # phase. display_commands = set([ 'clean', 'register', 'setopt', 'saveopts', 'egg_info', 'alias']) return short_display_opts.union(long_display_opts.union(display_commands))
[ "def", "get_distutils_display_options", "(", ")", ":", "short_display_opts", "=", "set", "(", "'-'", "+", "o", "[", "1", "]", "for", "o", "in", "Distribution", ".", "display_options", "if", "o", "[", "1", "]", ")", "long_display_opts", "=", "set", "(", "'--'", "+", "o", "[", "0", "]", "for", "o", "in", "Distribution", ".", "display_options", ")", "# Include -h and --help which are not explicitly listed in", "# Distribution.display_options (as they are handled by optparse)", "short_display_opts", ".", "add", "(", "'-h'", ")", "long_display_opts", ".", "add", "(", "'--help'", ")", "# This isn't the greatest approach to hardcode these commands.", "# However, there doesn't seem to be a good way to determine", "# whether build *will be* run as part of the command at this", "# phase.", "display_commands", "=", "set", "(", "[", "'clean'", ",", "'register'", ",", "'setopt'", ",", "'saveopts'", ",", "'egg_info'", ",", "'alias'", "]", ")", "return", "short_display_opts", ".", "union", "(", "long_display_opts", ".", "union", "(", "display_commands", ")", ")" ]
Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or --
[ "Returns", "a", "set", "of", "all", "the", "distutils", "display", "options", "in", "their", "long", "and", "short", "forms", ".", "These", "are", "the", "setup", ".", "py", "arguments", "such", "as", "--", "name", "or", "--", "version", "which", "print", "the", "project", "s", "metadata", "and", "then", "exit", "." ]
python
train
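A small sketch of how such a set is typically consulted, deciding whether a setup.py invocation only asks for metadata; the command-line arguments are invented:

    import sys

    display_opts = get_distutils_display_options()

    args = sys.argv[1:] or ['--name', '--version']   # invented fallback for illustration
    metadata_only = bool(args) and all(arg in display_opts for arg in args)
    print('skip expensive setup steps' if metadata_only else 'run full setup')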
angr/angr
angr/analyses/cfg/segment_list.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/segment_list.py#L315-L354
def next_pos_with_sort_not_in(self, address, sorts, max_distance=None): """ Returns the address of the next occupied block whose sort is not one of the specified ones. :param int address: The address to begin the search with (including itself). :param sorts: A collection of sort strings. :param max_distance: The maximum distance between `address` and the next position. Search will stop after we come across an occupied position that is beyond `address` + max_distance. This check will be disabled if `max_distance` is set to None. :return: The next occupied position whose sort is not one of the specified ones, or None if no such position exists. :rtype: int or None """ list_length = len(self._list) idx = self._search(address) if idx < list_length: # Occupied block = self._list[idx] if max_distance is not None and address + max_distance < block.start: return None if block.start <= address < block.end: # the address is inside the current block if block.sort not in sorts: return address # tick the idx forward by 1 idx += 1 i = idx while i < list_length: if max_distance is not None and address + max_distance < self._list[i].start: return None if self._list[i].sort not in sorts: return self._list[i].start i += 1 return None
[ "def", "next_pos_with_sort_not_in", "(", "self", ",", "address", ",", "sorts", ",", "max_distance", "=", "None", ")", ":", "list_length", "=", "len", "(", "self", ".", "_list", ")", "idx", "=", "self", ".", "_search", "(", "address", ")", "if", "idx", "<", "list_length", ":", "# Occupied", "block", "=", "self", ".", "_list", "[", "idx", "]", "if", "max_distance", "is", "not", "None", "and", "address", "+", "max_distance", "<", "block", ".", "start", ":", "return", "None", "if", "block", ".", "start", "<=", "address", "<", "block", ".", "end", ":", "# the address is inside the current block", "if", "block", ".", "sort", "not", "in", "sorts", ":", "return", "address", "# tick the idx forward by 1", "idx", "+=", "1", "i", "=", "idx", "while", "i", "<", "list_length", ":", "if", "max_distance", "is", "not", "None", "and", "address", "+", "max_distance", "<", "self", ".", "_list", "[", "i", "]", ".", "start", ":", "return", "None", "if", "self", ".", "_list", "[", "i", "]", ".", "sort", "not", "in", "sorts", ":", "return", "self", ".", "_list", "[", "i", "]", ".", "start", "i", "+=", "1", "return", "None" ]
Returns the address of the next occupied block whose sort is not one of the specified ones. :param int address: The address to begin the search with (including itself). :param sorts: A collection of sort strings. :param max_distance: The maximum distance between `address` and the next position. Search will stop after we come across an occupied position that is beyond `address` + max_distance. This check will be disabled if `max_distance` is set to None. :return: The next occupied position whose sort is not one of the specified ones, or None if no such position exists. :rtype: int or None
[ "Returns", "the", "address", "of", "the", "next", "occupied", "block", "whose", "sort", "is", "not", "one", "of", "the", "specified", "ones", "." ]
python
train
ralphbean/taskw
taskw/utils.py
https://github.com/ralphbean/taskw/blob/11e2f9132eaedd157f514538de9b5f3b69c30a52/taskw/utils.py#L164-L183
def decode_task(line): """ Parse a single record (task) from a task database file. I don't understand why they don't just use JSON or YAML. But that's okay. >>> decode_task('[description:"Make a python API for taskwarrior"]') {'description': 'Make a python API for taskwarrior'} """ task = {} for key, value in re.findall(r'(\w+):"(.*?)(?<!\\)"', line): value = value.replace('\\"', '"') # unescape quotes task[key] = value for unsafe, safe in six.iteritems(decode_replacements): task[key] = task[key].replace(unsafe, safe) if 'tags' in task: task['tags'] = task['tags'].split(',') return task
[ "def", "decode_task", "(", "line", ")", ":", "task", "=", "{", "}", "for", "key", ",", "value", "in", "re", ".", "findall", "(", "r'(\\w+):\"(.*?)(?<!\\\\)\"'", ",", "line", ")", ":", "value", "=", "value", ".", "replace", "(", "'\\\\\"'", ",", "'\"'", ")", "# unescape quotes", "task", "[", "key", "]", "=", "value", "for", "unsafe", ",", "safe", "in", "six", ".", "iteritems", "(", "decode_replacements", ")", ":", "task", "[", "key", "]", "=", "task", "[", "key", "]", ".", "replace", "(", "unsafe", ",", "safe", ")", "if", "'tags'", "in", "task", ":", "task", "[", "'tags'", "]", "=", "task", "[", "'tags'", "]", ".", "split", "(", "','", ")", "return", "task" ]
Parse a single record (task) from a task database file. I don't understand why they don't just use JSON or YAML. But that's okay. >>> decode_task('[description:"Make a python API for taskwarrior"]') {'description': 'Make a python API for taskwarrior'}
[ "Parse", "a", "single", "record", "(", "task", ")", "from", "a", "task", "database", "file", "." ]
python
train
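One more worked call for decode_task above, extending the doctest to show quote unescaping and tag splitting; the record text is made up and assumed not to contain any of the escape sequences in decode_replacements:

    line = r'[description:"Say \"hello\"" tags:"home,urgent" priority:"H"]'
    task = decode_task(line)
    assert task['description'] == 'Say "hello"'
    assert task['tags'] == ['home', 'urgent']
    assert task['priority'] == 'H'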
learningequality/ricecooker
ricecooker/chefs.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/chefs.py#L443-L477
def pre_run(self, args, options): """ This function is called before `run` in order to build the json tree. """ if 'generate' in args and args['generate']: self.metadata_provider = CsvMetadataProvider(args['channeldir'], channelinfo=args['channelinfo'], contentinfo=args['contentinfo'], exercisesinfo=args['exercisesinfo'], questionsinfo=args['questionsinfo'], validate_and_cache=False) self.metadata_provider.generate_templates(exercise_questions=True) self.metadata_provider.generate_contentinfo_from_channeldir(args, options) sys.exit(0) elif 'importstudioid' in args and args['importstudioid']: studio_id = args['importstudioid'] config.LOGGER.info("Calling with importstudioid... " + studio_id) self.metadata_provider = CsvMetadataProvider(args['channeldir'], channelinfo=args['channelinfo'], contentinfo=args['contentinfo'], exercisesinfo=args['exercisesinfo'], questionsinfo=args['questionsinfo'], validate_and_cache=False) self.metadata_provider.generate_templates(exercise_questions=True) self.metadata_provider.generate_exercises_from_importstudioid(args, options) sys.exit(0) if self.metadata_provider is None: self._init_metadata_provider(args, options) kwargs = {} # combined dictionary of argparse args and extra options kwargs.update(args) kwargs.update(options) json_tree_path = self.get_json_tree_path(**kwargs) build_ricecooker_json_tree(args, options, self.metadata_provider, json_tree_path)
[ "def", "pre_run", "(", "self", ",", "args", ",", "options", ")", ":", "if", "'generate'", "in", "args", "and", "args", "[", "'generate'", "]", ":", "self", ".", "metadata_provider", "=", "CsvMetadataProvider", "(", "args", "[", "'channeldir'", "]", ",", "channelinfo", "=", "args", "[", "'channelinfo'", "]", ",", "contentinfo", "=", "args", "[", "'contentinfo'", "]", ",", "exercisesinfo", "=", "args", "[", "'exercisesinfo'", "]", ",", "questionsinfo", "=", "args", "[", "'questionsinfo'", "]", ",", "validate_and_cache", "=", "False", ")", "self", ".", "metadata_provider", ".", "generate_templates", "(", "exercise_questions", "=", "True", ")", "self", ".", "metadata_provider", ".", "generate_contentinfo_from_channeldir", "(", "args", ",", "options", ")", "sys", ".", "exit", "(", "0", ")", "elif", "'importstudioid'", "in", "args", "and", "args", "[", "'importstudioid'", "]", ":", "studio_id", "=", "args", "[", "'importstudioid'", "]", "config", ".", "LOGGER", ".", "info", "(", "\"Calling with importstudioid... \"", "+", "studio_id", ")", "self", ".", "metadata_provider", "=", "CsvMetadataProvider", "(", "args", "[", "'channeldir'", "]", ",", "channelinfo", "=", "args", "[", "'channelinfo'", "]", ",", "contentinfo", "=", "args", "[", "'contentinfo'", "]", ",", "exercisesinfo", "=", "args", "[", "'exercisesinfo'", "]", ",", "questionsinfo", "=", "args", "[", "'questionsinfo'", "]", ",", "validate_and_cache", "=", "False", ")", "self", ".", "metadata_provider", ".", "generate_templates", "(", "exercise_questions", "=", "True", ")", "self", ".", "metadata_provider", ".", "generate_exercises_from_importstudioid", "(", "args", ",", "options", ")", "sys", ".", "exit", "(", "0", ")", "if", "self", ".", "metadata_provider", "is", "None", ":", "self", ".", "_init_metadata_provider", "(", "args", ",", "options", ")", "kwargs", "=", "{", "}", "# combined dictionary of argparse args and extra options", "kwargs", ".", "update", "(", "args", ")", "kwargs", ".", "update", "(", "options", ")", "json_tree_path", "=", "self", ".", "get_json_tree_path", "(", "*", "*", "kwargs", ")", "build_ricecooker_json_tree", "(", "args", ",", "options", ",", "self", ".", "metadata_provider", ",", "json_tree_path", ")" ]
This function is called before `run` in order to build the json tree.
[ "This", "function", "is", "called", "before", "run", "in", "order", "to", "build", "the", "json", "tree", "." ]
python
train
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1909-L1928
def _remove_from_ptr_size(self, ptr): # type: (path_table_record.PathTableRecord) -> int ''' An internal method to remove a PTR from a VD, removing space from the VD if necessary. Parameters: ptr - The PTR to remove from the VD. Returns: The number of bytes to remove from the VDs (this may be zero). ''' num_bytes_to_remove = 0 for pvd in self.pvds: # The remove_from_ptr_size() returns True if the PVD no longer # needs the extra extents in the PTR that stored this directory. # We always remove 4 additional extents for that. if pvd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(ptr.len_di)): num_bytes_to_remove += 4 * self.pvd.logical_block_size() return num_bytes_to_remove
[ "def", "_remove_from_ptr_size", "(", "self", ",", "ptr", ")", ":", "# type: (path_table_record.PathTableRecord) -> int", "num_bytes_to_remove", "=", "0", "for", "pvd", "in", "self", ".", "pvds", ":", "# The remove_from_ptr_size() returns True if the PVD no longer", "# needs the extra extents in the PTR that stored this directory.", "# We always remove 4 additional extents for that.", "if", "pvd", ".", "remove_from_ptr_size", "(", "path_table_record", ".", "PathTableRecord", ".", "record_length", "(", "ptr", ".", "len_di", ")", ")", ":", "num_bytes_to_remove", "+=", "4", "*", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "return", "num_bytes_to_remove" ]
An internal method to remove a PTR from a VD, removing space from the VD if necessary. Parameters: ptr - The PTR to remove from the VD. Returns: The number of bytes to remove from the VDs (this may be zero).
[ "An", "internal", "method", "to", "remove", "a", "PTR", "from", "a", "VD", "removing", "space", "from", "the", "VD", "if", "necessary", "." ]
python
train
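The byte accounting above boils down to a small calculation: every PVD whose path table can shrink returns four extents' worth of space. A standalone sketch with made-up numbers:

# Illustration only; the block size and PVD count are invented.
logical_block_size = 2048        # common ISO 9660 logical block size
pvds_that_can_shrink = 2         # PVDs for which remove_from_ptr_size(...) was True

num_bytes_to_remove = pvds_that_can_shrink * 4 * logical_block_size
print(num_bytes_to_remove)       # 16384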
wavefrontHQ/python-client
wavefront_api_client/api/search_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/search_api.py#L915-L935
def search_cloud_integration_entities(self, **kwargs): # noqa: E501 """Search over a customer's non-deleted cloud integrations # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_cloud_integration_entities(async_req=True) >>> result = thread.get() :param async_req bool :param SortableSearchRequest body: :return: ResponseContainerPagedCloudIntegration If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501 else: (data) = self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "search_cloud_integration_entities", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "search_cloud_integration_entities_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "search_cloud_integration_entities_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Search over a customer's non-deleted cloud integrations # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_cloud_integration_entities(async_req=True) >>> result = thread.get() :param async_req bool :param SortableSearchRequest body: :return: ResponseContainerPagedCloudIntegration If the method is called asynchronously, returns the request thread.
[ "Search", "over", "a", "customer", "s", "non", "-", "deleted", "cloud", "integrations", "#", "noqa", ":", "E501" ]
python
train
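A call site consistent with the docstring's own async example; the client configuration and the SortableSearchRequest fields are assumptions, only the method name and its async_req/thread.get() pattern come from the record.

import wavefront_api_client as wave

config = wave.Configuration()                          # assumed setup
config.host = 'https://example.wavefront.com/api'      # hypothetical cluster URL
config.api_key['X-AUTH-TOKEN'] = 'REDACTED'            # assumed auth header name
api = wave.SearchApi(wave.ApiClient(config))

body = wave.SortableSearchRequest(limit=10)            # assumed model kwargs

# Synchronous call
result = api.search_cloud_integration_entities(body=body)

# Asynchronous call, as in the docstring above
thread = api.search_cloud_integration_entities(async_req=True, body=body)
result = thread.get()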
QuantEcon/QuantEcon.py
quantecon/compute_fp.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/compute_fp.py#L285-L352
def _initialize_tableaux_ig(X, Y, tableaux, bases): """ Given sequences `X` and `Y` of ndarrays, initialize the tableau and basis arrays in place for the "geometric" imitation game as defined in McLennan and Tourky (2006), to be passed to `_lemke_howson_tbl`. Parameters ---------- X, Y : ndarray(float) Arrays of the same shape (m, n). tableaux : tuple(ndarray(float, ndim=2)) Tuple of two arrays to be used to store the tableaux, of shape (2m, 2m). Modified in place. bases : tuple(ndarray(int, ndim=1)) Tuple of two arrays to be used to store the bases, of shape (m,). Modified in place. Returns ------- tableaux : tuple(ndarray(float, ndim=2)) View to `tableaux`. bases : tuple(ndarray(int, ndim=1)) View to `bases`. """ m = X.shape[0] min_ = np.zeros(m) # Mover for i in range(m): for j in range(2*m): if j == i or j == i + m: tableaux[0][i, j] = 1 else: tableaux[0][i, j] = 0 # Right hand side tableaux[0][i, 2*m] = 1 # Imitator for i in range(m): # Slack variables for j in range(m): if j == i: tableaux[1][i, j] = 1 else: tableaux[1][i, j] = 0 # Payoff variables for j in range(m): d = X[i] - Y[j] tableaux[1][i, m+j] = _square_sum(d) * (-1) if tableaux[1][i, m+j] < min_[j]: min_[j] = tableaux[1][i, m+j] # Right hand side tableaux[1][i, 2*m] = 1 # Shift the payoff values for i in range(m): for j in range(m): tableaux[1][i, m+j] -= min_[j] tableaux[1][i, m+j] += 1 for pl, start in enumerate([m, 0]): for i in range(m): bases[pl][i] = start + i return tableaux, bases
[ "def", "_initialize_tableaux_ig", "(", "X", ",", "Y", ",", "tableaux", ",", "bases", ")", ":", "m", "=", "X", ".", "shape", "[", "0", "]", "min_", "=", "np", ".", "zeros", "(", "m", ")", "# Mover", "for", "i", "in", "range", "(", "m", ")", ":", "for", "j", "in", "range", "(", "2", "*", "m", ")", ":", "if", "j", "==", "i", "or", "j", "==", "i", "+", "m", ":", "tableaux", "[", "0", "]", "[", "i", ",", "j", "]", "=", "1", "else", ":", "tableaux", "[", "0", "]", "[", "i", ",", "j", "]", "=", "0", "# Right hand side", "tableaux", "[", "0", "]", "[", "i", ",", "2", "*", "m", "]", "=", "1", "# Imitator", "for", "i", "in", "range", "(", "m", ")", ":", "# Slack variables", "for", "j", "in", "range", "(", "m", ")", ":", "if", "j", "==", "i", ":", "tableaux", "[", "1", "]", "[", "i", ",", "j", "]", "=", "1", "else", ":", "tableaux", "[", "1", "]", "[", "i", ",", "j", "]", "=", "0", "# Payoff variables", "for", "j", "in", "range", "(", "m", ")", ":", "d", "=", "X", "[", "i", "]", "-", "Y", "[", "j", "]", "tableaux", "[", "1", "]", "[", "i", ",", "m", "+", "j", "]", "=", "_square_sum", "(", "d", ")", "*", "(", "-", "1", ")", "if", "tableaux", "[", "1", "]", "[", "i", ",", "m", "+", "j", "]", "<", "min_", "[", "j", "]", ":", "min_", "[", "j", "]", "=", "tableaux", "[", "1", "]", "[", "i", ",", "m", "+", "j", "]", "# Right hand side", "tableaux", "[", "1", "]", "[", "i", ",", "2", "*", "m", "]", "=", "1", "# Shift the payoff values", "for", "i", "in", "range", "(", "m", ")", ":", "for", "j", "in", "range", "(", "m", ")", ":", "tableaux", "[", "1", "]", "[", "i", ",", "m", "+", "j", "]", "-=", "min_", "[", "j", "]", "tableaux", "[", "1", "]", "[", "i", ",", "m", "+", "j", "]", "+=", "1", "for", "pl", ",", "start", "in", "enumerate", "(", "[", "m", ",", "0", "]", ")", ":", "for", "i", "in", "range", "(", "m", ")", ":", "bases", "[", "pl", "]", "[", "i", "]", "=", "start", "+", "i", "return", "tableaux", ",", "bases" ]
Given sequences `X` and `Y` of ndarrays, initialize the tableau and basis arrays in place for the "geometric" imitation game as defined in McLennan and Tourky (2006), to be passed to `_lemke_howson_tbl`. Parameters ---------- X, Y : ndarray(float) Arrays of the same shape (m, n). tableaux : tuple(ndarray(float, ndim=2)) Tuple of two arrays to be used to store the tableaux, of shape (2m, 2m). Modified in place. bases : tuple(ndarray(int, ndim=1)) Tuple of two arrays to be used to store the bases, of shape (m,). Modified in place. Returns ------- tableaux : tuple(ndarray(float, ndim=2)) View to `tableaux`. bases : tuple(ndarray(int, ndim=1)) View to `bases`.
[ "Given", "sequences", "X", "and", "Y", "of", "ndarrays", "initialize", "the", "tableau", "and", "basis", "arrays", "in", "place", "for", "the", "geometric", "imitation", "game", "as", "defined", "in", "McLennan", "and", "Tourky", "(", "2006", ")", "to", "be", "passed", "to", "_lemke_howson_tbl", "." ]
python
train
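A minimal sketch of preparing the arrays this helper fills in place; the shapes follow the indexing used in the code (m rows and a 2*m-th right-hand-side column per tableau), and the call is commented out because the function is module-private.

import numpy as np

m, n = 3, 2
X = np.random.rand(m, n)          # invented point clouds for the imitation game
Y = np.random.rand(m, n)

tableaux = tuple(np.empty((m, 2*m + 1)) for _ in range(2))
bases = tuple(np.empty(m, dtype=int) for _ in range(2))

# _initialize_tableaux_ig(X, Y, tableaux, bases)
# Afterwards tableaux/bases hold the starting data for _lemke_howson_tbl.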
discogs/python-cas-client
cas_client/cas_client.py
https://github.com/discogs/python-cas-client/blob/f1efa2f49a22d43135014cb1b8d9dd3875304318/cas_client/cas_client.py#L85-L113
def get_api_url( self, api_resource, auth_token_ticket, authenticator, private_key, service_url=None, **kwargs ): ''' Build an auth-token-protected CAS API url. ''' auth_token, auth_token_signature = self._build_auth_token_data( auth_token_ticket, authenticator, private_key, **kwargs ) params = { 'at': auth_token, 'ats': auth_token_signature, } if service_url is not None: params['service'] = service_url url = '{}?{}'.format( self._get_api_url(api_resource), urlencode(params), ) return url
[ "def", "get_api_url", "(", "self", ",", "api_resource", ",", "auth_token_ticket", ",", "authenticator", ",", "private_key", ",", "service_url", "=", "None", ",", "*", "*", "kwargs", ")", ":", "auth_token", ",", "auth_token_signature", "=", "self", ".", "_build_auth_token_data", "(", "auth_token_ticket", ",", "authenticator", ",", "private_key", ",", "*", "*", "kwargs", ")", "params", "=", "{", "'at'", ":", "auth_token", ",", "'ats'", ":", "auth_token_signature", ",", "}", "if", "service_url", "is", "not", "None", ":", "params", "[", "'service'", "]", "=", "service_url", "url", "=", "'{}?{}'", ".", "format", "(", "self", ".", "_get_api_url", "(", "api_resource", ")", ",", "urlencode", "(", "params", ")", ",", ")", "return", "url" ]
Build an auth-token-protected CAS API url.
[ "Build", "an", "auth", "-", "token", "-", "protected", "CAS", "API", "url", "." ]
python
train
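A hypothetical call site; the CASClient constructor arguments, resource name and key material are made up, only get_api_url's parameters come from the record.

from cas_client import CASClient

client = CASClient('https://cas.example.com')          # assumed constructor usage
with open('private_key.pem') as f:                     # hypothetical key file
    private_key = f.read()

url = client.get_api_url(
    api_resource='profile',                            # hypothetical API resource
    auth_token_ticket='AT-1234-example',               # ticket obtained earlier from CAS
    authenticator='my-service',
    private_key=private_key,
    service_url='https://app.example.com/',
)
# url now carries the 'at', 'ats' and 'service' query parameters built above.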
aegirhall/console-menu
consolemenu/console_menu.py
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L102-L114
def append_item(self, item): """ Add an item to the end of the menu before the exit item. Args: item (MenuItem): The item to be added. """ did_remove = self.remove_exit() item.menu = self self.items.append(item) if did_remove: self.add_exit()
[ "def", "append_item", "(", "self", ",", "item", ")", ":", "did_remove", "=", "self", ".", "remove_exit", "(", ")", "item", ".", "menu", "=", "self", "self", ".", "items", ".", "append", "(", "item", ")", "if", "did_remove", ":", "self", ".", "add_exit", "(", ")" ]
Add an item to the end of the menu before the exit item. Args: item (MenuItem): The item to be added.
[ "Add", "an", "item", "to", "the", "end", "of", "the", "menu", "before", "the", "exit", "item", "." ]
python
train
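A short usage sketch; the menu title and item follow the usual consolemenu pattern but are not taken from the record above.

from consolemenu import ConsoleMenu
from consolemenu.items import FunctionItem

menu = ConsoleMenu('Main menu')
# append_item() pops the exit entry if present, appends the new item,
# then re-adds the exit entry so it stays at the bottom.
menu.append_item(FunctionItem('Say hello', print, ['hello']))
menu.show()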
minhhoit/yacms
yacms/utils/docs.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/utils/docs.py#L288-L302
def build_requirements(docs_path, package_name="yacms"): """ Updates the requirements file with yacms's version number. """ mezz_string = "yacms==" project_path = os.path.join(docs_path, "..") requirements_file = os.path.join(project_path, package_name, "project_template", "requirements.txt") with open(requirements_file, "r") as f: requirements = f.readlines() with open(requirements_file, "w") as f: f.write("yacms==%s\n" % __version__) for requirement in requirements: if requirement.strip() and not requirement.startswith(mezz_string): f.write(requirement)
[ "def", "build_requirements", "(", "docs_path", ",", "package_name", "=", "\"yacms\"", ")", ":", "mezz_string", "=", "\"yacms==\"", "project_path", "=", "os", ".", "path", ".", "join", "(", "docs_path", ",", "\"..\"", ")", "requirements_file", "=", "os", ".", "path", ".", "join", "(", "project_path", ",", "package_name", ",", "\"project_template\"", ",", "\"requirements.txt\"", ")", "with", "open", "(", "requirements_file", ",", "\"r\"", ")", "as", "f", ":", "requirements", "=", "f", ".", "readlines", "(", ")", "with", "open", "(", "requirements_file", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"yacms==%s\\n\"", "%", "__version__", ")", "for", "requirement", "in", "requirements", ":", "if", "requirement", ".", "strip", "(", ")", "and", "not", "requirement", ".", "startswith", "(", "mezz_string", ")", ":", "f", ".", "write", "(", "requirement", ")" ]
Updates the requirements file with yacms's version number.
[ "Updates", "the", "requirements", "file", "with", "yacms", "s", "version", "number", "." ]
python
train
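A hypothetical invocation, spelling out the side effect; the docs path is made up.

docs_path = '/home/ci/yacms/docs'      # invented checkout location
# build_requirements(docs_path) rewrites
#   <project>/yacms/project_template/requirements.txt
# so that its first line pins the running release ("yacms==<__version__>");
# any stale "yacms==" pin is dropped and all other requirement lines are kept.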
IAMconsortium/pyam
pyam/core.py
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L248-L290
def pivot_table(self, index, columns, values='value', aggfunc='count', fill_value=None, style=None): """Returns a pivot table Parameters ---------- index: str or list of strings rows for Pivot table columns: str or list of strings columns for Pivot table values: str, default 'value' dataframe column to aggregate or count aggfunc: str or function, default 'count' function used for aggregation, accepts 'count', 'mean', and 'sum' fill_value: scalar, default None value to replace missing values with style: str, default None output style for pivot table formatting accepts 'highlight_not_max', 'heatmap' """ index = [index] if isstr(index) else index columns = [columns] if isstr(columns) else columns df = self.data # allow 'aggfunc' to be passed as string for easier user interface if isstr(aggfunc): if aggfunc == 'count': df = self.data.groupby(index + columns, as_index=False).count() fill_value = 0 elif aggfunc == 'mean': df = self.data.groupby(index + columns, as_index=False).mean()\ .round(2) aggfunc = np.sum fill_value = 0 if style == 'heatmap' else "" elif aggfunc == 'sum': aggfunc = np.sum fill_value = 0 if style == 'heatmap' else "" df = df.pivot_table(values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value) return df
[ "def", "pivot_table", "(", "self", ",", "index", ",", "columns", ",", "values", "=", "'value'", ",", "aggfunc", "=", "'count'", ",", "fill_value", "=", "None", ",", "style", "=", "None", ")", ":", "index", "=", "[", "index", "]", "if", "isstr", "(", "index", ")", "else", "index", "columns", "=", "[", "columns", "]", "if", "isstr", "(", "columns", ")", "else", "columns", "df", "=", "self", ".", "data", "# allow 'aggfunc' to be passed as string for easier user interface", "if", "isstr", "(", "aggfunc", ")", ":", "if", "aggfunc", "==", "'count'", ":", "df", "=", "self", ".", "data", ".", "groupby", "(", "index", "+", "columns", ",", "as_index", "=", "False", ")", ".", "count", "(", ")", "fill_value", "=", "0", "elif", "aggfunc", "==", "'mean'", ":", "df", "=", "self", ".", "data", ".", "groupby", "(", "index", "+", "columns", ",", "as_index", "=", "False", ")", ".", "mean", "(", ")", ".", "round", "(", "2", ")", "aggfunc", "=", "np", ".", "sum", "fill_value", "=", "0", "if", "style", "==", "'heatmap'", "else", "\"\"", "elif", "aggfunc", "==", "'sum'", ":", "aggfunc", "=", "np", ".", "sum", "fill_value", "=", "0", "if", "style", "==", "'heatmap'", "else", "\"\"", "df", "=", "df", ".", "pivot_table", "(", "values", "=", "values", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "aggfunc", "=", "aggfunc", ",", "fill_value", "=", "fill_value", ")", "return", "df" ]
Returns a pivot table Parameters ---------- index: str or list of strings rows for Pivot table columns: str or list of strings columns for Pivot table values: str, default 'value' dataframe column to aggregate or count aggfunc: str or function, default 'count' function used for aggregation, accepts 'count', 'mean', and 'sum' fill_value: scalar, default None value to replace missing values with style: str, default None output style for pivot table formatting accepts 'highlight_not_max', 'heatmap'
[ "Returns", "a", "pivot", "table" ]
python
train
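A usage sketch against an invented scenario table; the column names follow the IAMC layout that pyam expects, but the data itself is made up.

import pandas as pd
import pyam

data = pd.DataFrame([
    ['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/yr', 2010, 6.0],
    ['model_a', 'scen_b', 'World', 'Primary Energy', 'EJ/yr', 2010, 7.0],
], columns=['model', 'scenario', 'region', 'variable', 'unit', 'year', 'value'])
df = pyam.IamDataFrame(data)

# Count datapoints per region and year (missing cells filled with 0)
counts = df.pivot_table(index='region', columns='year', aggfunc='count')

# Mean values, with the fill value chosen for heatmap styling
means = df.pivot_table(index='region', columns='year',
                       aggfunc='mean', style='heatmap')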
CZ-NIC/yangson
yangson/schemanode.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemanode.py#L619-L621
def _choice_stmt(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle choice statement.""" self._handle_child(ChoiceNode(), stmt, sctx)
[ "def", "_choice_stmt", "(", "self", ",", "stmt", ":", "Statement", ",", "sctx", ":", "SchemaContext", ")", "->", "None", ":", "self", ".", "_handle_child", "(", "ChoiceNode", "(", ")", ",", "stmt", ",", "sctx", ")" ]
Handle choice statement.
[ "Handle", "choice", "statement", "." ]
python
train
openstack/proliantutils
proliantutils/redfish/resources/manager/virtual_media.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/manager/virtual_media.py#L93-L106
def set_vm_status(self, boot_on_next_reset): """Set the Virtual Media drive status. :param boot_on_next_reset: boolean value :raises: SushyError, on an error from iLO. """ data = { "Oem": { "Hpe": { "BootOnNextServerReset": boot_on_next_reset } } } self._conn.patch(self.path, data=data)
[ "def", "set_vm_status", "(", "self", ",", "boot_on_next_reset", ")", ":", "data", "=", "{", "\"Oem\"", ":", "{", "\"Hpe\"", ":", "{", "\"BootOnNextServerReset\"", ":", "boot_on_next_reset", "}", "}", "}", "self", ".", "_conn", ".", "patch", "(", "self", ".", "path", ",", "data", "=", "data", ")" ]
Set the Virtual Media drive status. :param boot_on_next_reset: boolean value :raises: SushyError, on an error from iLO.
[ "Set", "the", "Virtual", "Media", "drive", "status", "." ]
python
train
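For reference, the JSON body this helper PATCHes to the virtual-media resource; constructing the sushy connector that backs self._conn is out of scope here and assumed.

payload = {
    "Oem": {
        "Hpe": {
            "BootOnNextServerReset": True
        }
    }
}
# virtual_media.set_vm_status(boot_on_next_reset=True) sends
# PATCH <virtual media URI> with the body above.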
pricingassistant/mrq
mrq/queue.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L211-L224
def all(cls): """ List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow. """ # Start with raw queues we know exist from the config queues = {x: 0 for x in Queue.get_queues_config()} stats = list(context.connections.mongodb_jobs.mrq_jobs.aggregate([ {"$match": {"status": "queued"}}, {"$group": {"_id": "$queue", "jobs": {"$sum": 1}}} ])) queues.update({x["_id"]: x["jobs"] for x in stats}) return queues
[ "def", "all", "(", "cls", ")", ":", "# Start with raw queues we know exist from the config", "queues", "=", "{", "x", ":", "0", "for", "x", "in", "Queue", ".", "get_queues_config", "(", ")", "}", "stats", "=", "list", "(", "context", ".", "connections", ".", "mongodb_jobs", ".", "mrq_jobs", ".", "aggregate", "(", "[", "{", "\"$match\"", ":", "{", "\"status\"", ":", "\"queued\"", "}", "}", ",", "{", "\"$group\"", ":", "{", "\"_id\"", ":", "\"$queue\"", ",", "\"jobs\"", ":", "{", "\"$sum\"", ":", "1", "}", "}", "}", "]", ")", ")", "queues", ".", "update", "(", "{", "x", "[", "\"_id\"", "]", ":", "x", "[", "\"jobs\"", "]", "for", "x", "in", "stats", "}", ")", "return", "queues" ]
List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow.
[ "List", "all", "queues", "in", "MongoDB", "via", "aggregation", "with", "their", "queued", "jobs", "counts", ".", "Might", "be", "slow", "." ]
python
train
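The same aggregation expressed directly with pymongo, for inspecting queue depths outside a worker; the connection URI and database name are invented, and note that Queue.all() additionally seeds configured-but-empty queues with a 0 count.

from pymongo import MongoClient

jobs = MongoClient('mongodb://localhost:27017')['mrq']['mrq_jobs']   # assumed DB name
queued_counts = {
    doc['_id']: doc['jobs']
    for doc in jobs.aggregate([
        {'$match': {'status': 'queued'}},
        {'$group': {'_id': '$queue', 'jobs': {'$sum': 1}}},
    ])
}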