Column schema for each record in this dump:

  repo               stringlengths   7 - 55
  path               stringlengths   4 - 223
  url                stringlengths   87 - 315
  code               stringlengths   75 - 104k
  code_tokens        list
  docstring          stringlengths   1 - 46.9k
  docstring_tokens   list
  language           stringclasses   1 value
  partition          stringclasses   3 values
  avg_line_len       float64         7.91 - 980
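To make the schema concrete, here is one record written out as a Python literal. This is only an illustrative sketch, not part of the dataset itself: the values are copied from the jonathf/chaospy row later in this dump, the newline placement inside the code field is reconstructed from the linked GitHub source, and code_tokens is abridged with an ellipsis for space.

```python
# One record of the dump as a Python dict, keyed by the ten schema columns.
# Values come from the jonathf/chaospy row below; "code_tokens" is abridged.
row = {
    "repo": "jonathf/chaospy",
    "path": "chaospy/distributions/operators/tan.py",
    "url": "https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/tan.py#L39-L42",
    "code": (
        'def _pdf(self, x, dist, cache):\n'
        '    """Probability density function."""\n'
        '    return evaluation.evaluate_density(\n'
        '        dist, numpy.arctan(x), cache=cache)/(1+x*x)'
    ),
    "code_tokens": ["def", "_pdf", "(", "self", ",", "x", ",", "dist", ",", "cache", ")", ":", "..."],
    "docstring": "Probability density function.",
    "docstring_tokens": ["Probability", "density", "function", "."],
    "language": "python",
    "partition": "train",
    "avg_line_len": 43.0,
}

# Each subsequent block of ten lines in the dump is one such record,
# in exactly this field order.
print(row["repo"], row["partition"], row["avg_line_len"])
```

avg_line_len appears to be a per-record line-length statistic for the code field; its exact definition is not documented in the dump itself.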
edoburu/django-tag-parser
tag_parser/basetags.py
https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L148-L158
def render(self, context): """ The default Django render() method for the tag. This method resolves the filter expressions, and calls :func:`render_tag`. """ # Resolve token kwargs tag_args = [expr.resolve(context) for expr in self.args] if self.compile_args else self.args tag_kwargs = dict([(name, expr.resolve(context)) for name, expr in six.iteritems(self.kwargs)]) if self.compile_kwargs else self.kwargs return self.render_tag(context, *tag_args, **tag_kwargs)
[ "def", "render", "(", "self", ",", "context", ")", ":", "# Resolve token kwargs", "tag_args", "=", "[", "expr", ".", "resolve", "(", "context", ")", "for", "expr", "in", "self", ".", "args", "]", "if", "self", ".", "compile_args", "else", "self", ".", "args", "tag_kwargs", "=", "dict", "(", "[", "(", "name", ",", "expr", ".", "resolve", "(", "context", ")", ")", "for", "name", ",", "expr", "in", "six", ".", "iteritems", "(", "self", ".", "kwargs", ")", "]", ")", "if", "self", ".", "compile_kwargs", "else", "self", ".", "kwargs", "return", "self", ".", "render_tag", "(", "context", ",", "*", "tag_args", ",", "*", "*", "tag_kwargs", ")" ]
The default Django render() method for the tag. This method resolves the filter expressions, and calls :func:`render_tag`.
[ "The", "default", "Django", "render", "()", "method", "for", "the", "tag", "." ]
python
test
47.454545
jonathf/chaospy
chaospy/distributions/operators/tan.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/tan.py#L39-L42
def _pdf(self, x, dist, cache): """Probability density function.""" return evaluation.evaluate_density( dist, numpy.arctan(x), cache=cache)/(1+x*x)
[ "def", "_pdf", "(", "self", ",", "x", ",", "dist", ",", "cache", ")", ":", "return", "evaluation", ".", "evaluate_density", "(", "dist", ",", "numpy", ".", "arctan", "(", "x", ")", ",", "cache", "=", "cache", ")", "/", "(", "1", "+", "x", "*", "x", ")" ]
Probability density function.
[ "Probability", "density", "function", "." ]
python
train
43
Azure/blobxfer
blobxfer/operations/synccopy.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/synccopy.py#L517-L532
def _finalize_nonblock_blob(self, sd, metadata, digest): # type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict, # str) -> None """Finalize Non-Block blob :param SyncCopy self: this :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor :param dict metadata: metadata dict :param str digest: md5 digest """ # set md5 page blob property if required if (blobxfer.util.is_not_empty(digest) or sd.dst_entity.cache_control is not None): self._set_blob_properties(sd, digest) # set metadata if needed if blobxfer.util.is_not_empty(metadata): self._set_blob_metadata(sd, metadata)
[ "def", "_finalize_nonblock_blob", "(", "self", ",", "sd", ",", "metadata", ",", "digest", ")", ":", "# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict,", "# str) -> None", "# set md5 page blob property if required", "if", "(", "blobxfer", ".", "util", ".", "is_not_empty", "(", "digest", ")", "or", "sd", ".", "dst_entity", ".", "cache_control", "is", "not", "None", ")", ":", "self", ".", "_set_blob_properties", "(", "sd", ",", "digest", ")", "# set metadata if needed", "if", "blobxfer", ".", "util", ".", "is_not_empty", "(", "metadata", ")", ":", "self", ".", "_set_blob_metadata", "(", "sd", ",", "metadata", ")" ]
Finalize Non-Block blob :param SyncCopy self: this :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor :param dict metadata: metadata dict :param str digest: md5 digest
[ "Finalize", "Non", "-", "Block", "blob", ":", "param", "SyncCopy", "self", ":", "this", ":", "param", "blobxfer", ".", "models", ".", "synccopy", ".", "Descriptor", "sd", ":", "synccopy", "descriptor", ":", "param", "dict", "metadata", ":", "metadata", "dict", ":", "param", "str", "digest", ":", "md5", "digest" ]
python
train
44.9375
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L889-L928
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile, storeStFeatures=False, storeToCSV=False, PLOT=False): """ This function is used as a wrapper to: a) read the content of a WAV file b) perform mid-term feature extraction on that signal c) write the mid-term feature sequences to a numpy file """ [fs, x] = audioBasicIO.readAudioFile(fileName) x = audioBasicIO.stereo2mono(x) if storeStFeatures: [mtF, stF, _] = mtFeatureExtraction(x, fs, round(fs * midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep)) else: [mtF, _, _] = mtFeatureExtraction(x, fs, round(fs*midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep)) # save mt features to numpy file numpy.save(outPutFile, mtF) if PLOT: print("Mid-term numpy file: " + outPutFile + ".npy saved") if storeToCSV: numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",") if PLOT: print("Mid-term CSV file: " + outPutFile + ".csv saved") if storeStFeatures: # save st features to numpy file numpy.save(outPutFile+"_st", stF) if PLOT: print("Short-term numpy file: " + outPutFile + "_st.npy saved") if storeToCSV: # store st features to CSV file numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",") if PLOT: print("Short-term CSV file: " + outPutFile + "_st.csv saved")
[ "def", "mtFeatureExtractionToFile", "(", "fileName", ",", "midTermSize", ",", "midTermStep", ",", "shortTermSize", ",", "shortTermStep", ",", "outPutFile", ",", "storeStFeatures", "=", "False", ",", "storeToCSV", "=", "False", ",", "PLOT", "=", "False", ")", ":", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "fileName", ")", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "if", "storeStFeatures", ":", "[", "mtF", ",", "stF", ",", "_", "]", "=", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "round", "(", "fs", "*", "midTermSize", ")", ",", "round", "(", "fs", "*", "midTermStep", ")", ",", "round", "(", "fs", "*", "shortTermSize", ")", ",", "round", "(", "fs", "*", "shortTermStep", ")", ")", "else", ":", "[", "mtF", ",", "_", ",", "_", "]", "=", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "round", "(", "fs", "*", "midTermSize", ")", ",", "round", "(", "fs", "*", "midTermStep", ")", ",", "round", "(", "fs", "*", "shortTermSize", ")", ",", "round", "(", "fs", "*", "shortTermStep", ")", ")", "# save mt features to numpy file", "numpy", ".", "save", "(", "outPutFile", ",", "mtF", ")", "if", "PLOT", ":", "print", "(", "\"Mid-term numpy file: \"", "+", "outPutFile", "+", "\".npy saved\"", ")", "if", "storeToCSV", ":", "numpy", ".", "savetxt", "(", "outPutFile", "+", "\".csv\"", ",", "mtF", ".", "T", ",", "delimiter", "=", "\",\"", ")", "if", "PLOT", ":", "print", "(", "\"Mid-term CSV file: \"", "+", "outPutFile", "+", "\".csv saved\"", ")", "if", "storeStFeatures", ":", "# save st features to numpy file", "numpy", ".", "save", "(", "outPutFile", "+", "\"_st\"", ",", "stF", ")", "if", "PLOT", ":", "print", "(", "\"Short-term numpy file: \"", "+", "outPutFile", "+", "\"_st.npy saved\"", ")", "if", "storeToCSV", ":", "# store st features to CSV file", "numpy", ".", "savetxt", "(", "outPutFile", "+", "\"_st.csv\"", ",", "stF", ".", "T", ",", "delimiter", "=", "\",\"", ")", "if", "PLOT", ":", "print", "(", "\"Short-term CSV file: \"", "+", "outPutFile", "+", "\"_st.csv saved\"", ")" ]
This function is used as a wrapper to: a) read the content of a WAV file b) perform mid-term feature extraction on that signal c) write the mid-term feature sequences to a numpy file
[ "This", "function", "is", "used", "as", "a", "wrapper", "to", ":", "a", ")", "read", "the", "content", "of", "a", "WAV", "file", "b", ")", "perform", "mid", "-", "term", "feature", "extraction", "on", "that", "signal", "c", ")", "write", "the", "mid", "-", "term", "feature", "sequences", "to", "a", "numpy", "file" ]
python
train
45.725
fhs/pyhdf
pyhdf/SD.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2959-L3018
def getscale(self): """Obtain the scale values along a dimension. Args:: no argument Returns:: list with the scale values; the list length is equal to the dimension length; the element type is equal to the dimension data type, as set when the 'setdimscale()' method was called. C library equivalent : SDgetdimscale """ # Get dimension info. If data_type is 0, no scale have been set # on the dimension. status, dim_name, dim_size, data_type, n_attrs = _C.SDdiminfo(self._id) _checkErr('getscale', status, 'cannot execute') if data_type == 0: raise HDF4Error("no scale set on that dimension") # dim_size is 0 for an unlimited dimension. The actual length is # obtained through SDgetinfo. if dim_size == 0: dim_size = self._sds.info()[2][self._index] # Get scale values. if data_type in [SDC.UCHAR8, SDC.UINT8]: buf = _C.array_byte(dim_size) elif data_type == SDC.INT8: buf = _C.array_int8(dim_size) elif data_type == SDC.INT16: buf = _C.array_int16(dim_size) elif data_type == SDC.UINT16: buf = _C.array_uint16(dim_size) elif data_type == SDC.INT32: buf = _C.array_int32(dim_size) elif data_type == SDC.UINT32: buf = _C.array_uint32(dim_size) elif data_type == SDC.FLOAT32: buf = _C.array_float32(dim_size) elif data_type == SDC.FLOAT64: buf = _C.array_float64(dim_size) else: raise HDF4Error("getscale: dimension has an "\ "illegal or unsupported type %d" % data_type) status = _C.SDgetdimscale(self._id, buf) _checkErr('getscale', status, 'cannot execute') return _array_to_ret(buf, dim_size)
[ "def", "getscale", "(", "self", ")", ":", "# Get dimension info. If data_type is 0, no scale have been set", "# on the dimension.", "status", ",", "dim_name", ",", "dim_size", ",", "data_type", ",", "n_attrs", "=", "_C", ".", "SDdiminfo", "(", "self", ".", "_id", ")", "_checkErr", "(", "'getscale'", ",", "status", ",", "'cannot execute'", ")", "if", "data_type", "==", "0", ":", "raise", "HDF4Error", "(", "\"no scale set on that dimension\"", ")", "# dim_size is 0 for an unlimited dimension. The actual length is", "# obtained through SDgetinfo.", "if", "dim_size", "==", "0", ":", "dim_size", "=", "self", ".", "_sds", ".", "info", "(", ")", "[", "2", "]", "[", "self", ".", "_index", "]", "# Get scale values.", "if", "data_type", "in", "[", "SDC", ".", "UCHAR8", ",", "SDC", ".", "UINT8", "]", ":", "buf", "=", "_C", ".", "array_byte", "(", "dim_size", ")", "elif", "data_type", "==", "SDC", ".", "INT8", ":", "buf", "=", "_C", ".", "array_int8", "(", "dim_size", ")", "elif", "data_type", "==", "SDC", ".", "INT16", ":", "buf", "=", "_C", ".", "array_int16", "(", "dim_size", ")", "elif", "data_type", "==", "SDC", ".", "UINT16", ":", "buf", "=", "_C", ".", "array_uint16", "(", "dim_size", ")", "elif", "data_type", "==", "SDC", ".", "INT32", ":", "buf", "=", "_C", ".", "array_int32", "(", "dim_size", ")", "elif", "data_type", "==", "SDC", ".", "UINT32", ":", "buf", "=", "_C", ".", "array_uint32", "(", "dim_size", ")", "elif", "data_type", "==", "SDC", ".", "FLOAT32", ":", "buf", "=", "_C", ".", "array_float32", "(", "dim_size", ")", "elif", "data_type", "==", "SDC", ".", "FLOAT64", ":", "buf", "=", "_C", ".", "array_float64", "(", "dim_size", ")", "else", ":", "raise", "HDF4Error", "(", "\"getscale: dimension has an \"", "\"illegal or unsupported type %d\"", "%", "data_type", ")", "status", "=", "_C", ".", "SDgetdimscale", "(", "self", ".", "_id", ",", "buf", ")", "_checkErr", "(", "'getscale'", ",", "status", ",", "'cannot execute'", ")", "return", "_array_to_ret", "(", "buf", ",", "dim_size", ")" ]
Obtain the scale values along a dimension. Args:: no argument Returns:: list with the scale values; the list length is equal to the dimension length; the element type is equal to the dimension data type, as set when the 'setdimscale()' method was called. C library equivalent : SDgetdimscale
[ "Obtain", "the", "scale", "values", "along", "a", "dimension", "." ]
python
train
31.7
Capitains/MyCapytain
MyCapytain/resources/prototypes/cts/inventory.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L461-L466
def lang(self): """ Languages this text is in :return: List of available languages """ return str(self.graph.value(self.asNode(), DC.language))
[ "def", "lang", "(", "self", ")", ":", "return", "str", "(", "self", ".", "graph", ".", "value", "(", "self", ".", "asNode", "(", ")", ",", "DC", ".", "language", ")", ")" ]
Languages this text is in :return: List of available languages
[ "Languages", "this", "text", "is", "in" ]
python
train
28.5
grahambell/pymoc
lib/pymoc/io/json.py
https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/io/json.py#L42-L55
def read_moc_json(moc, filename=None, file=None): """Read JSON encoded data into a MOC. Either a filename, or an open file object can be specified. """ if file is not None: obj = _read_json(file) else: with open(filename, 'rb') as f: obj = _read_json(f) for (order, cells) in obj.items(): moc.add(order, cells)
[ "def", "read_moc_json", "(", "moc", ",", "filename", "=", "None", ",", "file", "=", "None", ")", ":", "if", "file", "is", "not", "None", ":", "obj", "=", "_read_json", "(", "file", ")", "else", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "obj", "=", "_read_json", "(", "f", ")", "for", "(", "order", ",", "cells", ")", "in", "obj", ".", "items", "(", ")", ":", "moc", ".", "add", "(", "order", ",", "cells", ")" ]
Read JSON encoded data into a MOC. Either a filename, or an open file object can be specified.
[ "Read", "JSON", "encoded", "data", "into", "a", "MOC", "." ]
python
train
25.714286
supercoderz/pyflightdata
pyflightdata/flightdata.py
https://github.com/supercoderz/pyflightdata/blob/2caf9f429288f9a171893d1b8377d0c6244541cc/pyflightdata/flightdata.py#L172-L191
def get_fleet(self, airline_key): """Get the fleet for a particular airline. Given a airline code form the get_airlines() method output, this method returns the fleet for the airline. Args: airline_key (str): The code for the airline on flightradar24 Returns: A list of dicts, one for each aircraft in the airlines fleet Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_fleet('ai-aic') """ url = AIRLINE_FLEET_BASE.format(airline_key) return self._fr24.get_airline_fleet_data(url, self.AUTH_TOKEN != '')
[ "def", "get_fleet", "(", "self", ",", "airline_key", ")", ":", "url", "=", "AIRLINE_FLEET_BASE", ".", "format", "(", "airline_key", ")", "return", "self", ".", "_fr24", ".", "get_airline_fleet_data", "(", "url", ",", "self", ".", "AUTH_TOKEN", "!=", "''", ")" ]
Get the fleet for a particular airline. Given a airline code form the get_airlines() method output, this method returns the fleet for the airline. Args: airline_key (str): The code for the airline on flightradar24 Returns: A list of dicts, one for each aircraft in the airlines fleet Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_fleet('ai-aic')
[ "Get", "the", "fleet", "for", "a", "particular", "airline", "." ]
python
train
34.9
gbiggs/rtctree
rtctree/ports.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/ports.py#L83-L123
def connect(self, dests=[], name=None, id='', props={}): '''Connect this port to other ports. After the connection has been made, a delayed reparse of the connections for this and the destination port will be triggered. @param dests A list of the destination Port objects. Must be provided. @param name The name of the connection. If None, a suitable default will be created based on the names of the two ports. @param id The ID of this connection. If None, one will be generated by the RTC implementation. @param props Properties of the connection. Required values depend on the type of the two ports being connected. @raises IncompatibleDataPortConnectionPropsError, FailedToConnectError ''' with self._mutex: if self.porttype == 'DataInPort' or self.porttype == 'DataOutPort': for prop in props: if prop in self.properties: if props[prop] not in [x.strip() for x in self.properties[prop].split(',')] and \ 'any' not in self.properties[prop].lower(): # Invalid property selected raise exceptions.IncompatibleDataPortConnectionPropsError for d in dests: if prop in d.properties: if props[prop] not in [x.strip() for x in d.properties[prop].split(',')] and \ 'any' not in d.properties[prop].lower(): # Invalid property selected raise exceptions.IncompatibleDataPortConnectionPropsError if not name: name = self.name + '_'.join([d.name for d in dests]) props = utils.dict_to_nvlist(props) profile = RTC.ConnectorProfile(name, id, [self._obj] + [d._obj for d in dests], props) return_code, profile = self._obj.connect(profile) if return_code != RTC.RTC_OK: raise exceptions.FailedToConnectError(return_code) self.reparse_connections() for d in dests: d.reparse_connections()
[ "def", "connect", "(", "self", ",", "dests", "=", "[", "]", ",", "name", "=", "None", ",", "id", "=", "''", ",", "props", "=", "{", "}", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "porttype", "==", "'DataInPort'", "or", "self", ".", "porttype", "==", "'DataOutPort'", ":", "for", "prop", "in", "props", ":", "if", "prop", "in", "self", ".", "properties", ":", "if", "props", "[", "prop", "]", "not", "in", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "self", ".", "properties", "[", "prop", "]", ".", "split", "(", "','", ")", "]", "and", "'any'", "not", "in", "self", ".", "properties", "[", "prop", "]", ".", "lower", "(", ")", ":", "# Invalid property selected", "raise", "exceptions", ".", "IncompatibleDataPortConnectionPropsError", "for", "d", "in", "dests", ":", "if", "prop", "in", "d", ".", "properties", ":", "if", "props", "[", "prop", "]", "not", "in", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "d", ".", "properties", "[", "prop", "]", ".", "split", "(", "','", ")", "]", "and", "'any'", "not", "in", "d", ".", "properties", "[", "prop", "]", ".", "lower", "(", ")", ":", "# Invalid property selected", "raise", "exceptions", ".", "IncompatibleDataPortConnectionPropsError", "if", "not", "name", ":", "name", "=", "self", ".", "name", "+", "'_'", ".", "join", "(", "[", "d", ".", "name", "for", "d", "in", "dests", "]", ")", "props", "=", "utils", ".", "dict_to_nvlist", "(", "props", ")", "profile", "=", "RTC", ".", "ConnectorProfile", "(", "name", ",", "id", ",", "[", "self", ".", "_obj", "]", "+", "[", "d", ".", "_obj", "for", "d", "in", "dests", "]", ",", "props", ")", "return_code", ",", "profile", "=", "self", ".", "_obj", ".", "connect", "(", "profile", ")", "if", "return_code", "!=", "RTC", ".", "RTC_OK", ":", "raise", "exceptions", ".", "FailedToConnectError", "(", "return_code", ")", "self", ".", "reparse_connections", "(", ")", "for", "d", "in", "dests", ":", "d", ".", "reparse_connections", "(", ")" ]
Connect this port to other ports. After the connection has been made, a delayed reparse of the connections for this and the destination port will be triggered. @param dests A list of the destination Port objects. Must be provided. @param name The name of the connection. If None, a suitable default will be created based on the names of the two ports. @param id The ID of this connection. If None, one will be generated by the RTC implementation. @param props Properties of the connection. Required values depend on the type of the two ports being connected. @raises IncompatibleDataPortConnectionPropsError, FailedToConnectError
[ "Connect", "this", "port", "to", "other", "ports", "." ]
python
train
55
Woile/commitizen
commitizen/config.py
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/config.py#L54-L78
def read_raw_parser_conf(data: str) -> dict: """We expect to have a section like this ``` [commitizen] name = cz_jira files = [ "commitizen/__version__.py", "pyproject.toml" ] # this tab at the end is important ``` """ config = configparser.ConfigParser(allow_no_value=True) config.read_string(data) try: _data: dict = dict(config["commitizen"]) if "files" in _data: files = _data["files"] _f = json.loads(files) _data.update({"files": _f}) return _data except KeyError: return {}
[ "def", "read_raw_parser_conf", "(", "data", ":", "str", ")", "->", "dict", ":", "config", "=", "configparser", ".", "ConfigParser", "(", "allow_no_value", "=", "True", ")", "config", ".", "read_string", "(", "data", ")", "try", ":", "_data", ":", "dict", "=", "dict", "(", "config", "[", "\"commitizen\"", "]", ")", "if", "\"files\"", "in", "_data", ":", "files", "=", "_data", "[", "\"files\"", "]", "_f", "=", "json", ".", "loads", "(", "files", ")", "_data", ".", "update", "(", "{", "\"files\"", ":", "_f", "}", ")", "return", "_data", "except", "KeyError", ":", "return", "{", "}" ]
We expect to have a section like this ``` [commitizen] name = cz_jira files = [ "commitizen/__version__.py", "pyproject.toml" ] # this tab at the end is important ```
[ "We", "expect", "to", "have", "a", "section", "like", "this" ]
python
train
23.84
materialsproject/pymatgen-db
matgendb/vv/report.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/vv/report.py#L449-L465
def result_subsets(self, rs): """Break a result set into subsets with the same keys. :param rs: Result set, rows of a result as a list of dicts :type rs: list of dict :return: A set with distinct keys (tuples), and a dict, by these tuples, of max. widths for each column """ keyset, maxwid = set(), {} for r in rs: key = tuple(sorted(r.keys())) keyset.add(key) if key not in maxwid: maxwid[key] = [len(k) for k in key] for i, k in enumerate(key): strlen = len("{}".format(r[k])) maxwid[key][i] = max(maxwid[key][i], strlen) return keyset, maxwid
[ "def", "result_subsets", "(", "self", ",", "rs", ")", ":", "keyset", ",", "maxwid", "=", "set", "(", ")", ",", "{", "}", "for", "r", "in", "rs", ":", "key", "=", "tuple", "(", "sorted", "(", "r", ".", "keys", "(", ")", ")", ")", "keyset", ".", "add", "(", "key", ")", "if", "key", "not", "in", "maxwid", ":", "maxwid", "[", "key", "]", "=", "[", "len", "(", "k", ")", "for", "k", "in", "key", "]", "for", "i", ",", "k", "in", "enumerate", "(", "key", ")", ":", "strlen", "=", "len", "(", "\"{}\"", ".", "format", "(", "r", "[", "k", "]", ")", ")", "maxwid", "[", "key", "]", "[", "i", "]", "=", "max", "(", "maxwid", "[", "key", "]", "[", "i", "]", ",", "strlen", ")", "return", "keyset", ",", "maxwid" ]
Break a result set into subsets with the same keys. :param rs: Result set, rows of a result as a list of dicts :type rs: list of dict :return: A set with distinct keys (tuples), and a dict, by these tuples, of max. widths for each column
[ "Break", "a", "result", "set", "into", "subsets", "with", "the", "same", "keys", "." ]
python
train
40.588235
bitesofcode/projexui
projexui/widgets/xlocationwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlocationwidget.py#L69-L77
def blockSignals( self, state ): """ Blocks the signals for this widget and its sub-parts. :param state | <bool> """ super(XLocationWidget, self).blockSignals(state) self._locationEdit.blockSignals(state) self._locationButton.blockSignals(state)
[ "def", "blockSignals", "(", "self", ",", "state", ")", ":", "super", "(", "XLocationWidget", ",", "self", ")", ".", "blockSignals", "(", "state", ")", "self", ".", "_locationEdit", ".", "blockSignals", "(", "state", ")", "self", ".", "_locationButton", ".", "blockSignals", "(", "state", ")" ]
Blocks the signals for this widget and its sub-parts. :param state | <bool>
[ "Blocks", "the", "signals", "for", "this", "widget", "and", "its", "sub", "-", "parts", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
35
tensorpack/tensorpack
tensorpack/utils/debug.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/debug.py#L8-L27
def enable_call_trace(): """ Enable trace for calls to any function. """ def tracer(frame, event, arg): if event == 'call': co = frame.f_code func_name = co.co_name if func_name == 'write' or func_name == 'print': # ignore write() calls from print statements return func_line_no = frame.f_lineno func_filename = co.co_filename caller = frame.f_back if caller: caller_line_no = caller.f_lineno caller_filename = caller.f_code.co_filename print('Call to `%s` on line %s:%s from %s:%s' % (func_name, func_filename, func_line_no, caller_filename, caller_line_no)) return sys.settrace(tracer)
[ "def", "enable_call_trace", "(", ")", ":", "def", "tracer", "(", "frame", ",", "event", ",", "arg", ")", ":", "if", "event", "==", "'call'", ":", "co", "=", "frame", ".", "f_code", "func_name", "=", "co", ".", "co_name", "if", "func_name", "==", "'write'", "or", "func_name", "==", "'print'", ":", "# ignore write() calls from print statements", "return", "func_line_no", "=", "frame", ".", "f_lineno", "func_filename", "=", "co", ".", "co_filename", "caller", "=", "frame", ".", "f_back", "if", "caller", ":", "caller_line_no", "=", "caller", ".", "f_lineno", "caller_filename", "=", "caller", ".", "f_code", ".", "co_filename", "print", "(", "'Call to `%s` on line %s:%s from %s:%s'", "%", "(", "func_name", ",", "func_filename", ",", "func_line_no", ",", "caller_filename", ",", "caller_line_no", ")", ")", "return", "sys", ".", "settrace", "(", "tracer", ")" ]
Enable trace for calls to any function.
[ "Enable", "trace", "for", "calls", "to", "any", "function", "." ]
python
train
40.45
markovmodel/PyEMMA
pyemma/_base/model.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/model.py#L55-L69
def _get_model_param_names(cls): r"""Get parameter names for the model""" # fetch model parameters if hasattr(cls, 'set_model_params'): # introspect the constructor arguments to find the model parameters # to represent args, varargs, kw, default = getargspec_no_self(cls.set_model_params) if varargs is not None: raise RuntimeError("PyEMMA models should always specify their parameters in the signature" " of their set_model_params (no varargs). %s doesn't follow this convention." % (cls,)) return args else: # No parameters known return []
[ "def", "_get_model_param_names", "(", "cls", ")", ":", "# fetch model parameters", "if", "hasattr", "(", "cls", ",", "'set_model_params'", ")", ":", "# introspect the constructor arguments to find the model parameters", "# to represent", "args", ",", "varargs", ",", "kw", ",", "default", "=", "getargspec_no_self", "(", "cls", ".", "set_model_params", ")", "if", "varargs", "is", "not", "None", ":", "raise", "RuntimeError", "(", "\"PyEMMA models should always specify their parameters in the signature\"", "\" of their set_model_params (no varargs). %s doesn't follow this convention.\"", "%", "(", "cls", ",", ")", ")", "return", "args", "else", ":", "# No parameters known", "return", "[", "]" ]
r"""Get parameter names for the model
[ "r", "Get", "parameter", "names", "for", "the", "model" ]
python
train
48.6
Nachtfeuer/pipeline
spline/tools/report/collector.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L229-L239
def count_stages(self, matrix_name): """ Number of registered stages for given matrix name. Parameters: matrix_name (str): name of the matrix Returns: int: number of reported stages for given matrix name. """ return len(self.data[matrix_name]) if matrix_name in self.data else 0
[ "def", "count_stages", "(", "self", ",", "matrix_name", ")", ":", "return", "len", "(", "self", ".", "data", "[", "matrix_name", "]", ")", "if", "matrix_name", "in", "self", ".", "data", "else", "0" ]
Number of registered stages for given matrix name. Parameters: matrix_name (str): name of the matrix Returns: int: number of reported stages for given matrix name.
[ "Number", "of", "registered", "stages", "for", "given", "matrix", "name", "." ]
python
train
31.090909
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L196-L199
def commit_fw_db_result(self): """Calls routine to update the FW create/delete result in DB. """ fw_dict = self.get_fw_dict() self.update_fw_db_result(fw_dict.get('fw_id'), fw_dict)
[ "def", "commit_fw_db_result", "(", "self", ")", ":", "fw_dict", "=", "self", ".", "get_fw_dict", "(", ")", "self", ".", "update_fw_db_result", "(", "fw_dict", ".", "get", "(", "'fw_id'", ")", ",", "fw_dict", ")" ]
Calls routine to update the FW create/delete result in DB.
[ "Calls", "routine", "to", "update", "the", "FW", "create", "/", "delete", "result", "in", "DB", "." ]
python
train
50.5
beetbox/audioread
audioread/macca.py
https://github.com/beetbox/audioread/blob/c8bedf7880f13a7b7488b108aaf245d648674818/audioread/macca.py#L215-L222
def _open_url(cls, url): """Given a CFURL Python object, return an opened ExtAudioFileRef. """ file_obj = ctypes.c_void_p() check(_coreaudio.ExtAudioFileOpenURL( url._obj, ctypes.byref(file_obj) )) return file_obj
[ "def", "_open_url", "(", "cls", ",", "url", ")", ":", "file_obj", "=", "ctypes", ".", "c_void_p", "(", ")", "check", "(", "_coreaudio", ".", "ExtAudioFileOpenURL", "(", "url", ".", "_obj", ",", "ctypes", ".", "byref", "(", "file_obj", ")", ")", ")", "return", "file_obj" ]
Given a CFURL Python object, return an opened ExtAudioFileRef.
[ "Given", "a", "CFURL", "Python", "object", "return", "an", "opened", "ExtAudioFileRef", "." ]
python
train
33.25
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L2089-L2113
def CheckHeaderFileIncluded(filename, include_state, error): """Logs an error if a source file does not include its header.""" # Do not check test files fileinfo = FileInfo(filename) if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()): return for ext in GetHeaderExtensions(): basefilename = filename[0:len(filename) - len(fileinfo.Extension())] headerfile = basefilename + '.' + ext if not os.path.exists(headerfile): continue headername = FileInfo(headerfile).RepositoryName() first_include = None for section_list in include_state.include_list: for f in section_list: if headername in f[0] or f[0] in headername: return if not first_include: first_include = f[1] error(filename, first_include, 'build/include', 5, '%s should include its header file %s' % (fileinfo.RepositoryName(), headername))
[ "def", "CheckHeaderFileIncluded", "(", "filename", ",", "include_state", ",", "error", ")", ":", "# Do not check test files", "fileinfo", "=", "FileInfo", "(", "filename", ")", "if", "Search", "(", "_TEST_FILE_SUFFIX", ",", "fileinfo", ".", "BaseName", "(", ")", ")", ":", "return", "for", "ext", "in", "GetHeaderExtensions", "(", ")", ":", "basefilename", "=", "filename", "[", "0", ":", "len", "(", "filename", ")", "-", "len", "(", "fileinfo", ".", "Extension", "(", ")", ")", "]", "headerfile", "=", "basefilename", "+", "'.'", "+", "ext", "if", "not", "os", ".", "path", ".", "exists", "(", "headerfile", ")", ":", "continue", "headername", "=", "FileInfo", "(", "headerfile", ")", ".", "RepositoryName", "(", ")", "first_include", "=", "None", "for", "section_list", "in", "include_state", ".", "include_list", ":", "for", "f", "in", "section_list", ":", "if", "headername", "in", "f", "[", "0", "]", "or", "f", "[", "0", "]", "in", "headername", ":", "return", "if", "not", "first_include", ":", "first_include", "=", "f", "[", "1", "]", "error", "(", "filename", ",", "first_include", ",", "'build/include'", ",", "5", ",", "'%s should include its header file %s'", "%", "(", "fileinfo", ".", "RepositoryName", "(", ")", ",", "headername", ")", ")" ]
Logs an error if a source file does not include its header.
[ "Logs", "an", "error", "if", "a", "source", "file", "does", "not", "include", "its", "header", "." ]
python
valid
38.28
rocky/python-xasm
xasm/pyc_convert.py
https://github.com/rocky/python-xasm/blob/03e9576112934d00fbc70645b781ed7b3e3fcda1/xasm/pyc_convert.py#L53-L70
def transform_26_27(inst, new_inst, i, n, offset, instructions, new_asm): """Change JUMP_IF_FALSE and JUMP_IF_TRUE to POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE""" if inst.opname in ('JUMP_IF_FALSE', 'JUMP_IF_TRUE'): i += 1 assert i < n assert instructions[i].opname == 'POP_TOP' new_inst.offset = offset new_inst.opname = ( 'POP_JUMP_IF_FALSE' if inst.opname == 'JUMP_IF_FALSE' else 'POP_JUMP_IF_TRUE' ) new_asm.backpatch[-1].remove(inst) new_inst.arg = 'L%d' % (inst.offset + inst.arg + 3) new_asm.backpatch[-1].add(new_inst) else: xlate26_27(new_inst) return xdis.op_size(new_inst.opcode, opcode_27)
[ "def", "transform_26_27", "(", "inst", ",", "new_inst", ",", "i", ",", "n", ",", "offset", ",", "instructions", ",", "new_asm", ")", ":", "if", "inst", ".", "opname", "in", "(", "'JUMP_IF_FALSE'", ",", "'JUMP_IF_TRUE'", ")", ":", "i", "+=", "1", "assert", "i", "<", "n", "assert", "instructions", "[", "i", "]", ".", "opname", "==", "'POP_TOP'", "new_inst", ".", "offset", "=", "offset", "new_inst", ".", "opname", "=", "(", "'POP_JUMP_IF_FALSE'", "if", "inst", ".", "opname", "==", "'JUMP_IF_FALSE'", "else", "'POP_JUMP_IF_TRUE'", ")", "new_asm", ".", "backpatch", "[", "-", "1", "]", ".", "remove", "(", "inst", ")", "new_inst", ".", "arg", "=", "'L%d'", "%", "(", "inst", ".", "offset", "+", "inst", ".", "arg", "+", "3", ")", "new_asm", ".", "backpatch", "[", "-", "1", "]", ".", "add", "(", "new_inst", ")", "else", ":", "xlate26_27", "(", "new_inst", ")", "return", "xdis", ".", "op_size", "(", "new_inst", ".", "opcode", ",", "opcode_27", ")" ]
Change JUMP_IF_FALSE and JUMP_IF_TRUE to POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
[ "Change", "JUMP_IF_FALSE", "and", "JUMP_IF_TRUE", "to", "POP_JUMP_IF_FALSE", "and", "POP_JUMP_IF_TRUE" ]
python
train
39.611111
fracpete/python-weka-wrapper3
python/weka/classifiers.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/classifiers.py#L744-L756
def kernel(self, kernel): """ Sets the kernel. :param kernel: the kernel to set :type kernel: Kernel """ result = javabridge.static_call( "weka/classifiers/KernelHelper", "setKernel", "(Ljava/lang/Object;Lweka/classifiers/functions/supportVector/Kernel;)Z", self.jobject, kernel.jobject) if not result: raise Exception("Failed to set kernel!")
[ "def", "kernel", "(", "self", ",", "kernel", ")", ":", "result", "=", "javabridge", ".", "static_call", "(", "\"weka/classifiers/KernelHelper\"", ",", "\"setKernel\"", ",", "\"(Ljava/lang/Object;Lweka/classifiers/functions/supportVector/Kernel;)Z\"", ",", "self", ".", "jobject", ",", "kernel", ".", "jobject", ")", "if", "not", "result", ":", "raise", "Exception", "(", "\"Failed to set kernel!\"", ")" ]
Sets the kernel. :param kernel: the kernel to set :type kernel: Kernel
[ "Sets", "the", "kernel", "." ]
python
train
33.538462
arviz-devs/arviz
arviz/plots/plot_utils.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/plot_utils.py#L165-L190
def _create_axes_grid(length_plotters, rows, cols, **kwargs): """Create figure and axes for grids with multiple plots. Parameters ---------- n_items : int Number of panels required rows : int Number of rows cols : int Number of columns Returns ------- fig : matplotlib figure ax : matplotlib axes """ kwargs.setdefault("constrained_layout", True) fig, ax = plt.subplots(rows, cols, **kwargs) ax = np.ravel(ax) extra = (rows * cols) - length_plotters if extra: for i in range(1, extra + 1): ax[-i].set_axis_off() ax = ax[:-extra] return fig, ax
[ "def", "_create_axes_grid", "(", "length_plotters", ",", "rows", ",", "cols", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "\"constrained_layout\"", ",", "True", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "rows", ",", "cols", ",", "*", "*", "kwargs", ")", "ax", "=", "np", ".", "ravel", "(", "ax", ")", "extra", "=", "(", "rows", "*", "cols", ")", "-", "length_plotters", "if", "extra", ":", "for", "i", "in", "range", "(", "1", ",", "extra", "+", "1", ")", ":", "ax", "[", "-", "i", "]", ".", "set_axis_off", "(", ")", "ax", "=", "ax", "[", ":", "-", "extra", "]", "return", "fig", ",", "ax" ]
Create figure and axes for grids with multiple plots. Parameters ---------- n_items : int Number of panels required rows : int Number of rows cols : int Number of columns Returns ------- fig : matplotlib figure ax : matplotlib axes
[ "Create", "figure", "and", "axes", "for", "grids", "with", "multiple", "plots", "." ]
python
train
24.615385
project-ncl/pnc-cli
pnc_cli/buildconfigurations.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigurations.py#L146-L156
def update_build_configuration(id, **kwargs): """ Update an existing BuildConfiguration with new information :param id: ID of BuildConfiguration to update :param name: Name of BuildConfiguration to update :return: """ data = update_build_configuration_raw(id, **kwargs) if data: return utils.format_json(data)
[ "def", "update_build_configuration", "(", "id", ",", "*", "*", "kwargs", ")", ":", "data", "=", "update_build_configuration_raw", "(", "id", ",", "*", "*", "kwargs", ")", "if", "data", ":", "return", "utils", ".", "format_json", "(", "data", ")" ]
Update an existing BuildConfiguration with new information :param id: ID of BuildConfiguration to update :param name: Name of BuildConfiguration to update :return:
[ "Update", "an", "existing", "BuildConfiguration", "with", "new", "information" ]
python
train
30.909091
diging/tethne
tethne/analyze/corpus.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/corpus.py#L227-L326
def sigma(G, corpus, featureset_name, B=None, **kwargs): """ Calculate sigma (from `Chen 2009 <http://arxiv.org/pdf/0904.1439.pdf>`_) for all of the nodes in a :class:`.GraphCollection`\. You can set parameters for burstness estimation using ``kwargs``: ========= =============================================================== Parameter Description ========= =============================================================== s Scaling parameter ( > 1.)that controls graininess of burst detection. Lower values make the model more sensitive. Defaults to 1.1. gamma Parameter that controls the 'cost' of higher burst states. Defaults to 1.0. k Number of burst states. Defaults to 5. ========= =============================================================== Parameters ---------- G : :class:`.GraphCollection` corpus : :class:`.Corpus` feature : str Name of a featureset in `corpus`. Examples -------- Assuming that you have a :class:`.Corpus` generated from WoS data that has been sliced by ``date``. .. code-block:: python >>> # Generate a co-citation graph collection. >>> from tethne import GraphCollection >>> kwargs = { 'threshold':2, 'topn':100 } >>> G = GraphCollection() >>> G.build(corpus, 'date', 'papers', 'cocitation', method_kwargs=kwargs) >>> # Calculate sigma. This may take several minutes, depending on the >>> # size of your co-citaiton graph collection. >>> from tethne.analyze.corpus import sigma >>> G = sigma(G, corpus, 'citations') >>> # Visualize... >>> from tethne.writers import collection >>> collection.to_dxgmml(G, '~/cocitation.xgmml') In the visualization below, node and label sizes are mapped to ``sigma``, and border width is mapped to ``citations``. .. figure:: _static/images/cocitation_sigma2.png :width: 600 :align: center """ if 'date' not in corpus.indices: corpus.index('date') # Calculate burstness if not provided. if not B: B = burstness(corpus, featureset_name, features=G.nodes(), **kwargs) Sigma = {} # Keys are dates (from GraphCollection), values are # node:sigma dicts. for key, graph in G.iteritems(): centrality = nx.betweenness_centrality(graph) sigma = {} # Sigma values for all features in this year. attrs = {} # Sigma values for only those features in this graph. for n_, burst in B.iteritems(): burst = dict(list(zip(*burst))) # Reorganize for easier lookup. # Nodes are indexed as integers in the GraphCollection. n = G.node_lookup[n_] # We have burstness values for years in which the feature ``n`` # occurs, and we have centrality values for years in which ``n`` # made it into the graph. if n in graph.nodes() and key in burst: sigma[n] = ((centrality[n] + 1.) ** burst[key]) - 1. attrs[n] = sigma[n] # Update graph with sigma values. nx.set_node_attributes(graph, 'sigma', attrs) Sigma[key] = sigma # Invert results and update the GraphCollection.master_graph. # TODO: is there a more efficient way to do this? inverse = defaultdict(dict) for gname, result in Sigma.iteritems(): if hasattr(result, '__iter__'): for n, val in result.iteritems(): inverse[n].update({gname: val}) nx.set_node_attributes(G.master_graph, 'sigma', inverse) # We want to return results in the same format as burstness(); with node # labels as keys; values are tuples ([years...], [sigma...]). return {n: list(zip(*G.node_history(G.node_lookup[n], 'sigma').items())) for n in B.keys()}
[ "def", "sigma", "(", "G", ",", "corpus", ",", "featureset_name", ",", "B", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "'date'", "not", "in", "corpus", ".", "indices", ":", "corpus", ".", "index", "(", "'date'", ")", "# Calculate burstness if not provided.", "if", "not", "B", ":", "B", "=", "burstness", "(", "corpus", ",", "featureset_name", ",", "features", "=", "G", ".", "nodes", "(", ")", ",", "*", "*", "kwargs", ")", "Sigma", "=", "{", "}", "# Keys are dates (from GraphCollection), values are", "# node:sigma dicts.", "for", "key", ",", "graph", "in", "G", ".", "iteritems", "(", ")", ":", "centrality", "=", "nx", ".", "betweenness_centrality", "(", "graph", ")", "sigma", "=", "{", "}", "# Sigma values for all features in this year.", "attrs", "=", "{", "}", "# Sigma values for only those features in this graph.", "for", "n_", ",", "burst", "in", "B", ".", "iteritems", "(", ")", ":", "burst", "=", "dict", "(", "list", "(", "zip", "(", "*", "burst", ")", ")", ")", "# Reorganize for easier lookup.", "# Nodes are indexed as integers in the GraphCollection.", "n", "=", "G", ".", "node_lookup", "[", "n_", "]", "# We have burstness values for years in which the feature ``n``", "# occurs, and we have centrality values for years in which ``n``", "# made it into the graph.", "if", "n", "in", "graph", ".", "nodes", "(", ")", "and", "key", "in", "burst", ":", "sigma", "[", "n", "]", "=", "(", "(", "centrality", "[", "n", "]", "+", "1.", ")", "**", "burst", "[", "key", "]", ")", "-", "1.", "attrs", "[", "n", "]", "=", "sigma", "[", "n", "]", "# Update graph with sigma values.", "nx", ".", "set_node_attributes", "(", "graph", ",", "'sigma'", ",", "attrs", ")", "Sigma", "[", "key", "]", "=", "sigma", "# Invert results and update the GraphCollection.master_graph.", "# TODO: is there a more efficient way to do this?", "inverse", "=", "defaultdict", "(", "dict", ")", "for", "gname", ",", "result", "in", "Sigma", ".", "iteritems", "(", ")", ":", "if", "hasattr", "(", "result", ",", "'__iter__'", ")", ":", "for", "n", ",", "val", "in", "result", ".", "iteritems", "(", ")", ":", "inverse", "[", "n", "]", ".", "update", "(", "{", "gname", ":", "val", "}", ")", "nx", ".", "set_node_attributes", "(", "G", ".", "master_graph", ",", "'sigma'", ",", "inverse", ")", "# We want to return results in the same format as burstness(); with node", "# labels as keys; values are tuples ([years...], [sigma...]).", "return", "{", "n", ":", "list", "(", "zip", "(", "*", "G", ".", "node_history", "(", "G", ".", "node_lookup", "[", "n", "]", ",", "'sigma'", ")", ".", "items", "(", ")", ")", ")", "for", "n", "in", "B", ".", "keys", "(", ")", "}" ]
Calculate sigma (from `Chen 2009 <http://arxiv.org/pdf/0904.1439.pdf>`_) for all of the nodes in a :class:`.GraphCollection`\. You can set parameters for burstness estimation using ``kwargs``: ========= =============================================================== Parameter Description ========= =============================================================== s Scaling parameter ( > 1.)that controls graininess of burst detection. Lower values make the model more sensitive. Defaults to 1.1. gamma Parameter that controls the 'cost' of higher burst states. Defaults to 1.0. k Number of burst states. Defaults to 5. ========= =============================================================== Parameters ---------- G : :class:`.GraphCollection` corpus : :class:`.Corpus` feature : str Name of a featureset in `corpus`. Examples -------- Assuming that you have a :class:`.Corpus` generated from WoS data that has been sliced by ``date``. .. code-block:: python >>> # Generate a co-citation graph collection. >>> from tethne import GraphCollection >>> kwargs = { 'threshold':2, 'topn':100 } >>> G = GraphCollection() >>> G.build(corpus, 'date', 'papers', 'cocitation', method_kwargs=kwargs) >>> # Calculate sigma. This may take several minutes, depending on the >>> # size of your co-citaiton graph collection. >>> from tethne.analyze.corpus import sigma >>> G = sigma(G, corpus, 'citations') >>> # Visualize... >>> from tethne.writers import collection >>> collection.to_dxgmml(G, '~/cocitation.xgmml') In the visualization below, node and label sizes are mapped to ``sigma``, and border width is mapped to ``citations``. .. figure:: _static/images/cocitation_sigma2.png :width: 600 :align: center
[ "Calculate", "sigma", "(", "from", "Chen", "2009", "<http", ":", "//", "arxiv", ".", "org", "/", "pdf", "/", "0904", ".", "1439", ".", "pdf", ">", "_", ")", "for", "all", "of", "the", "nodes", "in", "a", ":", "class", ":", ".", "GraphCollection", "\\", "." ]
python
train
38.53
hobson/pug-invest
pug/invest/sandbox/sim.py
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L263-L273
def symbol_bollinger(symbol='GOOG', start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='close', cleaner=clean_dataframe, window=20, sigma=1.): """Calculate the Bolinger indicator value >>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE -1.8782... """ symbols = normalize_symbols(symbol) prices = price_dataframe(symbols, start=start, end=end, price_type=price_type, cleaner=cleaner) return series_bollinger(prices[symbols[0]], window=window, sigma=sigma, plot=False)
[ "def", "symbol_bollinger", "(", "symbol", "=", "'GOOG'", ",", "start", "=", "datetime", ".", "datetime", "(", "2008", ",", "1", ",", "1", ")", ",", "end", "=", "datetime", ".", "datetime", "(", "2009", ",", "12", ",", "31", ")", ",", "price_type", "=", "'close'", ",", "cleaner", "=", "clean_dataframe", ",", "window", "=", "20", ",", "sigma", "=", "1.", ")", ":", "symbols", "=", "normalize_symbols", "(", "symbol", ")", "prices", "=", "price_dataframe", "(", "symbols", ",", "start", "=", "start", ",", "end", "=", "end", ",", "price_type", "=", "price_type", ",", "cleaner", "=", "cleaner", ")", "return", "series_bollinger", "(", "prices", "[", "symbols", "[", "0", "]", "]", ",", "window", "=", "window", ",", "sigma", "=", "sigma", ",", "plot", "=", "False", ")" ]
Calculate the Bolinger indicator value >>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE -1.8782...
[ "Calculate", "the", "Bolinger", "indicator", "value" ]
python
train
52.545455
vertexproject/synapse
synapse/lib/link.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/link.py#L39-L47
async def unixlisten(path, onlink): ''' Start an PF_UNIX server listening on the given path. ''' info = {'path': path, 'unix': True} async def onconn(reader, writer): link = await Link.anit(reader, writer, info=info) link.schedCoro(onlink(link)) return await asyncio.start_unix_server(onconn, path=path)
[ "async", "def", "unixlisten", "(", "path", ",", "onlink", ")", ":", "info", "=", "{", "'path'", ":", "path", ",", "'unix'", ":", "True", "}", "async", "def", "onconn", "(", "reader", ",", "writer", ")", ":", "link", "=", "await", "Link", ".", "anit", "(", "reader", ",", "writer", ",", "info", "=", "info", ")", "link", ".", "schedCoro", "(", "onlink", "(", "link", ")", ")", "return", "await", "asyncio", ".", "start_unix_server", "(", "onconn", ",", "path", "=", "path", ")" ]
Start an PF_UNIX server listening on the given path.
[ "Start", "an", "PF_UNIX", "server", "listening", "on", "the", "given", "path", "." ]
python
train
37.222222
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L1652-L1658
def set_comment(self, format, *args): """ Add comment to config item before saving to disk. You can add as many comment lines as you like. If you use a null format, all comments are deleted. """ return lib.zconfig_set_comment(self._as_parameter_, format, *args)
[ "def", "set_comment", "(", "self", ",", "format", ",", "*", "args", ")", ":", "return", "lib", ".", "zconfig_set_comment", "(", "self", ".", "_as_parameter_", ",", "format", ",", "*", "args", ")" ]
Add comment to config item before saving to disk. You can add as many comment lines as you like. If you use a null format, all comments are deleted.
[ "Add", "comment", "to", "config", "item", "before", "saving", "to", "disk", ".", "You", "can", "add", "as", "many", "comment", "lines", "as", "you", "like", ".", "If", "you", "use", "a", "null", "format", "all", "comments", "are", "deleted", "." ]
python
train
41
pgmpy/pgmpy
pgmpy/inference/ExactInference.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/inference/ExactInference.py#L533-L627
def _query(self, variables, operation, evidence=None, joint=True): """ This is a generalized query method that can be used for both query and map query. Parameters ---------- variables: list list of variables for which you want to compute the probability operation: str ('marginalize' | 'maximize') The operation to do for passing messages between nodes. evidence: dict a dict key, value pair as {var: state_of_var_observed} None if no evidence Examples -------- >>> from pgmpy.inference import BeliefPropagation >>> from pgmpy.models import BayesianModel >>> import numpy as np >>> import pandas as pd >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> inference = BeliefPropagation(model) >>> phi_query = inference.query(['A', 'B']) References ---------- Algorithm 10.4 Out-of-clique inference in clique tree Probabilistic Graphical Models: Principles and Techniques Daphne Koller and Nir Friedman. """ is_calibrated = self._is_converged(operation=operation) # Calibrate the junction tree if not calibrated if not is_calibrated: self.calibrate() if not isinstance(variables, (list, tuple, set)): query_variables = [variables] else: query_variables = list(variables) query_variables.extend(evidence.keys() if evidence else []) # Find a tree T' such that query_variables are a subset of scope(T') nodes_with_query_variables = set() for var in query_variables: nodes_with_query_variables.update(filter(lambda x: var in x, self.junction_tree.nodes())) subtree_nodes = nodes_with_query_variables # Conversion of set to tuple just for indexing nodes_with_query_variables = tuple(nodes_with_query_variables) # As junction tree is a tree, that means that there would be only path between any two nodes in the tree # thus we can just take the path between any two nodes; no matter there order is for i in range(len(nodes_with_query_variables) - 1): subtree_nodes.update(nx.shortest_path(self.junction_tree, nodes_with_query_variables[i], nodes_with_query_variables[i + 1])) subtree_undirected_graph = self.junction_tree.subgraph(subtree_nodes) # Converting subtree into a junction tree if len(subtree_nodes) == 1: subtree = JunctionTree() subtree.add_node(subtree_nodes.pop()) else: subtree = JunctionTree(subtree_undirected_graph.edges()) # Selecting a node is root node. Root node would be having only one neighbor if len(subtree.nodes()) == 1: root_node = list(subtree.nodes())[0] else: root_node = tuple(filter(lambda x: len(list(subtree.neighbors(x))) == 1, subtree.nodes()))[0] clique_potential_list = [self.clique_beliefs[root_node]] # For other nodes in the subtree compute the clique potentials as follows # As all the nodes are nothing but tuples so simple set(root_node) won't work at it would update the set with' # all the elements of the tuple; instead use set([root_node]) as it would include only the tuple not the # internal elements within it. parent_nodes = set([root_node]) nodes_traversed = set() while parent_nodes: parent_node = parent_nodes.pop() for child_node in set(subtree.neighbors(parent_node)) - nodes_traversed: clique_potential_list.append(self.clique_beliefs[child_node] / self.sepset_beliefs[frozenset([parent_node, child_node])]) parent_nodes.update([child_node]) nodes_traversed.update([parent_node]) # Add factors to the corresponding junction tree subtree.add_factors(*clique_potential_list) # Sum product variable elimination on the subtree variable_elimination = VariableElimination(subtree) if operation == 'marginalize': return variable_elimination.query(variables=variables, evidence=evidence, joint=joint) elif operation == 'maximize': return variable_elimination.map_query(variables=variables, evidence=evidence)
[ "def", "_query", "(", "self", ",", "variables", ",", "operation", ",", "evidence", "=", "None", ",", "joint", "=", "True", ")", ":", "is_calibrated", "=", "self", ".", "_is_converged", "(", "operation", "=", "operation", ")", "# Calibrate the junction tree if not calibrated", "if", "not", "is_calibrated", ":", "self", ".", "calibrate", "(", ")", "if", "not", "isinstance", "(", "variables", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "query_variables", "=", "[", "variables", "]", "else", ":", "query_variables", "=", "list", "(", "variables", ")", "query_variables", ".", "extend", "(", "evidence", ".", "keys", "(", ")", "if", "evidence", "else", "[", "]", ")", "# Find a tree T' such that query_variables are a subset of scope(T')", "nodes_with_query_variables", "=", "set", "(", ")", "for", "var", "in", "query_variables", ":", "nodes_with_query_variables", ".", "update", "(", "filter", "(", "lambda", "x", ":", "var", "in", "x", ",", "self", ".", "junction_tree", ".", "nodes", "(", ")", ")", ")", "subtree_nodes", "=", "nodes_with_query_variables", "# Conversion of set to tuple just for indexing", "nodes_with_query_variables", "=", "tuple", "(", "nodes_with_query_variables", ")", "# As junction tree is a tree, that means that there would be only path between any two nodes in the tree", "# thus we can just take the path between any two nodes; no matter there order is", "for", "i", "in", "range", "(", "len", "(", "nodes_with_query_variables", ")", "-", "1", ")", ":", "subtree_nodes", ".", "update", "(", "nx", ".", "shortest_path", "(", "self", ".", "junction_tree", ",", "nodes_with_query_variables", "[", "i", "]", ",", "nodes_with_query_variables", "[", "i", "+", "1", "]", ")", ")", "subtree_undirected_graph", "=", "self", ".", "junction_tree", ".", "subgraph", "(", "subtree_nodes", ")", "# Converting subtree into a junction tree", "if", "len", "(", "subtree_nodes", ")", "==", "1", ":", "subtree", "=", "JunctionTree", "(", ")", "subtree", ".", "add_node", "(", "subtree_nodes", ".", "pop", "(", ")", ")", "else", ":", "subtree", "=", "JunctionTree", "(", "subtree_undirected_graph", ".", "edges", "(", ")", ")", "# Selecting a node is root node. Root node would be having only one neighbor", "if", "len", "(", "subtree", ".", "nodes", "(", ")", ")", "==", "1", ":", "root_node", "=", "list", "(", "subtree", ".", "nodes", "(", ")", ")", "[", "0", "]", "else", ":", "root_node", "=", "tuple", "(", "filter", "(", "lambda", "x", ":", "len", "(", "list", "(", "subtree", ".", "neighbors", "(", "x", ")", ")", ")", "==", "1", ",", "subtree", ".", "nodes", "(", ")", ")", ")", "[", "0", "]", "clique_potential_list", "=", "[", "self", ".", "clique_beliefs", "[", "root_node", "]", "]", "# For other nodes in the subtree compute the clique potentials as follows", "# As all the nodes are nothing but tuples so simple set(root_node) won't work at it would update the set with'", "# all the elements of the tuple; instead use set([root_node]) as it would include only the tuple not the", "# internal elements within it.", "parent_nodes", "=", "set", "(", "[", "root_node", "]", ")", "nodes_traversed", "=", "set", "(", ")", "while", "parent_nodes", ":", "parent_node", "=", "parent_nodes", ".", "pop", "(", ")", "for", "child_node", "in", "set", "(", "subtree", ".", "neighbors", "(", "parent_node", ")", ")", "-", "nodes_traversed", ":", "clique_potential_list", ".", "append", "(", "self", ".", "clique_beliefs", "[", "child_node", "]", "/", "self", ".", "sepset_beliefs", "[", "frozenset", "(", "[", "parent_node", ",", "child_node", "]", ")", "]", ")", "parent_nodes", ".", "update", "(", "[", "child_node", "]", ")", "nodes_traversed", ".", "update", "(", "[", "parent_node", "]", ")", "# Add factors to the corresponding junction tree", "subtree", ".", "add_factors", "(", "*", "clique_potential_list", ")", "# Sum product variable elimination on the subtree", "variable_elimination", "=", "VariableElimination", "(", "subtree", ")", "if", "operation", "==", "'marginalize'", ":", "return", "variable_elimination", ".", "query", "(", "variables", "=", "variables", ",", "evidence", "=", "evidence", ",", "joint", "=", "joint", ")", "elif", "operation", "==", "'maximize'", ":", "return", "variable_elimination", ".", "map_query", "(", "variables", "=", "variables", ",", "evidence", "=", "evidence", ")", "]"
This is a generalized query method that can be used for both query and map query. Parameters ---------- variables: list list of variables for which you want to compute the probability operation: str ('marginalize' | 'maximize') The operation to do for passing messages between nodes. evidence: dict a dict key, value pair as {var: state_of_var_observed} None if no evidence Examples -------- >>> from pgmpy.inference import BeliefPropagation >>> from pgmpy.models import BayesianModel >>> import numpy as np >>> import pandas as pd >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> inference = BeliefPropagation(model) >>> phi_query = inference.query(['A', 'B']) References ---------- Algorithm 10.4 Out-of-clique inference in clique tree Probabilistic Graphical Models: Principles and Techniques Daphne Koller and Nir Friedman.
[ "This", "is", "a", "generalized", "query", "method", "that", "can", "be", "used", "for", "both", "query", "and", "map", "query", "." ]
python
train
48.326316
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L4188-L4193
def getCurrentFadeColor(self, bBackground): """Get current fade color value.""" fn = self.function_table.getCurrentFadeColor result = fn(bBackground) return result
[ "def", "getCurrentFadeColor", "(", "self", ",", "bBackground", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getCurrentFadeColor", "result", "=", "fn", "(", "bBackground", ")", "return", "result" ]
Get current fade color value.
[ "Get", "current", "fade", "color", "value", "." ]
python
train
31.833333
google/grr
grr/client/grr_response_client/client_utils_common.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_utils_common.py#L35-L86
def Execute(cmd, args, time_limit=-1, bypass_whitelist=False, daemon=False, use_client_context=False, cwd=None): """Executes commands on the client. This function is the only place where commands will be executed by the GRR client. This makes sure that all issued commands are compared to a white list and no malicious commands are issued on the client machine. Args: cmd: The command to be executed. args: List of arguments. time_limit: Time in seconds the process is allowed to run. bypass_whitelist: Allow execution of things that are not in the whitelist. Note that this should only ever be called on a binary that passes the VerifySignedBlob check. daemon: Start the new process in the background. use_client_context: Run this script in the client's context. Defaults to system context. cwd: Current working directory for the command. Returns: A tuple of stdout, stderr, return value and time taken. """ if not bypass_whitelist and not IsExecutionWhitelisted(cmd, args): # Whitelist doesn't contain this cmd/arg pair logging.info("Execution disallowed by whitelist: %s %s.", cmd, " ".join(args)) return (b"", b"Execution disallowed by whitelist.", -1, -1) if daemon: pid = os.fork() if pid == 0: # This is the child, it will run the daemon process. We call os.setsid # here to become the session leader of this new session and the process # group leader of the new process group so we don't get killed when the # main process exits. try: os.setsid() except OSError: # This only works if the process is running as root. pass _Execute( cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd) os._exit(0) # pylint: disable=protected-access else: return _Execute( cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd)
[ "def", "Execute", "(", "cmd", ",", "args", ",", "time_limit", "=", "-", "1", ",", "bypass_whitelist", "=", "False", ",", "daemon", "=", "False", ",", "use_client_context", "=", "False", ",", "cwd", "=", "None", ")", ":", "if", "not", "bypass_whitelist", "and", "not", "IsExecutionWhitelisted", "(", "cmd", ",", "args", ")", ":", "# Whitelist doesn't contain this cmd/arg pair", "logging", ".", "info", "(", "\"Execution disallowed by whitelist: %s %s.\"", ",", "cmd", ",", "\" \"", ".", "join", "(", "args", ")", ")", "return", "(", "b\"\"", ",", "b\"Execution disallowed by whitelist.\"", ",", "-", "1", ",", "-", "1", ")", "if", "daemon", ":", "pid", "=", "os", ".", "fork", "(", ")", "if", "pid", "==", "0", ":", "# This is the child, it will run the daemon process. We call os.setsid", "# here to become the session leader of this new session and the process", "# group leader of the new process group so we don't get killed when the", "# main process exits.", "try", ":", "os", ".", "setsid", "(", ")", "except", "OSError", ":", "# This only works if the process is running as root.", "pass", "_Execute", "(", "cmd", ",", "args", ",", "time_limit", ",", "use_client_context", "=", "use_client_context", ",", "cwd", "=", "cwd", ")", "os", ".", "_exit", "(", "0", ")", "# pylint: disable=protected-access", "else", ":", "return", "_Execute", "(", "cmd", ",", "args", ",", "time_limit", ",", "use_client_context", "=", "use_client_context", ",", "cwd", "=", "cwd", ")" ]
Executes commands on the client. This function is the only place where commands will be executed by the GRR client. This makes sure that all issued commands are compared to a white list and no malicious commands are issued on the client machine. Args: cmd: The command to be executed. args: List of arguments. time_limit: Time in seconds the process is allowed to run. bypass_whitelist: Allow execution of things that are not in the whitelist. Note that this should only ever be called on a binary that passes the VerifySignedBlob check. daemon: Start the new process in the background. use_client_context: Run this script in the client's context. Defaults to system context. cwd: Current working directory for the command. Returns: A tuple of stdout, stderr, return value and time taken.
[ "Executes", "commands", "on", "the", "client", "." ]
python
train
38.019231
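A minimal usage sketch for the Execute helper in the record above. It assumes the command/argument pair passes IsExecutionWhitelisted (otherwise the call short-circuits with return code -1); the command shown and the time limit are purely illustrative, and the import path just follows the record's module layout.

from grr_response_client import client_utils_common

# Returns a (stdout, stderr, return_value, time_taken) tuple; stdout/stderr are bytes.
stdout, stderr, status, time_used = client_utils_common.Execute(
    "/bin/echo", ["hello"], time_limit=60)
if status == 0:
    print(stdout.decode("utf-8"))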
CivicSpleen/ambry
ambry/orm/database.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L482-L531
def dataset(self, ref, load_all=False, exception=True): """Return a dataset, given a vid or id :param ref: Vid or id for a dataset. If an id is provided, will it will return the one with the largest revision number :param load_all: Use a query that eagerly loads everything. :return: :class:`ambry.orm.Dataset` """ ref = str(ref) try: ds = self.session.query(Dataset).filter(Dataset.vid == ref).one() except NoResultFound: ds = None if not ds: try: ds = self.session \ .query(Dataset) \ .filter(Dataset.id == ref) \ .order_by(Dataset.revision.desc()) \ .first() except NoResultFound: ds = None if not ds: try: ds = self.session.query(Dataset).filter(Dataset.vname == ref).one() except NoResultFound: ds = None if not ds: try: ds = self.session \ .query(Dataset) \ .filter(Dataset.name == ref) \ .order_by(Dataset.revision.desc()) \ .first() except NoResultFound: ds = None if ds: ds._database = self return ds elif exception: raise NotFoundError('No dataset in library for vid : {} '.format(ref)) else: return None
[ "def", "dataset", "(", "self", ",", "ref", ",", "load_all", "=", "False", ",", "exception", "=", "True", ")", ":", "ref", "=", "str", "(", "ref", ")", "try", ":", "ds", "=", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter", "(", "Dataset", ".", "vid", "==", "ref", ")", ".", "one", "(", ")", "except", "NoResultFound", ":", "ds", "=", "None", "if", "not", "ds", ":", "try", ":", "ds", "=", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter", "(", "Dataset", ".", "id", "==", "ref", ")", ".", "order_by", "(", "Dataset", ".", "revision", ".", "desc", "(", ")", ")", ".", "first", "(", ")", "except", "NoResultFound", ":", "ds", "=", "None", "if", "not", "ds", ":", "try", ":", "ds", "=", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter", "(", "Dataset", ".", "vname", "==", "ref", ")", ".", "one", "(", ")", "except", "NoResultFound", ":", "ds", "=", "None", "if", "not", "ds", ":", "try", ":", "ds", "=", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter", "(", "Dataset", ".", "name", "==", "ref", ")", ".", "order_by", "(", "Dataset", ".", "revision", ".", "desc", "(", ")", ")", ".", "first", "(", ")", "except", "NoResultFound", ":", "ds", "=", "None", "if", "ds", ":", "ds", ".", "_database", "=", "self", "return", "ds", "elif", "exception", ":", "raise", "NotFoundError", "(", "'No dataset in library for vid : {} '", ".", "format", "(", "ref", ")", ")", "else", ":", "return", "None" ]
Return a dataset, given a vid or id :param ref: Vid or id for a dataset. If an id is provided, will it will return the one with the largest revision number :param load_all: Use a query that eagerly loads everything. :return: :class:`ambry.orm.Dataset`
[ "Return", "a", "dataset", "given", "a", "vid", "or", "id" ]
python
train
29.88
bitesofcode/projexui
projexui/widgets/xorbbrowserwidget/xorbbrowserfactory.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbbrowserwidget/xorbbrowserfactory.py#L77-L92
def createCard( self, parent, record ): """ Creates a new widget that will represent the card view for the inpued record. :param parent | <QWidget> record | <orb.Table> :return <QWidget> || None """ cls = self.cardClass(record) if ( cls ): card = cls(parent) card.setRecord(record) return card return None
[ "def", "createCard", "(", "self", ",", "parent", ",", "record", ")", ":", "cls", "=", "self", ".", "cardClass", "(", "record", ")", "if", "(", "cls", ")", ":", "card", "=", "cls", "(", "parent", ")", "card", ".", "setRecord", "(", "record", ")", "return", "card", "return", "None" ]
Creates a new widget that will represent the card view for the inpued record. :param parent | <QWidget> record | <orb.Table> :return <QWidget> || None
[ "Creates", "a", "new", "widget", "that", "will", "represent", "the", "card", "view", "for", "the", "inpued", "record", ".", ":", "param", "parent", "|", "<QWidget", ">", "record", "|", "<orb", ".", "Table", ">", ":", "return", "<QWidget", ">", "||", "None" ]
python
train
28.75
bokeh/bokeh
bokeh/core/property/descriptors.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/descriptors.py#L671-L697
def _get(self, obj): ''' Internal implementation of instance attribute access for the ``BasicPropertyDescriptor`` getter. If the value has not been explicitly set by a user, return that value. Otherwise, return the default. Args: obj (HasProps) : the instance to get a value of this property for Returns: object Raises: RuntimeError If the |HasProps| instance has not yet been initialized, or if this descriptor is on a class that is not a |HasProps|. ''' if not hasattr(obj, '_property_values'): raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" % (self.name, obj.__class__.__name__)) if self.name not in obj._property_values: return self._get_default(obj) else: return obj._property_values[self.name]
[ "def", "_get", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'_property_values'", ")", ":", "raise", "RuntimeError", "(", "\"Cannot get a property value '%s' from a %s instance before HasProps.__init__\"", "%", "(", "self", ".", "name", ",", "obj", ".", "__class__", ".", "__name__", ")", ")", "if", "self", ".", "name", "not", "in", "obj", ".", "_property_values", ":", "return", "self", ".", "_get_default", "(", "obj", ")", "else", ":", "return", "obj", ".", "_property_values", "[", "self", ".", "name", "]" ]
Internal implementation of instance attribute access for the ``BasicPropertyDescriptor`` getter. If the value has not been explicitly set by a user, return that value. Otherwise, return the default. Args: obj (HasProps) : the instance to get a value of this property for Returns: object Raises: RuntimeError If the |HasProps| instance has not yet been initialized, or if this descriptor is on a class that is not a |HasProps|.
[ "Internal", "implementation", "of", "instance", "attribute", "access", "for", "the", "BasicPropertyDescriptor", "getter", "." ]
python
train
35.296296
nateshmbhat/pyttsx3
pyttsx3/driver.py
https://github.com/nateshmbhat/pyttsx3/blob/0f304bff4812d50937393f1e3d7f89c9862a1623/pyttsx3/driver.py#L178-L187
def save_to_file(self, text, filename, name): ''' Called by the engine to push a say command onto the queue. @param text: Text to speak @type text: unicode @param name: Name to associate with the utterance @type name: str ''' self._push(self._driver.save_to_file, (text, filename), name)
[ "def", "save_to_file", "(", "self", ",", "text", ",", "filename", ",", "name", ")", ":", "self", ".", "_push", "(", "self", ".", "_driver", ".", "save_to_file", ",", "(", "text", ",", "filename", ")", ",", "name", ")" ]
Called by the engine to push a say command onto the queue. @param text: Text to speak @type text: unicode @param name: Name to associate with the utterance @type name: str
[ "Called", "by", "the", "engine", "to", "push", "a", "say", "command", "onto", "the", "queue", "." ]
python
train
34.3
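The record above is the internal driver-proxy method; a typical end-user call goes through the pyttsx3 engine instead, roughly as sketched here (the text and output filename are illustrative).

import pyttsx3

engine = pyttsx3.init()
# Queue the utterance for file output, then run the event loop so the file gets written.
engine.save_to_file('Hello world', 'hello.mp3')
engine.runAndWait()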
push-things/django-th
th_trello/my_trello.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_trello/my_trello.py#L89-L153
def save_data(self, trigger_id, **data): """ let's save the data :param trigger_id: trigger ID from which to save data :param data: the data to check to be used and save :type trigger_id: int :type data: dict :return: the status of the save statement :rtype: boolean """ data['output_format'] = 'md' title, content = super(ServiceTrello, self).save_data(trigger_id, **data) if len(title): # get the data of this trigger t = Trello.objects.get(trigger_id=trigger_id) # footer of the card footer = self.set_card_footer(data, t) content += footer # 1 - we need to search the list and board where we will # store the card so ... # 1.a search the board_id by its name # by retrieving all the boards boards = self.trello_instance.list_boards() board_id = '' my_list = '' for board in boards: if t.board_name == board.name: board_id = board.id break if board_id: # 1.b search the list_id by its name my_board = self.trello_instance.get_board(board_id) lists = my_board.open_lists() # just get the open list ; not all the archive ones for list_in_board in lists: # search the name of the list we set in the form if t.list_name == list_in_board.name: # return the (trello) list object to be able to add card at step 3 my_list = my_board.get_list(list_in_board.id) break # we didnt find the list in that board -> create it if my_list == '': my_list = my_board.add_list(t.list_name) else: # 2 if board_id and/or list_id does not exist, create it/them my_board = self.trello_instance.add_board(t.board_name) # add the list that didn't exists and return a (trello) list object my_list = my_board.add_list(t.list_name) # 3 create the card my_list.add_card(title, content) logger.debug(str('trello {} created').format(data['link'])) status = True else: sentence = "no token or link provided for trigger ID {}".format(trigger_id) update_result(trigger_id, msg=sentence, status=False) status = False return status
[ "def", "save_data", "(", "self", ",", "trigger_id", ",", "*", "*", "data", ")", ":", "data", "[", "'output_format'", "]", "=", "'md'", "title", ",", "content", "=", "super", "(", "ServiceTrello", ",", "self", ")", ".", "save_data", "(", "trigger_id", ",", "*", "*", "data", ")", "if", "len", "(", "title", ")", ":", "# get the data of this trigger", "t", "=", "Trello", ".", "objects", ".", "get", "(", "trigger_id", "=", "trigger_id", ")", "# footer of the card", "footer", "=", "self", ".", "set_card_footer", "(", "data", ",", "t", ")", "content", "+=", "footer", "# 1 - we need to search the list and board where we will", "# store the card so ...", "# 1.a search the board_id by its name", "# by retrieving all the boards", "boards", "=", "self", ".", "trello_instance", ".", "list_boards", "(", ")", "board_id", "=", "''", "my_list", "=", "''", "for", "board", "in", "boards", ":", "if", "t", ".", "board_name", "==", "board", ".", "name", ":", "board_id", "=", "board", ".", "id", "break", "if", "board_id", ":", "# 1.b search the list_id by its name", "my_board", "=", "self", ".", "trello_instance", ".", "get_board", "(", "board_id", ")", "lists", "=", "my_board", ".", "open_lists", "(", ")", "# just get the open list ; not all the archive ones", "for", "list_in_board", "in", "lists", ":", "# search the name of the list we set in the form", "if", "t", ".", "list_name", "==", "list_in_board", ".", "name", ":", "# return the (trello) list object to be able to add card at step 3", "my_list", "=", "my_board", ".", "get_list", "(", "list_in_board", ".", "id", ")", "break", "# we didnt find the list in that board -> create it", "if", "my_list", "==", "''", ":", "my_list", "=", "my_board", ".", "add_list", "(", "t", ".", "list_name", ")", "else", ":", "# 2 if board_id and/or list_id does not exist, create it/them", "my_board", "=", "self", ".", "trello_instance", ".", "add_board", "(", "t", ".", "board_name", ")", "# add the list that didn't exists and return a (trello) list object", "my_list", "=", "my_board", ".", "add_list", "(", "t", ".", "list_name", ")", "# 3 create the card", "my_list", ".", "add_card", "(", "title", ",", "content", ")", "logger", ".", "debug", "(", "str", "(", "'trello {} created'", ")", ".", "format", "(", "data", "[", "'link'", "]", ")", ")", "status", "=", "True", "else", ":", "sentence", "=", "\"no token or link provided for trigger ID {}\"", ".", "format", "(", "trigger_id", ")", "update_result", "(", "trigger_id", ",", "msg", "=", "sentence", ",", "status", "=", "False", ")", "status", "=", "False", "return", "status" ]
let's save the data :param trigger_id: trigger ID from which to save data :param data: the data to check to be used and save :type trigger_id: int :type data: dict :return: the status of the save statement :rtype: boolean
[ "let", "s", "save", "the", "data" ]
python
train
40.138462
grahambell/pymoc
lib/pymoc/util/tool.py
https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/util/tool.py#L273-L288
def intersection(self): """Compute the intersection with the given MOC. This command takes the name of a MOC file and forms the intersection of the running MOC with that file. :: pymoctool a.fits --intersection b.fits --output intersection.fits """ if self.moc is None: raise CommandError('No MOC information present for intersection') filename = self.params.pop() self.moc = self.moc.intersection(MOC(filename=filename))
[ "def", "intersection", "(", "self", ")", ":", "if", "self", ".", "moc", "is", "None", ":", "raise", "CommandError", "(", "'No MOC information present for intersection'", ")", "filename", "=", "self", ".", "params", ".", "pop", "(", ")", "self", ".", "moc", "=", "self", ".", "moc", ".", "intersection", "(", "MOC", "(", "filename", "=", "filename", ")", ")" ]
Compute the intersection with the given MOC. This command takes the name of a MOC file and forms the intersection of the running MOC with that file. :: pymoctool a.fits --intersection b.fits --output intersection.fits
[ "Compute", "the", "intersection", "with", "the", "given", "MOC", "." ]
python
train
31.1875
gem/oq-engine
openquake/hazardlib/gsim/cauzzi_2014.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/cauzzi_2014.py#L401-L419
def _compute_mean(self, C, rup, dists, sites, imt): """ Returns the mean ground motion acceleration and velocity """ mean = (self._get_magnitude_scaling_term(C, rup.mag) + self._get_distance_scaling_term(C, rup.mag, dists.rrup) + self._get_site_amplification_term(C, sites.vs30)) # convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV # is already in cm/s) and also convert from base 10 to base e. if imt.name == "PGA": mean = np.log((10 ** mean) * ((2 * np.pi / 0.01) ** 2) * 1e-2 / g) elif imt.name == "SA": mean = np.log((10 ** mean) * ((2 * np.pi / imt.period) ** 2) * 1e-2 / g) else: mean = np.log(10 ** mean) return mean
[ "def", "_compute_mean", "(", "self", ",", "C", ",", "rup", ",", "dists", ",", "sites", ",", "imt", ")", ":", "mean", "=", "(", "self", ".", "_get_magnitude_scaling_term", "(", "C", ",", "rup", ".", "mag", ")", "+", "self", ".", "_get_distance_scaling_term", "(", "C", ",", "rup", ".", "mag", ",", "dists", ".", "rrup", ")", "+", "self", ".", "_get_site_amplification_term", "(", "C", ",", "sites", ".", "vs30", ")", ")", "# convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV", "# is already in cm/s) and also convert from base 10 to base e.", "if", "imt", ".", "name", "==", "\"PGA\"", ":", "mean", "=", "np", ".", "log", "(", "(", "10", "**", "mean", ")", "*", "(", "(", "2", "*", "np", ".", "pi", "/", "0.01", ")", "**", "2", ")", "*", "1e-2", "/", "g", ")", "elif", "imt", ".", "name", "==", "\"SA\"", ":", "mean", "=", "np", ".", "log", "(", "(", "10", "**", "mean", ")", "*", "(", "(", "2", "*", "np", ".", "pi", "/", "imt", ".", "period", ")", "**", "2", ")", "*", "1e-2", "/", "g", ")", "else", ":", "mean", "=", "np", ".", "log", "(", "10", "**", "mean", ")", "return", "mean" ]
Returns the mean ground motion acceleration and velocity
[ "Returns", "the", "mean", "ground", "motion", "acceleration", "and", "velocity" ]
python
train
43.315789
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L114-L122
def _is_converged(self): '''Determine if calculation converged; for a relaxation (static) run we look for ionic (electronic) convergence in the output''' if self.is_relaxed(): # relaxation run case return self._get_line(['End of', 'Geometry Optimization'], self.outputf, return_string=False) else: # static run case return self._get_line('convergence has been achieved', self.outputf, return_string=False)
[ "def", "_is_converged", "(", "self", ")", ":", "if", "self", ".", "is_relaxed", "(", ")", ":", "# relaxation run case", "return", "self", ".", "_get_line", "(", "[", "'End of'", ",", "'Geometry Optimization'", "]", ",", "self", ".", "outputf", ",", "return_string", "=", "False", ")", "else", ":", "# static run case", "return", "self", ".", "_get_line", "(", "'convergence has been achieved'", ",", "self", ".", "outputf", ",", "return_string", "=", "False", ")" ]
Determine if calculation converged; for a relaxation (static) run we look for ionic (electronic) convergence in the output
[ "Determine", "if", "calculation", "converged", ";", "for", "a", "relaxation", "(", "static", ")", "run", "we", "look", "for", "ionic", "(", "electronic", ")", "convergence", "in", "the", "output" ]
python
train
53
peterwittek/ncpol2sdpa
ncpol2sdpa/nc_utils.py
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/nc_utils.py#L398-L434
def generate_operators(name, n_vars=1, hermitian=None, commutative=False): """Generates a number of commutative or noncommutative operators :param name: The prefix in the symbolic representation of the noncommuting variables. This will be suffixed by a number from 0 to n_vars-1 if n_vars > 1. :type name: str. :param n_vars: The number of variables. :type n_vars: int. :param hermitian: Optional parameter to request Hermitian variables . :type hermitian: bool. :param commutative: Optional parameter to request commutative variables. Commutative variables are Hermitian by default. :type commutative: bool. :returns: list of :class:`sympy.physics.quantum.operator.Operator` or :class:`sympy.physics.quantum.operator.HermitianOperator` variables :Example: >>> generate_variables('y', 2, commutative=True) [y0, y1] """ variables = [] for i in range(n_vars): if n_vars > 1: var_name = '%s%s' % (name, i) else: var_name = '%s' % name if hermitian is not None and hermitian: variables.append(HermitianOperator(var_name)) else: variables.append(Operator(var_name)) variables[-1].is_commutative = commutative return variables
[ "def", "generate_operators", "(", "name", ",", "n_vars", "=", "1", ",", "hermitian", "=", "None", ",", "commutative", "=", "False", ")", ":", "variables", "=", "[", "]", "for", "i", "in", "range", "(", "n_vars", ")", ":", "if", "n_vars", ">", "1", ":", "var_name", "=", "'%s%s'", "%", "(", "name", ",", "i", ")", "else", ":", "var_name", "=", "'%s'", "%", "name", "if", "hermitian", "is", "not", "None", "and", "hermitian", ":", "variables", ".", "append", "(", "HermitianOperator", "(", "var_name", ")", ")", "else", ":", "variables", ".", "append", "(", "Operator", "(", "var_name", ")", ")", "variables", "[", "-", "1", "]", ".", "is_commutative", "=", "commutative", "return", "variables" ]
Generates a number of commutative or noncommutative operators :param name: The prefix in the symbolic representation of the noncommuting variables. This will be suffixed by a number from 0 to n_vars-1 if n_vars > 1. :type name: str. :param n_vars: The number of variables. :type n_vars: int. :param hermitian: Optional parameter to request Hermitian variables . :type hermitian: bool. :param commutative: Optional parameter to request commutative variables. Commutative variables are Hermitian by default. :type commutative: bool. :returns: list of :class:`sympy.physics.quantum.operator.Operator` or :class:`sympy.physics.quantum.operator.HermitianOperator` variables :Example: >>> generate_variables('y', 2, commutative=True) [y0, y1]
[ "Generates", "a", "number", "of", "commutative", "or", "noncommutative", "operators" ]
python
train
36
stormpath/stormpath-django
django_stormpath/backends.py
https://github.com/stormpath/stormpath-django/blob/af60eb5da2115d94ac313613c5d4e6b9f3d16157/django_stormpath/backends.py#L22-L36
def _stormpath_authenticate(self, username, password): """Check if Stormpath authentication works :param username: Can be actual username or email :param password: Account password Returns an account object if successful or None otherwise. """ APPLICATION = get_application() try: result = APPLICATION.authenticate_account(username, password) return result.account except Error as e: log.debug(e) return None
[ "def", "_stormpath_authenticate", "(", "self", ",", "username", ",", "password", ")", ":", "APPLICATION", "=", "get_application", "(", ")", "try", ":", "result", "=", "APPLICATION", ".", "authenticate_account", "(", "username", ",", "password", ")", "return", "result", ".", "account", "except", "Error", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "return", "None" ]
Check if Stormpath authentication works :param username: Can be actual username or email :param password: Account password Returns an account object if successful or None otherwise.
[ "Check", "if", "Stormpath", "authentication", "works" ]
python
train
33.866667
peterwittek/ncpol2sdpa
ncpol2sdpa/physics_utils.py
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/physics_utils.py#L17-L44
def get_neighbors(index, lattice_length, width=0, periodic=False): """Get the forward neighbors of a site in a lattice. :param index: Linear index of operator. :type index: int. :param lattice_length: The size of the 2D lattice in either dimension :type lattice_length: int. :param width: Optional parameter to define width. :type width: int. :param periodic: Optional parameter to indicate periodic boundary conditions. :type periodic: bool :returns: list of int -- the neighbors in linear index. """ if width == 0: width = lattice_length neighbors = [] coords = divmod(index, width) if coords[1] < width - 1: neighbors.append(index + 1) elif periodic and width > 1: neighbors.append(index - width + 1) if coords[0] < lattice_length - 1: neighbors.append(index + width) elif periodic: neighbors.append(index - (lattice_length - 1) * width) return neighbors
[ "def", "get_neighbors", "(", "index", ",", "lattice_length", ",", "width", "=", "0", ",", "periodic", "=", "False", ")", ":", "if", "width", "==", "0", ":", "width", "=", "lattice_length", "neighbors", "=", "[", "]", "coords", "=", "divmod", "(", "index", ",", "width", ")", "if", "coords", "[", "1", "]", "<", "width", "-", "1", ":", "neighbors", ".", "append", "(", "index", "+", "1", ")", "elif", "periodic", "and", "width", ">", "1", ":", "neighbors", ".", "append", "(", "index", "-", "width", "+", "1", ")", "if", "coords", "[", "0", "]", "<", "lattice_length", "-", "1", ":", "neighbors", ".", "append", "(", "index", "+", "width", ")", "elif", "periodic", ":", "neighbors", ".", "append", "(", "index", "-", "(", "lattice_length", "-", "1", ")", "*", "width", ")", "return", "neighbors" ]
Get the forward neighbors of a site in a lattice. :param index: Linear index of operator. :type index: int. :param lattice_length: The size of the 2D lattice in either dimension :type lattice_length: int. :param width: Optional parameter to define width. :type width: int. :param periodic: Optional parameter to indicate periodic boundary conditions. :type periodic: bool :returns: list of int -- the neighbors in linear index.
[ "Get", "the", "forward", "neighbors", "of", "a", "site", "in", "a", "lattice", "." ]
python
train
34.607143
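A small worked example of the forward-neighbour indexing in the record above, on a 3x3 lattice where the linear index is row * width + column; the import path follows the record's module path and may need adjusting.

from ncpol2sdpa.physics_utils import get_neighbors

# Centre site of a 3x3 lattice: forward neighbours are the site to the right and the site below.
print(get_neighbors(4, 3))                 # [5, 7]
# Right edge without periodic boundaries: only the site below remains.
print(get_neighbors(2, 3))                 # [5]
# With periodic boundaries the row wraps around to its first column.
print(get_neighbors(2, 3, periodic=True))  # [0, 5]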
HewlettPackard/python-hpOneView
hpOneView/oneview_client.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L446-L456
def server_hardware_types(self): """ Gets the ServerHardwareTypes API client. Returns: ServerHardwareTypes: """ if not self.__server_hardware_types: self.__server_hardware_types = ServerHardwareTypes( self.__connection) return self.__server_hardware_types
[ "def", "server_hardware_types", "(", "self", ")", ":", "if", "not", "self", ".", "__server_hardware_types", ":", "self", ".", "__server_hardware_types", "=", "ServerHardwareTypes", "(", "self", ".", "__connection", ")", "return", "self", ".", "__server_hardware_types" ]
Gets the ServerHardwareTypes API client. Returns: ServerHardwareTypes:
[ "Gets", "the", "ServerHardwareTypes", "API", "client", "." ]
python
train
30.363636
google/transitfeed
kmlwriter.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/kmlwriter.py#L675-L695
def _CreateShapePointFolder(self, shapes_folder, shape): """Create a KML Folder containing all the shape points in a shape. The folder contains placemarks for each shapepoint. Args: shapes_folder: A KML Shape Folder ElementTree.Element instance shape: The shape to plot. Returns: The Folder ElementTree.Element instance or None. """ folder_name = shape.shape_id + ' Shape Points' folder = self._CreateFolder(shapes_folder, folder_name, visible=False) for (index, (lat, lon, dist)) in enumerate(shape.points): placemark = self._CreatePlacemark(folder, str(index+1)) point = ET.SubElement(placemark, 'Point') coordinates = ET.SubElement(point, 'coordinates') coordinates.text = '%.6f,%.6f' % (lon, lat) return folder
[ "def", "_CreateShapePointFolder", "(", "self", ",", "shapes_folder", ",", "shape", ")", ":", "folder_name", "=", "shape", ".", "shape_id", "+", "' Shape Points'", "folder", "=", "self", ".", "_CreateFolder", "(", "shapes_folder", ",", "folder_name", ",", "visible", "=", "False", ")", "for", "(", "index", ",", "(", "lat", ",", "lon", ",", "dist", ")", ")", "in", "enumerate", "(", "shape", ".", "points", ")", ":", "placemark", "=", "self", ".", "_CreatePlacemark", "(", "folder", ",", "str", "(", "index", "+", "1", ")", ")", "point", "=", "ET", ".", "SubElement", "(", "placemark", ",", "'Point'", ")", "coordinates", "=", "ET", ".", "SubElement", "(", "point", ",", "'coordinates'", ")", "coordinates", ".", "text", "=", "'%.6f,%.6f'", "%", "(", "lon", ",", "lat", ")", "return", "folder" ]
Create a KML Folder containing all the shape points in a shape. The folder contains placemarks for each shapepoint. Args: shapes_folder: A KML Shape Folder ElementTree.Element instance shape: The shape to plot. Returns: The Folder ElementTree.Element instance or None.
[ "Create", "a", "KML", "Folder", "containing", "all", "the", "shape", "points", "in", "a", "shape", "." ]
python
train
36.952381
gtaylor/python-route53
route53/connection.py
https://github.com/gtaylor/python-route53/blob/b9fc7e258a79551c9ed61e4a71668b7f06f9e774/route53/connection.py#L53-L111
def _do_autopaginating_api_call(self, path, params, method, parser_func, next_marker_xpath, next_marker_param_name, next_type_xpath=None, parser_kwargs=None): """ Given an API method, the arguments passed to it, and a function to hand parsing off to, loop through the record sets in the API call until all records have been yielded. :param str method: The API method on the endpoint. :param dict params: The kwargs from the top-level API method. :param callable parser_func: A callable that is used for parsing the output from the API call. :param str next_marker_param_name: The XPath to the marker tag that will determine whether we continue paginating. :param str next_marker_param_name: The parameter name to manipulate in the request data to bring up the next page on the next request loop. :keyword str next_type_xpath: For the py:meth:`list_resource_record_sets_by_zone_id` method, there's an additional paginator token. Specifying this XPath looks for it. :keyword dict parser_kwargs: Optional dict of additional kwargs to pass on to the parser function. :rtype: generator :returns: Returns a generator that may be returned by the top-level API method. """ if not parser_kwargs: parser_kwargs = {} # We loop indefinitely since we have no idea how many "pages" of # results we're going to have to go through. while True: # An lxml Element node. root = self._send_request(path, params, method) # Individually yield HostedZone instances after parsing/instantiating. for record in parser_func(root, connection=self, **parser_kwargs): yield record # This will determine at what offset we start the next query. next_marker = root.find(next_marker_xpath) if next_marker is None: # If the NextMarker tag is absent, we know we've hit the # last page. break # if NextMarker is present, we'll adjust our API request params # and query again for the next page. params[next_marker_param_name] = next_marker.text if next_type_xpath: # This is a _list_resource_record_sets_by_zone_id call. Look # for the given tag via XPath and adjust our type arg for # the next request. Without specifying this, we loop # infinitely. next_type = root.find(next_type_xpath) params['type'] = next_type.text
[ "def", "_do_autopaginating_api_call", "(", "self", ",", "path", ",", "params", ",", "method", ",", "parser_func", ",", "next_marker_xpath", ",", "next_marker_param_name", ",", "next_type_xpath", "=", "None", ",", "parser_kwargs", "=", "None", ")", ":", "if", "not", "parser_kwargs", ":", "parser_kwargs", "=", "{", "}", "# We loop indefinitely since we have no idea how many \"pages\" of", "# results we're going to have to go through.", "while", "True", ":", "# An lxml Element node.", "root", "=", "self", ".", "_send_request", "(", "path", ",", "params", ",", "method", ")", "# Individually yield HostedZone instances after parsing/instantiating.", "for", "record", "in", "parser_func", "(", "root", ",", "connection", "=", "self", ",", "*", "*", "parser_kwargs", ")", ":", "yield", "record", "# This will determine at what offset we start the next query.", "next_marker", "=", "root", ".", "find", "(", "next_marker_xpath", ")", "if", "next_marker", "is", "None", ":", "# If the NextMarker tag is absent, we know we've hit the", "# last page.", "break", "# if NextMarker is present, we'll adjust our API request params", "# and query again for the next page.", "params", "[", "next_marker_param_name", "]", "=", "next_marker", ".", "text", "if", "next_type_xpath", ":", "# This is a _list_resource_record_sets_by_zone_id call. Look", "# for the given tag via XPath and adjust our type arg for", "# the next request. Without specifying this, we loop", "# infinitely.", "next_type", "=", "root", ".", "find", "(", "next_type_xpath", ")", "params", "[", "'type'", "]", "=", "next_type", ".", "text" ]
Given an API method, the arguments passed to it, and a function to hand parsing off to, loop through the record sets in the API call until all records have been yielded. :param str method: The API method on the endpoint. :param dict params: The kwargs from the top-level API method. :param callable parser_func: A callable that is used for parsing the output from the API call. :param str next_marker_param_name: The XPath to the marker tag that will determine whether we continue paginating. :param str next_marker_param_name: The parameter name to manipulate in the request data to bring up the next page on the next request loop. :keyword str next_type_xpath: For the py:meth:`list_resource_record_sets_by_zone_id` method, there's an additional paginator token. Specifying this XPath looks for it. :keyword dict parser_kwargs: Optional dict of additional kwargs to pass on to the parser function. :rtype: generator :returns: Returns a generator that may be returned by the top-level API method.
[ "Given", "an", "API", "method", "the", "arguments", "passed", "to", "it", "and", "a", "function", "to", "hand", "parsing", "off", "to", "loop", "through", "the", "record", "sets", "in", "the", "API", "call", "until", "all", "records", "have", "been", "yielded", "." ]
python
test
45.644068
has2k1/plotnine
plotnine/themes/themeable.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/themeable.py#L129-L138
def merge(self, other): """ Merge properties of other into self Raises ValueError if any them are a blank """ if self.is_blank() or other.is_blank(): raise ValueError('Cannot merge if there is a blank.') else: self.properties.update(other.properties)
[ "def", "merge", "(", "self", ",", "other", ")", ":", "if", "self", ".", "is_blank", "(", ")", "or", "other", ".", "is_blank", "(", ")", ":", "raise", "ValueError", "(", "'Cannot merge if there is a blank.'", ")", "else", ":", "self", ".", "properties", ".", "update", "(", "other", ".", "properties", ")" ]
Merge properties of other into self Raises ValueError if any them are a blank
[ "Merge", "properties", "of", "other", "into", "self" ]
python
train
31.4
klen/makesite
makesite/site.py
https://github.com/klen/makesite/blob/f6f77a43a04a256189e8fffbeac1ffd63f35a10c/makesite/site.py#L75-L98
def paste_template(self, template_name, template=None, deploy_dir=None): " Paste template. " LOGGER.debug("Paste template: %s" % template_name) deploy_dir = deploy_dir or self.deploy_dir template = template or self._get_template_path(template_name) self.read([op.join(template, settings.CFGNAME)], extending=True) for fname in gen_template_files(template): curdir = op.join(deploy_dir, op.dirname(fname)) if not op.exists(curdir): makedirs(curdir) source = op.join(template, fname) target = op.join(deploy_dir, fname) copy2(source, target) name, ext = op.splitext(fname) if ext == '.tmpl': t = Template.from_filename(target, namespace=self.as_dict()) with open(op.join(deploy_dir, name), 'w') as f: f.write(t.substitute()) remove(target) return deploy_dir
[ "def", "paste_template", "(", "self", ",", "template_name", ",", "template", "=", "None", ",", "deploy_dir", "=", "None", ")", ":", "LOGGER", ".", "debug", "(", "\"Paste template: %s\"", "%", "template_name", ")", "deploy_dir", "=", "deploy_dir", "or", "self", ".", "deploy_dir", "template", "=", "template", "or", "self", ".", "_get_template_path", "(", "template_name", ")", "self", ".", "read", "(", "[", "op", ".", "join", "(", "template", ",", "settings", ".", "CFGNAME", ")", "]", ",", "extending", "=", "True", ")", "for", "fname", "in", "gen_template_files", "(", "template", ")", ":", "curdir", "=", "op", ".", "join", "(", "deploy_dir", ",", "op", ".", "dirname", "(", "fname", ")", ")", "if", "not", "op", ".", "exists", "(", "curdir", ")", ":", "makedirs", "(", "curdir", ")", "source", "=", "op", ".", "join", "(", "template", ",", "fname", ")", "target", "=", "op", ".", "join", "(", "deploy_dir", ",", "fname", ")", "copy2", "(", "source", ",", "target", ")", "name", ",", "ext", "=", "op", ".", "splitext", "(", "fname", ")", "if", "ext", "==", "'.tmpl'", ":", "t", "=", "Template", ".", "from_filename", "(", "target", ",", "namespace", "=", "self", ".", "as_dict", "(", ")", ")", "with", "open", "(", "op", ".", "join", "(", "deploy_dir", ",", "name", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "t", ".", "substitute", "(", ")", ")", "remove", "(", "target", ")", "return", "deploy_dir" ]
Paste template.
[ "Paste", "template", "." ]
python
train
40
hollenstein/maspy
maspy/auxiliary.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/auxiliary.py#L733-L803
def returnSplineList(dependentVar, independentVar, subsetPercentage=0.4, cycles=10, minKnotPoints=10, initialKnots=200, splineOrder=2, terminalExpansion=0.1 ): """ #TODO: docstring Note: Expects sorted arrays. :param dependentVar: #TODO: docstring :param independentVar: #TODO: docstring :param subsetPercentage: #TODO: docstring :param cycles: #TODO: docstring :param minKnotPoints: #TODO: docstring :param initialKnots: #TODO: docstring :param splineOrder: #TODO: docstring :param terminalExpansion: expand subsets on both sides :returns: #TODO: docstring """ expansions = ddict(list) expansionArea = (independentVar[-1] - independentVar[0]) * terminalExpansion #adds 100 data points at both ends of the dependent and independent array for i in range(100): expansions['indUp'].append(independentVar[-1] + expansionArea/100*i) expansions['indDown'].append(independentVar[0] - expansionArea/100*(100-i+1) ) expansions['depUp'].append(dependentVar[-1]) expansions['depDown'].append(dependentVar[0]) dependentVar = numpy.array(expansions['depDown'] + list(dependentVar) + expansions['depUp'], dtype=numpy.float64 ) independentVar = numpy.array(expansions['indDown'] + list(independentVar) + expansions['indUp'], dtype=numpy.float64 ) splineList = list() for cycle in range(cycles): subset = sorted(random.sample(range(len(dependentVar)), int(len(dependentVar) * subsetPercentage) ) ) terminalExpansion dependentSubset = dependentVar[subset] independentSubset = independentVar[subset] minIndVar = independentSubset[minKnotPoints] maxIndVar = independentSubset[-minKnotPoints] knots = [float(i) * (maxIndVar-minIndVar) / initialKnots + minIndVar for i in range(1, initialKnots) ] ## remove knots with less then minKnotPoints data points ## lastKnot = knots[0] newKnotList = [lastKnot] for knotPos in range(1,len(knots)): nextKnot = knots[knotPos] numHits = (len(independentSubset[(independentSubset >= lastKnot) & (independentSubset <= nextKnot)]) ) if numHits >= minKnotPoints: newKnotList.append(nextKnot) lastKnot = nextKnot knots = newKnotList spline = LSQUnivariateSpline(independentSubset, dependentSubset, knots, k=splineOrder) splineList.append(spline) return splineList
[ "def", "returnSplineList", "(", "dependentVar", ",", "independentVar", ",", "subsetPercentage", "=", "0.4", ",", "cycles", "=", "10", ",", "minKnotPoints", "=", "10", ",", "initialKnots", "=", "200", ",", "splineOrder", "=", "2", ",", "terminalExpansion", "=", "0.1", ")", ":", "expansions", "=", "ddict", "(", "list", ")", "expansionArea", "=", "(", "independentVar", "[", "-", "1", "]", "-", "independentVar", "[", "0", "]", ")", "*", "terminalExpansion", "#adds 100 data points at both ends of the dependent and independent array", "for", "i", "in", "range", "(", "100", ")", ":", "expansions", "[", "'indUp'", "]", ".", "append", "(", "independentVar", "[", "-", "1", "]", "+", "expansionArea", "/", "100", "*", "i", ")", "expansions", "[", "'indDown'", "]", ".", "append", "(", "independentVar", "[", "0", "]", "-", "expansionArea", "/", "100", "*", "(", "100", "-", "i", "+", "1", ")", ")", "expansions", "[", "'depUp'", "]", ".", "append", "(", "dependentVar", "[", "-", "1", "]", ")", "expansions", "[", "'depDown'", "]", ".", "append", "(", "dependentVar", "[", "0", "]", ")", "dependentVar", "=", "numpy", ".", "array", "(", "expansions", "[", "'depDown'", "]", "+", "list", "(", "dependentVar", ")", "+", "expansions", "[", "'depUp'", "]", ",", "dtype", "=", "numpy", ".", "float64", ")", "independentVar", "=", "numpy", ".", "array", "(", "expansions", "[", "'indDown'", "]", "+", "list", "(", "independentVar", ")", "+", "expansions", "[", "'indUp'", "]", ",", "dtype", "=", "numpy", ".", "float64", ")", "splineList", "=", "list", "(", ")", "for", "cycle", "in", "range", "(", "cycles", ")", ":", "subset", "=", "sorted", "(", "random", ".", "sample", "(", "range", "(", "len", "(", "dependentVar", ")", ")", ",", "int", "(", "len", "(", "dependentVar", ")", "*", "subsetPercentage", ")", ")", ")", "terminalExpansion", "dependentSubset", "=", "dependentVar", "[", "subset", "]", "independentSubset", "=", "independentVar", "[", "subset", "]", "minIndVar", "=", "independentSubset", "[", "minKnotPoints", "]", "maxIndVar", "=", "independentSubset", "[", "-", "minKnotPoints", "]", "knots", "=", "[", "float", "(", "i", ")", "*", "(", "maxIndVar", "-", "minIndVar", ")", "/", "initialKnots", "+", "minIndVar", "for", "i", "in", "range", "(", "1", ",", "initialKnots", ")", "]", "## remove knots with less then minKnotPoints data points ##", "lastKnot", "=", "knots", "[", "0", "]", "newKnotList", "=", "[", "lastKnot", "]", "for", "knotPos", "in", "range", "(", "1", ",", "len", "(", "knots", ")", ")", ":", "nextKnot", "=", "knots", "[", "knotPos", "]", "numHits", "=", "(", "len", "(", "independentSubset", "[", "(", "independentSubset", ">=", "lastKnot", ")", "&", "(", "independentSubset", "<=", "nextKnot", ")", "]", ")", ")", "if", "numHits", ">=", "minKnotPoints", ":", "newKnotList", ".", "append", "(", "nextKnot", ")", "lastKnot", "=", "nextKnot", "knots", "=", "newKnotList", "spline", "=", "LSQUnivariateSpline", "(", "independentSubset", ",", "dependentSubset", ",", "knots", ",", "k", "=", "splineOrder", ")", "splineList", ".", "append", "(", "spline", ")", "return", "splineList" ]
#TODO: docstring Note: Expects sorted arrays. :param dependentVar: #TODO: docstring :param independentVar: #TODO: docstring :param subsetPercentage: #TODO: docstring :param cycles: #TODO: docstring :param minKnotPoints: #TODO: docstring :param initialKnots: #TODO: docstring :param splineOrder: #TODO: docstring :param terminalExpansion: expand subsets on both sides :returns: #TODO: docstring
[ "#TODO", ":", "docstring" ]
python
train
40.732394
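A rough usage sketch for the bootstrap-spline helper in the record above: the arrays must be sorted by the independent variable, and the returned LSQUnivariateSpline objects are callable, so they can be evaluated and averaged. The synthetic data here is purely illustrative and the import path follows the record's module layout.

import numpy
from maspy.auxiliary import returnSplineList

x = numpy.linspace(0, 10, 500)                         # independent variable, sorted
y = numpy.sin(x) + numpy.random.normal(0, 0.05, 500)   # noisy dependent variable
splines = returnSplineList(y, x, subsetPercentage=0.4, cycles=10)
# Average the bootstrapped splines to get a smoothed estimate of y over x.
y_smooth = numpy.mean([spline(x) for spline in splines], axis=0)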
kashifrazzaqui/again
again/statemachine.py
https://github.com/kashifrazzaqui/again/blob/09cfbda7650d44447dbb0b27780835e9236741ea/again/statemachine.py#L147-L160
def consume(self, event): """ process the current event, setup new state and teardown current state """ future_states = self.can(event) new_state = future_states[0] if len(future_states) > 1: new_state = self.choose(event) event.execute() self.process(event) new_state.setup(event, self) self.teardown(event) return new_state
[ "def", "consume", "(", "self", ",", "event", ")", ":", "future_states", "=", "self", ".", "can", "(", "event", ")", "new_state", "=", "future_states", "[", "0", "]", "if", "len", "(", "future_states", ")", ">", "1", ":", "new_state", "=", "self", ".", "choose", "(", "event", ")", "event", ".", "execute", "(", ")", "self", ".", "process", "(", "event", ")", "new_state", ".", "setup", "(", "event", ",", "self", ")", "self", ".", "teardown", "(", "event", ")", "return", "new_state" ]
process the current event, setup new state and teardown current state
[ "process", "the", "current", "event", "setup", "new", "state", "and", "teardown", "current", "state" ]
python
train
29.5
cloudendpoints/endpoints-python
endpoints/api_config.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/api_config.py#L102-L116
def _Enum(docstring, *names): """Utility to generate enum classes used by annotations. Args: docstring: Docstring for the generated enum class. *names: Enum names. Returns: A class that contains enum names as attributes. """ enums = dict(zip(names, range(len(names)))) reverse = dict((value, key) for key, value in enums.iteritems()) enums['reverse_mapping'] = reverse enums['__doc__'] = docstring return type('Enum', (object,), enums)
[ "def", "_Enum", "(", "docstring", ",", "*", "names", ")", ":", "enums", "=", "dict", "(", "zip", "(", "names", ",", "range", "(", "len", "(", "names", ")", ")", ")", ")", "reverse", "=", "dict", "(", "(", "value", ",", "key", ")", "for", "key", ",", "value", "in", "enums", ".", "iteritems", "(", ")", ")", "enums", "[", "'reverse_mapping'", "]", "=", "reverse", "enums", "[", "'__doc__'", "]", "=", "docstring", "return", "type", "(", "'Enum'", ",", "(", "object", ",", ")", ",", "enums", ")" ]
Utility to generate enum classes used by annotations. Args: docstring: Docstring for the generated enum class. *names: Enum names. Returns: A class that contains enum names as attributes.
[ "Utility", "to", "generate", "enum", "classes", "used", "by", "annotations", "." ]
python
train
30.2
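An illustrative sketch of what the private helper in the record above produces (it targets Python 2, given the use of dict.iteritems); the enum name and member names are made up for the example.

from endpoints.api_config import _Enum  # module-private helper, imported here only for illustration

Color = _Enum('Illustrative colour enum.', 'RED', 'GREEN', 'BLUE')

Color.RED                  # 0
Color.BLUE                 # 2
Color.reverse_mapping[1]   # 'GREEN'
Color.__doc__              # 'Illustrative colour enum.'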
Pixelapse/pyglass
pyglass/quicklook/api.py
https://github.com/Pixelapse/pyglass/blob/83cd0ff2b0b7cdaf4ec6f54559a626e67455cd33/pyglass/quicklook/api.py#L24-L44
def preview(src_path): ''' Generates a preview of src_path in the requested format. :returns: A list of preview paths, one for each page. Blank list if unsupported. ''' preview = embedded_preview(src_path) if not is_valid_preview(preview): preview = generator_preview(src_path) if not is_valid_preview(preview): preview = thumbnail_preview(src_path) # Ensure the preview is returned in the right format if is_valid_preview(preview): if mimetype(preview) in [ExportMimeType.PNG]: return [preview] if mimetype(preview) in [ExportMimeType.PDF]: return to_pngs(preview) return []
[ "def", "preview", "(", "src_path", ")", ":", "preview", "=", "embedded_preview", "(", "src_path", ")", "if", "not", "is_valid_preview", "(", "preview", ")", ":", "preview", "=", "generator_preview", "(", "src_path", ")", "if", "not", "is_valid_preview", "(", "preview", ")", ":", "preview", "=", "thumbnail_preview", "(", "src_path", ")", "# Ensure the preview is returned in the right format", "if", "is_valid_preview", "(", "preview", ")", ":", "if", "mimetype", "(", "preview", ")", "in", "[", "ExportMimeType", ".", "PNG", "]", ":", "return", "[", "preview", "]", "if", "mimetype", "(", "preview", ")", "in", "[", "ExportMimeType", ".", "PDF", "]", ":", "return", "to_pngs", "(", "preview", ")", "return", "[", "]" ]
Generates a preview of src_path in the requested format. :returns: A list of preview paths, one for each page. Blank list if unsupported.
[ "Generates", "a", "preview", "of", "src_path", "in", "the", "requested", "format", ".", ":", "returns", ":", "A", "list", "of", "preview", "paths", "one", "for", "each", "page", ".", "Blank", "list", "if", "unsupported", "." ]
python
train
28.857143
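A minimal usage sketch for the preview helper in the record above; the input path is hypothetical, and per the docstring the function returns one PNG path per page or an empty list for unsupported formats.

from pyglass.quicklook.api import preview

pages = preview('/tmp/example.pdf')  # hypothetical input file
for page_png in pages:
    print(page_png)                  # path to one PNG per page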
mwgielen/jackal
jackal/scripts/eternalblue.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/eternalblue.py#L123-L134
def detect_os(self, ip): """ Runs the checker.py scripts to detect the os. """ process = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'checker.py'), str(ip)], stdout=subprocess.PIPE) out = process.stdout.decode('utf-8').split('\n') system_os = '' for line in out: if line.startswith('Target OS:'): system_os = line.replace('Target OS: ', '') break return system_os
[ "def", "detect_os", "(", "self", ",", "ip", ")", ":", "process", "=", "subprocess", ".", "run", "(", "[", "'python2'", ",", "os", ".", "path", ".", "join", "(", "self", ".", "datadir", ",", "'MS17-010'", ",", "'checker.py'", ")", ",", "str", "(", "ip", ")", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", "=", "process", ".", "stdout", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ")", "system_os", "=", "''", "for", "line", "in", "out", ":", "if", "line", ".", "startswith", "(", "'Target OS:'", ")", ":", "system_os", "=", "line", ".", "replace", "(", "'Target OS: '", ",", "''", ")", "break", "return", "system_os" ]
Runs the checker.py scripts to detect the os.
[ "Runs", "the", "checker", ".", "py", "scripts", "to", "detect", "the", "os", "." ]
python
valid
40.5
tundish/turberfield-dialogue
turberfield/dialogue/player.py
https://github.com/tundish/turberfield-dialogue/blob/e7ccf7c19ae162e2f315ddf2642394e858529b4a/turberfield/dialogue/player.py#L45-L95
def rehearse( folders, references, handler, repeat=0, roles=1, strict=False, loop=None ): """Cast a set of objects into a sequence of scene scripts. Deliver the performance. :param folders: A sequence of :py:class:`turberfield.dialogue.model.SceneScript.Folder` objects. :param references: A sequence of Python objects. :param handler: A callable object. This will be invoked with every event from the performance. :param int repeat: Extra repetitions of each folder. :param int roles: Maximum number of roles permitted each character. :param bool strict: Only fully-cast scripts to be performed. This function is a generator. It yields events from the performance. """ if isinstance(folders, SceneScript.Folder): folders = [folders] yield from handler(references, loop=loop) matcher = Matcher(folders) performer = Performer(folders, references) while True: folder, index, script, selection, interlude = performer.next( folders, references, strict=strict, roles=roles ) yield from handler(script, loop=loop) for item in performer.run(react=False, strict=strict, roles=roles): yield from handler(item, loop=loop) if isinstance(interlude, Callable): metadata = next(handler( interlude, folder, index, references, loop=loop ), None) yield metadata if metadata is None: return branch = next(matcher.options(metadata)) if branch != folder: performer = Performer([branch], references) if not repeat: break else: repeat -= 1
[ "def", "rehearse", "(", "folders", ",", "references", ",", "handler", ",", "repeat", "=", "0", ",", "roles", "=", "1", ",", "strict", "=", "False", ",", "loop", "=", "None", ")", ":", "if", "isinstance", "(", "folders", ",", "SceneScript", ".", "Folder", ")", ":", "folders", "=", "[", "folders", "]", "yield", "from", "handler", "(", "references", ",", "loop", "=", "loop", ")", "matcher", "=", "Matcher", "(", "folders", ")", "performer", "=", "Performer", "(", "folders", ",", "references", ")", "while", "True", ":", "folder", ",", "index", ",", "script", ",", "selection", ",", "interlude", "=", "performer", ".", "next", "(", "folders", ",", "references", ",", "strict", "=", "strict", ",", "roles", "=", "roles", ")", "yield", "from", "handler", "(", "script", ",", "loop", "=", "loop", ")", "for", "item", "in", "performer", ".", "run", "(", "react", "=", "False", ",", "strict", "=", "strict", ",", "roles", "=", "roles", ")", ":", "yield", "from", "handler", "(", "item", ",", "loop", "=", "loop", ")", "if", "isinstance", "(", "interlude", ",", "Callable", ")", ":", "metadata", "=", "next", "(", "handler", "(", "interlude", ",", "folder", ",", "index", ",", "references", ",", "loop", "=", "loop", ")", ",", "None", ")", "yield", "metadata", "if", "metadata", "is", "None", ":", "return", "branch", "=", "next", "(", "matcher", ".", "options", "(", "metadata", ")", ")", "if", "branch", "!=", "folder", ":", "performer", "=", "Performer", "(", "[", "branch", "]", ",", "references", ")", "if", "not", "repeat", ":", "break", "else", ":", "repeat", "-=", "1" ]
Cast a set of objects into a sequence of scene scripts. Deliver the performance. :param folders: A sequence of :py:class:`turberfield.dialogue.model.SceneScript.Folder` objects. :param references: A sequence of Python objects. :param handler: A callable object. This will be invoked with every event from the performance. :param int repeat: Extra repetitions of each folder. :param int roles: Maximum number of roles permitted each character. :param bool strict: Only fully-cast scripts to be performed. This function is a generator. It yields events from the performance.
[ "Cast", "a", "set", "of", "objects", "into", "a", "sequence", "of", "scene", "scripts", ".", "Deliver", "the", "performance", "." ]
python
train
33.411765
aiven/pghoard
pghoard/rohmu/encryptor.py
https://github.com/aiven/pghoard/blob/2994165d4ef3ff7a5669a2527346bcbfb5b3bd8a/pghoard/rohmu/encryptor.py#L101-L109
def write(self, data): """Encrypt and write the given bytes""" self._check_not_closed() if not data: return 0 enc_data = self.encryptor.update(data) self.next_fp.write(enc_data) self.offset += len(data) return len(data)
[ "def", "write", "(", "self", ",", "data", ")", ":", "self", ".", "_check_not_closed", "(", ")", "if", "not", "data", ":", "return", "0", "enc_data", "=", "self", ".", "encryptor", ".", "update", "(", "data", ")", "self", ".", "next_fp", ".", "write", "(", "enc_data", ")", "self", ".", "offset", "+=", "len", "(", "data", ")", "return", "len", "(", "data", ")" ]
Encrypt and write the given bytes
[ "Encrypt", "and", "write", "the", "given", "bytes" ]
python
train
31
proteanhq/protean
src/protean/impl/repository/dict_repo.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/impl/repository/dict_repo.py#L134-L146
def _set_auto_fields(self, model_obj): """Set the values of the auto field using counter""" for field_name, field_obj in \ self.entity_cls.meta_.auto_fields: counter_key = f'{self.schema_name}_{field_name}' if not (field_name in model_obj and model_obj[field_name] is not None): # Increment the counter and it should start from 1 counter = next(self.conn['counters'][counter_key]) if not counter: counter = next(self.conn['counters'][counter_key]) model_obj[field_name] = counter return model_obj
[ "def", "_set_auto_fields", "(", "self", ",", "model_obj", ")", ":", "for", "field_name", ",", "field_obj", "in", "self", ".", "entity_cls", ".", "meta_", ".", "auto_fields", ":", "counter_key", "=", "f'{self.schema_name}_{field_name}'", "if", "not", "(", "field_name", "in", "model_obj", "and", "model_obj", "[", "field_name", "]", "is", "not", "None", ")", ":", "# Increment the counter and it should start from 1", "counter", "=", "next", "(", "self", ".", "conn", "[", "'counters'", "]", "[", "counter_key", "]", ")", "if", "not", "counter", ":", "counter", "=", "next", "(", "self", ".", "conn", "[", "'counters'", "]", "[", "counter_key", "]", ")", "model_obj", "[", "field_name", "]", "=", "counter", "return", "model_obj" ]
Set the values of the auto field using counter
[ "Set", "the", "values", "of", "the", "auto", "field", "using", "counter" ]
python
train
48.692308
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L7644-L7687
def kxtrct(keywd, terms, nterms, instring, termlen=_default_len_out, stringlen=_default_len_out, substrlen=_default_len_out): """ Locate a keyword in a string and extract the substring from the beginning of the first word following the keyword to the beginning of the first subsequent recognized terminator of a list. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/kxtrct_c.html :param keywd: Word that marks the beginning of text of interest. :type keywd: str :param terms: Set of words, any of which marks the end of text. :type terms: Array of str :param nterms: Number of terms. :type nterms: int :param instring: String containing a sequence of words. :type instring: str :param termlen: Length of strings in string array term. :type termlen: int :param stringlen: Available space in argument string. :type stringlen: int :param substrlen: Available space in output substring. :type substrlen: int :return: String containing a sequence of words, String from end of keywd to beginning of first terms item found. :rtype: tuple """ assert nterms <= len(terms) # Python strings and string arrays => to C char pointers keywd = stypes.stringToCharP(keywd) terms = stypes.listToCharArrayPtr([s[:termlen-1] for s in terms[:nterms]],xLen=termlen,yLen=nterms) instring = stypes.stringToCharP(instring[:stringlen-1],inlen=stringlen) substr = stypes.stringToCharP(substrlen) # Python ints => to C ints termlen = ctypes.c_int(termlen) nterms = ctypes.c_int(nterms) stringlen = ctypes.c_int(stringlen) substrlen = ctypes.c_int(substrlen) found = ctypes.c_int() libspice.kxtrct_c(keywd, termlen, terms, nterms, stringlen, substrlen, instring, ctypes.byref(found), substr) return stypes.toPythonString(instring), stypes.toPythonString( substr), bool(found.value)
[ "def", "kxtrct", "(", "keywd", ",", "terms", ",", "nterms", ",", "instring", ",", "termlen", "=", "_default_len_out", ",", "stringlen", "=", "_default_len_out", ",", "substrlen", "=", "_default_len_out", ")", ":", "assert", "nterms", "<=", "len", "(", "terms", ")", "# Python strings and string arrays => to C char pointers", "keywd", "=", "stypes", ".", "stringToCharP", "(", "keywd", ")", "terms", "=", "stypes", ".", "listToCharArrayPtr", "(", "[", "s", "[", ":", "termlen", "-", "1", "]", "for", "s", "in", "terms", "[", ":", "nterms", "]", "]", ",", "xLen", "=", "termlen", ",", "yLen", "=", "nterms", ")", "instring", "=", "stypes", ".", "stringToCharP", "(", "instring", "[", ":", "stringlen", "-", "1", "]", ",", "inlen", "=", "stringlen", ")", "substr", "=", "stypes", ".", "stringToCharP", "(", "substrlen", ")", "# Python ints => to C ints", "termlen", "=", "ctypes", ".", "c_int", "(", "termlen", ")", "nterms", "=", "ctypes", ".", "c_int", "(", "nterms", ")", "stringlen", "=", "ctypes", ".", "c_int", "(", "stringlen", ")", "substrlen", "=", "ctypes", ".", "c_int", "(", "substrlen", ")", "found", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "kxtrct_c", "(", "keywd", ",", "termlen", ",", "terms", ",", "nterms", ",", "stringlen", ",", "substrlen", ",", "instring", ",", "ctypes", ".", "byref", "(", "found", ")", ",", "substr", ")", "return", "stypes", ".", "toPythonString", "(", "instring", ")", ",", "stypes", ".", "toPythonString", "(", "substr", ")", ",", "bool", "(", "found", ".", "value", ")" ]
Locate a keyword in a string and extract the substring from the beginning of the first word following the keyword to the beginning of the first subsequent recognized terminator of a list. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/kxtrct_c.html :param keywd: Word that marks the beginning of text of interest. :type keywd: str :param terms: Set of words, any of which marks the end of text. :type terms: Array of str :param nterms: Number of terms. :type nterms: int :param instring: String containing a sequence of words. :type instring: str :param termlen: Length of strings in string array term. :type termlen: int :param stringlen: Available space in argument string. :type stringlen: int :param substrlen: Available space in output substring. :type substrlen: int :return: String containing a sequence of words, String from end of keywd to beginning of first terms item found. :rtype: tuple
[ "Locate", "a", "keyword", "in", "a", "string", "and", "extract", "the", "substring", "from", "the", "beginning", "of", "the", "first", "word", "following", "the", "keyword", "to", "the", "beginning", "of", "the", "first", "subsequent", "recognized", "terminator", "of", "a", "list", "." ]
python
train
44.159091
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L789-L801
def qos_rcv_queue_multicast_threshold_traffic_class4(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos") rcv_queue = ET.SubElement(qos, "rcv-queue") multicast = ET.SubElement(rcv_queue, "multicast") threshold = ET.SubElement(multicast, "threshold") traffic_class4 = ET.SubElement(threshold, "traffic-class4") traffic_class4.text = kwargs.pop('traffic_class4') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "qos_rcv_queue_multicast_threshold_traffic_class4", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "qos", "=", "ET", ".", "SubElement", "(", "config", ",", "\"qos\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-qos\"", ")", "rcv_queue", "=", "ET", ".", "SubElement", "(", "qos", ",", "\"rcv-queue\"", ")", "multicast", "=", "ET", ".", "SubElement", "(", "rcv_queue", ",", "\"multicast\"", ")", "threshold", "=", "ET", ".", "SubElement", "(", "multicast", ",", "\"threshold\"", ")", "traffic_class4", "=", "ET", ".", "SubElement", "(", "threshold", ",", "\"traffic-class4\"", ")", "traffic_class4", ".", "text", "=", "kwargs", ".", "pop", "(", "'traffic_class4'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
46.846154
ajenhl/tacl
tacl/data_store.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/data_store.py#L471-L499
def _get_text_id(self, witness): """Returns the database ID of the Text record for `witness`. This may require creating such a record. If `text`\'s checksum does not match an existing record's checksum, the record's checksum is updated and all associated TextNGram and TextHasNGram records are deleted. :param witness: witness to add a record for :type witness: `WitnessText` :rtype: `int` """ name, siglum = witness.get_names() text_record = self._conn.execute(constants.SELECT_TEXT_SQL, [name, siglum]).fetchone() if text_record is None: text_id = self._add_text_record(witness) else: text_id = text_record['id'] if text_record['checksum'] != witness.get_checksum(): filename = witness.get_filename() self._logger.info('Text {} has changed since it was added to ' 'the database'.format(filename)) self._update_text_record(witness, text_id) self._logger.info('Deleting potentially out-of-date n-grams') self._delete_text_ngrams(text_id) return text_id
[ "def", "_get_text_id", "(", "self", ",", "witness", ")", ":", "name", ",", "siglum", "=", "witness", ".", "get_names", "(", ")", "text_record", "=", "self", ".", "_conn", ".", "execute", "(", "constants", ".", "SELECT_TEXT_SQL", ",", "[", "name", ",", "siglum", "]", ")", ".", "fetchone", "(", ")", "if", "text_record", "is", "None", ":", "text_id", "=", "self", ".", "_add_text_record", "(", "witness", ")", "else", ":", "text_id", "=", "text_record", "[", "'id'", "]", "if", "text_record", "[", "'checksum'", "]", "!=", "witness", ".", "get_checksum", "(", ")", ":", "filename", "=", "witness", ".", "get_filename", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Text {} has changed since it was added to '", "'the database'", ".", "format", "(", "filename", ")", ")", "self", ".", "_update_text_record", "(", "witness", ",", "text_id", ")", "self", ".", "_logger", ".", "info", "(", "'Deleting potentially out-of-date n-grams'", ")", "self", ".", "_delete_text_ngrams", "(", "text_id", ")", "return", "text_id" ]
Returns the database ID of the Text record for `witness`. This may require creating such a record. If `text`\'s checksum does not match an existing record's checksum, the record's checksum is updated and all associated TextNGram and TextHasNGram records are deleted. :param witness: witness to add a record for :type witness: `WitnessText` :rtype: `int`
[ "Returns", "the", "database", "ID", "of", "the", "Text", "record", "for", "witness", "." ]
python
train
42.448276
tensorflow/tensor2tensor
tensor2tensor/utils/diet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/diet.py#L177-L225
def update_variable(self, var, grad_var): """Update the variable and its slots.""" params = self.params global_step = tf.to_float(self.global_step) + 1 # compute learning rate lrate = params.learning_rate if params.learning_rate_decay_scheme == "noam": lrate *= tf.minimum(global_step * params.learning_rate_warmup_steps**-1.5, global_step**-0.5) else: assert params.learning_rate_decay_scheme == "none" lrate *= tf.minimum(global_step / params.learning_rate_warmup_steps, 1.0) # compute adjustment due to second moment slots = params.slots[var.op.name] grad_squared = tf.square(grad_var) beta2_pow = tf.pow(params.beta2, global_step) if params.factored_second_moment_accumulator and len(var.shape) == 2: vr_update = tf.assign(slots["adam_vr"], slots["adam_vr"] * params.beta2 + tf.reduce_mean(grad_squared, 1, keepdims=True) * (1.0 - params.beta2)) vc_update = tf.assign(slots["adam_vc"], slots["adam_vc"] * params.beta2 + tf.reduce_mean(grad_squared, 0, keepdims=True) * (1.0 - params.beta2)) with tf.control_dependencies([vr_update, vc_update]): vr = tf.sqrt(slots["adam_vr"] / (1.0 - beta2_pow)) + params.epsilon vc = tf.sqrt(slots["adam_vc"] / (1.0 - beta2_pow)) + params.epsilon vc /= tf.reduce_mean(vc) denom = vr * vc else: v_update = tf.assign(slots["adam_v"], slots["adam_v"] * params.beta2 + grad_squared * (1.0 - params.beta2)) with tf.control_dependencies([v_update]): denom = tf.sqrt(slots["adam_v"] / (1.0 - beta2_pow)) + params.epsilon # compute momentum if applicable if params.beta1 != 0.0: m_update = tf.assign(slots["adam_m"], slots["adam_m"] * params.beta1 + grad_var * (1.0 - params.beta1)) with tf.control_dependencies([m_update]): grad_var = slots["adam_m"] # update var subtrahend = lrate * grad_var / denom new_val = _quantize(_dequantize(var, params) - subtrahend, params) return tf.assign(var, new_val)
[ "def", "update_variable", "(", "self", ",", "var", ",", "grad_var", ")", ":", "params", "=", "self", ".", "params", "global_step", "=", "tf", ".", "to_float", "(", "self", ".", "global_step", ")", "+", "1", "# compute learning rate", "lrate", "=", "params", ".", "learning_rate", "if", "params", ".", "learning_rate_decay_scheme", "==", "\"noam\"", ":", "lrate", "*=", "tf", ".", "minimum", "(", "global_step", "*", "params", ".", "learning_rate_warmup_steps", "**", "-", "1.5", ",", "global_step", "**", "-", "0.5", ")", "else", ":", "assert", "params", ".", "learning_rate_decay_scheme", "==", "\"none\"", "lrate", "*=", "tf", ".", "minimum", "(", "global_step", "/", "params", ".", "learning_rate_warmup_steps", ",", "1.0", ")", "# compute adjustment due to second moment", "slots", "=", "params", ".", "slots", "[", "var", ".", "op", ".", "name", "]", "grad_squared", "=", "tf", ".", "square", "(", "grad_var", ")", "beta2_pow", "=", "tf", ".", "pow", "(", "params", ".", "beta2", ",", "global_step", ")", "if", "params", ".", "factored_second_moment_accumulator", "and", "len", "(", "var", ".", "shape", ")", "==", "2", ":", "vr_update", "=", "tf", ".", "assign", "(", "slots", "[", "\"adam_vr\"", "]", ",", "slots", "[", "\"adam_vr\"", "]", "*", "params", ".", "beta2", "+", "tf", ".", "reduce_mean", "(", "grad_squared", ",", "1", ",", "keepdims", "=", "True", ")", "*", "(", "1.0", "-", "params", ".", "beta2", ")", ")", "vc_update", "=", "tf", ".", "assign", "(", "slots", "[", "\"adam_vc\"", "]", ",", "slots", "[", "\"adam_vc\"", "]", "*", "params", ".", "beta2", "+", "tf", ".", "reduce_mean", "(", "grad_squared", ",", "0", ",", "keepdims", "=", "True", ")", "*", "(", "1.0", "-", "params", ".", "beta2", ")", ")", "with", "tf", ".", "control_dependencies", "(", "[", "vr_update", ",", "vc_update", "]", ")", ":", "vr", "=", "tf", ".", "sqrt", "(", "slots", "[", "\"adam_vr\"", "]", "/", "(", "1.0", "-", "beta2_pow", ")", ")", "+", "params", ".", "epsilon", "vc", "=", "tf", ".", "sqrt", "(", "slots", "[", "\"adam_vc\"", "]", "/", "(", "1.0", "-", "beta2_pow", ")", ")", "+", "params", ".", "epsilon", "vc", "/=", "tf", ".", "reduce_mean", "(", "vc", ")", "denom", "=", "vr", "*", "vc", "else", ":", "v_update", "=", "tf", ".", "assign", "(", "slots", "[", "\"adam_v\"", "]", ",", "slots", "[", "\"adam_v\"", "]", "*", "params", ".", "beta2", "+", "grad_squared", "*", "(", "1.0", "-", "params", ".", "beta2", ")", ")", "with", "tf", ".", "control_dependencies", "(", "[", "v_update", "]", ")", ":", "denom", "=", "tf", ".", "sqrt", "(", "slots", "[", "\"adam_v\"", "]", "/", "(", "1.0", "-", "beta2_pow", ")", ")", "+", "params", ".", "epsilon", "# compute momentum if applicable", "if", "params", ".", "beta1", "!=", "0.0", ":", "m_update", "=", "tf", ".", "assign", "(", "slots", "[", "\"adam_m\"", "]", ",", "slots", "[", "\"adam_m\"", "]", "*", "params", ".", "beta1", "+", "grad_var", "*", "(", "1.0", "-", "params", ".", "beta1", ")", ")", "with", "tf", ".", "control_dependencies", "(", "[", "m_update", "]", ")", ":", "grad_var", "=", "slots", "[", "\"adam_m\"", "]", "# update var", "subtrahend", "=", "lrate", "*", "grad_var", "/", "denom", "new_val", "=", "_quantize", "(", "_dequantize", "(", "var", ",", "params", ")", "-", "subtrahend", ",", "params", ")", "return", "tf", ".", "assign", "(", "var", ",", "new_val", ")" ]
Update the variable and its slots.
[ "Update", "the", "variable", "and", "its", "slots", "." ]
python
train
45.22449
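The factored second-moment branch in the record above keeps one per-row and one per-column accumulator instead of a full matrix of squared-gradient statistics. A standalone NumPy sketch of that idea (illustrative values, not the library code; bias correction and epsilon omitted for brevity):

import numpy as np

beta2 = 0.999
grad = np.random.randn(4, 3)          # gradient of a 2-D weight
grad_sq = grad ** 2

vr = np.zeros((4, 1))                 # per-row accumulator ("adam_vr")
vc = np.zeros((1, 3))                 # per-column accumulator ("adam_vc")
vr = beta2 * vr + (1.0 - beta2) * grad_sq.mean(axis=1, keepdims=True)
vc = beta2 * vc + (1.0 - beta2) * grad_sq.mean(axis=0, keepdims=True)

# rank-1 proxy for sqrt of the second moment, as in the record above
denom = np.sqrt(vr) * (np.sqrt(vc) / np.sqrt(vc).mean())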
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_metrics.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_metrics.py#L221-L232
def NOAJS_metric(bpmn_graph): """ Returns the value of the NOAJS metric (Number of Activities, joins and splits) for the BPMNDiagramGraph instance. :param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model. """ activities_count = all_activities_count(bpmn_graph) gateways_count = all_gateways_count(bpmn_graph) return activities_count + gateways_count
[ "def", "NOAJS_metric", "(", "bpmn_graph", ")", ":", "activities_count", "=", "all_activities_count", "(", "bpmn_graph", ")", "gateways_count", "=", "all_gateways_count", "(", "bpmn_graph", ")", "return", "activities_count", "+", "gateways_count" ]
Returns the value of the NOAJS metric (Number of Activities, joins and splits) for the BPMNDiagramGraph instance. :param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
[ "Returns", "the", "value", "of", "the", "NOAJS", "metric", "(", "Number", "of", "Activities", "joins", "and", "splits", ")", "for", "the", "BPMNDiagramGraph", "instance", "." ]
python
train
32.666667
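NOAJS in the record above is a plain sum of two counts, so a worked example is just arithmetic (the numbers are made up):

activities_count = 14   # tasks, subprocesses, call activities, ...
gateways_count = 6      # splits and joins
noajs = activities_count + gateways_count
assert noajs == 20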
slarse/clanimtk
clanimtk/util.py
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/util.py#L94-L116
def concatechain(*generators: types.FrameGenerator, separator: str = ''): """Return a generator that in each iteration takes one value from each of the supplied generators, joins them together with the specified separator and yields the result. Stops as soon as any iterator raises StopIteration and returns the value contained in it. Primarily created for chaining string generators, hence the name. Args: generators: Any number of generators that yield types that can be joined together with the separator string. separator: A separator to insert between each value yielded by the different generators. Returns: A generator that yields strings that are the concatenation of one value from each of the generators, joined together with the separator string. """ while True: try: next_ = [next(gen) for gen in generators] yield separator.join(next_) except StopIteration as exc: return exc.value
[ "def", "concatechain", "(", "*", "generators", ":", "types", ".", "FrameGenerator", ",", "separator", ":", "str", "=", "''", ")", ":", "while", "True", ":", "try", ":", "next_", "=", "[", "next", "(", "gen", ")", "for", "gen", "in", "generators", "]", "yield", "separator", ".", "join", "(", "next_", ")", "except", "StopIteration", "as", "exc", ":", "return", "exc", ".", "value" ]
Return a generator that in each iteration takes one value from each of the supplied generators, joins them together with the specified separator and yields the result. Stops as soon as any iterator raises StopIteration and returns the value contained in it. Primarily created for chaining string generators, hence the name. Args: generators: Any number of generators that yield types that can be joined together with the separator string. separator: A separator to insert between each value yielded by the different generators. Returns: A generator that yields strings that are the concatenation of one value from each of the generators, joined together with the separator string.
[ "Return", "a", "generator", "that", "in", "each", "iteration", "takes", "one", "value", "from", "each", "of", "the", "supplied", "generators", "joins", "them", "together", "with", "the", "specified", "separator", "and", "yields", "the", "result", ".", "Stops", "as", "soon", "as", "any", "iterator", "raises", "StopIteration", "and", "returns", "the", "value", "contained", "in", "it", "." ]
python
train
43.913043
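A self-contained usage sketch for `concatechain`; the generators here are simple stand-ins for the animation frame generators the docstring has in mind:

def words():
    yield from ("spinning", "loading")

def dots():
    yield from (".", "..")

for frame in concatechain(words(), dots(), separator=" "):
    print(frame)   # "spinning ." then "loading .."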
manolomartinez/greg
greg/classes.py
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/classes.py#L202-L217
def will_tag(self): """ Check whether the feed should be tagged """ wanttags = self.retrieve_config('Tag', 'no') if wanttags == 'yes': if aux.staggerexists: willtag = True else: willtag = False print(("You want me to tag {0}, but you have not installed " "the Stagger module. I cannot honour your request."). format(self.name), file=sys.stderr, flush=True) else: willtag = False return willtag
[ "def", "will_tag", "(", "self", ")", ":", "wanttags", "=", "self", ".", "retrieve_config", "(", "'Tag'", ",", "'no'", ")", "if", "wanttags", "==", "'yes'", ":", "if", "aux", ".", "staggerexists", ":", "willtag", "=", "True", "else", ":", "willtag", "=", "False", "print", "(", "(", "\"You want me to tag {0}, but you have not installed \"", "\"the Stagger module. I cannot honour your request.\"", ")", ".", "format", "(", "self", ".", "name", ")", ",", "file", "=", "sys", ".", "stderr", ",", "flush", "=", "True", ")", "else", ":", "willtag", "=", "False", "return", "willtag" ]
Check whether the feed should be tagged
[ "Check", "whether", "the", "feed", "should", "be", "tagged" ]
python
train
35.1875
mixmastamyk/console
console/core.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/core.py#L468-L478
def set_output(self, outfile):
        ''' Sets the output file, currently only useful with context-managers.

            Note:
                This function is experimental and may not last.
        '''
        if self._orig_stdout:    # restore Usted
            sys.stdout = self._orig_stdout

        self._stream = outfile
        sys.stdout = _LineWriter(self, self._stream, self.default)
[ "def", "set_output", "(", "self", ",", "outfile", ")", ":", "if", "self", ".", "_orig_stdout", ":", "# restore Usted", "sys", ".", "stdout", "=", "self", ".", "_orig_stdout", "self", ".", "_stream", "=", "outfile", "sys", ".", "stdout", "=", "_LineWriter", "(", "self", ",", "self", ".", "_stream", ",", "self", ".", "default", ")" ]
Sets the output file, currently only useful with context-managers.

            Note:
                This function is experimental and may not last.
[ "Sets", "the", "output", "file", "currently", "only", "useful", "with", "context", "-", "managers", "." ]
python
train
34.909091
pytroll/pyorbital
pyorbital/astronomy.py
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L134-L144
def cos_zen(utc_time, lon, lat): """Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*. utc_time: datetime.datetime instance of the UTC time lon and lat in degrees. """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) r_a, dec = sun_ra_dec(utc_time) h__ = _local_hour_angle(utc_time, lon, r_a) return (np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__))
[ "def", "cos_zen", "(", "utc_time", ",", "lon", ",", "lat", ")", ":", "lon", "=", "np", ".", "deg2rad", "(", "lon", ")", "lat", "=", "np", ".", "deg2rad", "(", "lat", ")", "r_a", ",", "dec", "=", "sun_ra_dec", "(", "utc_time", ")", "h__", "=", "_local_hour_angle", "(", "utc_time", ",", "lon", ",", "r_a", ")", "return", "(", "np", ".", "sin", "(", "lat", ")", "*", "np", ".", "sin", "(", "dec", ")", "+", "np", ".", "cos", "(", "lat", ")", "*", "np", ".", "cos", "(", "dec", ")", "*", "np", ".", "cos", "(", "h__", ")", ")" ]
Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*. utc_time: datetime.datetime instance of the UTC time lon and lat in degrees.
[ "Cosine", "of", "the", "sun", "-", "zenith", "angle", "for", "*", "lon", "*", "*", "lat", "*", "at", "*", "utc_time", "*", ".", "utc_time", ":", "datetime", ".", "datetime", "instance", "of", "the", "UTC", "time", "lon", "and", "lat", "in", "degrees", "." ]
python
train
36.636364
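A short usage sketch for the `cos_zen` record; the coordinates and time are illustrative, and longitude and latitude are passed in degrees as the docstring says:

from datetime import datetime
from pyorbital import astronomy

utc_time = datetime(2019, 6, 21, 10, 0, 0)
cos_theta = astronomy.cos_zen(utc_time, 24.94, 60.17)  # lon, lat for Helsinki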
hvac/hvac
hvac/api/auth_methods/okta.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/auth_methods/okta.py#L15-L57
def configure(self, org_name, api_token=None, base_url='okta.com', ttl=None, max_ttl=None, bypass_okta_mfa=False, mount_point=DEFAULT_MOUNT_POINT): """Configure the connection parameters for Okta. This path honors the distinction between the create and update capabilities inside ACL policies. Supported methods: POST: /auth/{mount_point}/config. Produces: 204 (empty body) :param org_name: Name of the organization to be used in the Okta API. :type org_name: str | unicode :param api_token: Okta API token. This is required to query Okta for user group membership. If this is not supplied only locally configured groups will be enabled. :type api_token: str | unicode :param base_url: If set, will be used as the base domain for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com. :type base_url: str | unicode :param ttl: Duration after which authentication will be expired. :type ttl: str | unicode :param max_ttl: Maximum duration after which authentication will be expired. :type max_ttl: str | unicode :param bypass_okta_mfa: Whether to bypass an Okta MFA request. Useful if using one of Vault's built-in MFA mechanisms, but this will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED. :type bypass_okta_mfa: bool :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ params = { 'org_name': org_name, 'api_token': api_token, 'base_url': base_url, 'ttl': ttl, 'max_ttl': max_ttl, 'bypass_okta_mfa': bypass_okta_mfa, } api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point) return self._adapter.post( url=api_path, json=params, )
[ "def", "configure", "(", "self", ",", "org_name", ",", "api_token", "=", "None", ",", "base_url", "=", "'okta.com'", ",", "ttl", "=", "None", ",", "max_ttl", "=", "None", ",", "bypass_okta_mfa", "=", "False", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'org_name'", ":", "org_name", ",", "'api_token'", ":", "api_token", ",", "'base_url'", ":", "base_url", ",", "'ttl'", ":", "ttl", ",", "'max_ttl'", ":", "max_ttl", ",", "'bypass_okta_mfa'", ":", "bypass_okta_mfa", ",", "}", "api_path", "=", "'/v1/auth/{mount_point}/config'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Configure the connection parameters for Okta. This path honors the distinction between the create and update capabilities inside ACL policies. Supported methods: POST: /auth/{mount_point}/config. Produces: 204 (empty body) :param org_name: Name of the organization to be used in the Okta API. :type org_name: str | unicode :param api_token: Okta API token. This is required to query Okta for user group membership. If this is not supplied only locally configured groups will be enabled. :type api_token: str | unicode :param base_url: If set, will be used as the base domain for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com. :type base_url: str | unicode :param ttl: Duration after which authentication will be expired. :type ttl: str | unicode :param max_ttl: Maximum duration after which authentication will be expired. :type max_ttl: str | unicode :param bypass_okta_mfa: Whether to bypass an Okta MFA request. Useful if using one of Vault's built-in MFA mechanisms, but this will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED. :type bypass_okta_mfa: bool :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
[ "Configure", "the", "connection", "parameters", "for", "Okta", "." ]
python
train
47.372093
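A hedged usage sketch for the `configure` record above, assuming an already-authenticated `hvac.Client` with the Okta auth method enabled at its default mount point; the URL, org name and tokens are dummies:

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='s.dummy')
client.auth.okta.configure(
    org_name='example-org',
    api_token='00dummy-okta-api-token',
    ttl='1h',
    max_ttl='24h',
)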
tetframework/Tonnikala
tonnikala/runtime/debug.py
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/runtime/debug.py#L162-L201
def translate_exception(exc_info, initial_skip=0): """If passed an exc_info it will automatically rewrite the exceptions all the way down to the correct line numbers and frames. """ tb = exc_info[2] frames = [] # skip some internal frames if wanted for x in range(initial_skip): if tb is not None: tb = tb.tb_next initial_tb = tb while tb is not None: # skip frames decorated with @internalcode. These are internal # calls we can't avoid and that are useless in template debugging # output. if tb.tb_frame.f_code in internal_code: tb = tb.tb_next continue # save a reference to the next frame if we override the current # one with a faked one. next = tb.tb_next # fake template exceptions template = tb.tb_frame.f_globals.get('__TK_template_info__') if template is not None: lineno = template.get_corresponding_lineno(tb.tb_lineno) tb = fake_exc_info(exc_info[:2] + (tb,), template.filename, lineno)[2] frames.append(make_frame_proxy(tb)) tb = next # if we don't have any exceptions in the frames left, we have to reraise it unchanged. XXX: can we backup here? when could this happen? if not frames: reraise(exc_info[0], exc_info[1], exc_info[2]) return ProcessedTraceback(exc_info[0], exc_info[1], frames)
[ "def", "translate_exception", "(", "exc_info", ",", "initial_skip", "=", "0", ")", ":", "tb", "=", "exc_info", "[", "2", "]", "frames", "=", "[", "]", "# skip some internal frames if wanted", "for", "x", "in", "range", "(", "initial_skip", ")", ":", "if", "tb", "is", "not", "None", ":", "tb", "=", "tb", ".", "tb_next", "initial_tb", "=", "tb", "while", "tb", "is", "not", "None", ":", "# skip frames decorated with @internalcode. These are internal", "# calls we can't avoid and that are useless in template debugging", "# output.", "if", "tb", ".", "tb_frame", ".", "f_code", "in", "internal_code", ":", "tb", "=", "tb", ".", "tb_next", "continue", "# save a reference to the next frame if we override the current", "# one with a faked one.", "next", "=", "tb", ".", "tb_next", "# fake template exceptions", "template", "=", "tb", ".", "tb_frame", ".", "f_globals", ".", "get", "(", "'__TK_template_info__'", ")", "if", "template", "is", "not", "None", ":", "lineno", "=", "template", ".", "get_corresponding_lineno", "(", "tb", ".", "tb_lineno", ")", "tb", "=", "fake_exc_info", "(", "exc_info", "[", ":", "2", "]", "+", "(", "tb", ",", ")", ",", "template", ".", "filename", ",", "lineno", ")", "[", "2", "]", "frames", ".", "append", "(", "make_frame_proxy", "(", "tb", ")", ")", "tb", "=", "next", "# if we don't have any exceptions in the frames left, we have to reraise it unchanged. XXX: can we backup here? when could this happen?", "if", "not", "frames", ":", "reraise", "(", "exc_info", "[", "0", "]", ",", "exc_info", "[", "1", "]", ",", "exc_info", "[", "2", "]", ")", "return", "ProcessedTraceback", "(", "exc_info", "[", "0", "]", ",", "exc_info", "[", "1", "]", ",", "frames", ")" ]
If passed an exc_info it will automatically rewrite the exceptions all the way down to the correct line numbers and frames.
[ "If", "passed", "an", "exc_info", "it", "will", "automatically", "rewrite", "the", "exceptions", "all", "the", "way", "down", "to", "the", "correct", "line", "numbers", "and", "frames", "." ]
python
train
35.675
pypyr/pypyr-cli
pypyr/context.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L405-L441
def get_formatted_as_type(self, value, default=None, out_type=str): """Return formatted value for input value, returns as out_type. Caveat emptor: if out_type is bool and value a string, return will be True if str is 'True'. It will be False for all other cases. Args: value: the value to format default: if value is None, set to this out_type: cast return as this type Returns: Formatted value of type out_type """ if value is None: value = default if isinstance(value, SpecialTagDirective): result = value.get_value(self) return types.cast_to_type(result, out_type) if isinstance(value, str): result = self.get_formatted_string(value) result_type = type(result) if out_type is result_type: # get_formatted_string result is already a string return result elif out_type is bool and result_type is str: # casting a str to bool is always True, hence special case. If # the str value is 'False'/'false', presumably user can # reasonably expect a bool False response. return result.lower() in ['true', '1', '1.0'] else: return out_type(result) else: return out_type(value)
[ "def", "get_formatted_as_type", "(", "self", ",", "value", ",", "default", "=", "None", ",", "out_type", "=", "str", ")", ":", "if", "value", "is", "None", ":", "value", "=", "default", "if", "isinstance", "(", "value", ",", "SpecialTagDirective", ")", ":", "result", "=", "value", ".", "get_value", "(", "self", ")", "return", "types", ".", "cast_to_type", "(", "result", ",", "out_type", ")", "if", "isinstance", "(", "value", ",", "str", ")", ":", "result", "=", "self", ".", "get_formatted_string", "(", "value", ")", "result_type", "=", "type", "(", "result", ")", "if", "out_type", "is", "result_type", ":", "# get_formatted_string result is already a string", "return", "result", "elif", "out_type", "is", "bool", "and", "result_type", "is", "str", ":", "# casting a str to bool is always True, hence special case. If", "# the str value is 'False'/'false', presumably user can", "# reasonably expect a bool False response.", "return", "result", ".", "lower", "(", ")", "in", "[", "'true'", ",", "'1'", ",", "'1.0'", "]", "else", ":", "return", "out_type", "(", "result", ")", "else", ":", "return", "out_type", "(", "value", ")" ]
Return formatted value for input value, returns as out_type. Caveat emptor: if out_type is bool and value a string, return will be True if str is 'True'. It will be False for all other cases. Args: value: the value to format default: if value is None, set to this out_type: cast return as this type Returns: Formatted value of type out_type
[ "Return", "formatted", "value", "for", "input", "value", "returns", "as", "out_type", "." ]
python
train
37.648649
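The `get_formatted_as_type` record lends itself to a small sketch; `Context` is a dict subclass, so the format expressions below resolve against its own keys (the values are made up):

from pypyr.context import Context

context = Context({'retries': '3', 'is_prod': 'False'})
context.get_formatted_as_type('{retries}', out_type=int)      # -> 3
context.get_formatted_as_type('{is_prod}', out_type=bool)     # -> False
context.get_formatted_as_type(None, default=5, out_type=int)  # -> 5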
ska-sa/katcp-python
katcp/fake_clients.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/fake_clients.py#L16-L59
def fake_KATCP_client_resource_factory( KATCPClientResourceClass, fake_options, resource_spec, *args, **kwargs): """Create a fake KATCPClientResource-like class and a fake-manager Parameters ---------- KATCPClientResourceClass : class Subclass of :class:`katcp.resource_client.KATCPClientResource` fake_options : dict Options for the faking process. Keys: allow_any_request : bool, default False (TODO not implemented behaves as if it were True) resource_spec, *args, **kwargs : passed to KATCPClientResourceClass A subclass of the passed-in KATCPClientResourceClass is created that replaces the internal InspecingClient instances with fakes using fake_inspecting_client_factory() based on the InspectingClient class used by KATCPClientResourceClass. Returns ------- (fake_katcp_client_resource, fake_katcp_client_resource_manager): fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance Bound to the `fake_katcp_client_resource` instance. """ # TODO Implement allow_any_request functionality. When True, any unknown request (even # if there is no fake implementation) should succeed allow_any_request = fake_options.get('allow_any_request', False) class FakeKATCPClientResource(KATCPClientResourceClass): def inspecting_client_factory(self, host, port, ioloop_set_to): real_instance = (super(FakeKATCPClientResource, self) .inspecting_client_factory(host, port, ioloop_set_to) ) fic, fic_manager = fake_inspecting_client_factory( real_instance.__class__, fake_options, host, port, ioloop=ioloop_set_to, auto_reconnect=self.auto_reconnect) self.fake_inspecting_client_manager = fic_manager return fic fkcr = FakeKATCPClientResource(resource_spec, *args, **kwargs) fkcr_manager = FakeKATCPClientResourceManager(fkcr) return (fkcr, fkcr_manager)
[ "def", "fake_KATCP_client_resource_factory", "(", "KATCPClientResourceClass", ",", "fake_options", ",", "resource_spec", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO Implement allow_any_request functionality. When True, any unknown request (even", "# if there is no fake implementation) should succeed", "allow_any_request", "=", "fake_options", ".", "get", "(", "'allow_any_request'", ",", "False", ")", "class", "FakeKATCPClientResource", "(", "KATCPClientResourceClass", ")", ":", "def", "inspecting_client_factory", "(", "self", ",", "host", ",", "port", ",", "ioloop_set_to", ")", ":", "real_instance", "=", "(", "super", "(", "FakeKATCPClientResource", ",", "self", ")", ".", "inspecting_client_factory", "(", "host", ",", "port", ",", "ioloop_set_to", ")", ")", "fic", ",", "fic_manager", "=", "fake_inspecting_client_factory", "(", "real_instance", ".", "__class__", ",", "fake_options", ",", "host", ",", "port", ",", "ioloop", "=", "ioloop_set_to", ",", "auto_reconnect", "=", "self", ".", "auto_reconnect", ")", "self", ".", "fake_inspecting_client_manager", "=", "fic_manager", "return", "fic", "fkcr", "=", "FakeKATCPClientResource", "(", "resource_spec", ",", "*", "args", ",", "*", "*", "kwargs", ")", "fkcr_manager", "=", "FakeKATCPClientResourceManager", "(", "fkcr", ")", "return", "(", "fkcr", ",", "fkcr_manager", ")" ]
Create a fake KATCPClientResource-like class and a fake-manager Parameters ---------- KATCPClientResourceClass : class Subclass of :class:`katcp.resource_client.KATCPClientResource` fake_options : dict Options for the faking process. Keys: allow_any_request : bool, default False (TODO not implemented behaves as if it were True) resource_spec, *args, **kwargs : passed to KATCPClientResourceClass A subclass of the passed-in KATCPClientResourceClass is created that replaces the internal InspecingClient instances with fakes using fake_inspecting_client_factory() based on the InspectingClient class used by KATCPClientResourceClass. Returns ------- (fake_katcp_client_resource, fake_katcp_client_resource_manager): fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance Bound to the `fake_katcp_client_resource` instance.
[ "Create", "a", "fake", "KATCPClientResource", "-", "like", "class", "and", "a", "fake", "-", "manager" ]
python
train
47.340909
trustar/trustar-python
trustar/models/report.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/models/report.py#L94-L123
def to_dict(self, remove_nones=False): """ Creates a dictionary representation of the object. :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``. :return: A dictionary representation of the report. """ if remove_nones: report_dict = super().to_dict(remove_nones=True) else: report_dict = { 'title': self.title, 'reportBody': self.body, 'timeBegan': self.time_began, 'externalUrl': self.external_url, 'distributionType': self._get_distribution_type(), 'externalTrackingId': self.external_id, 'enclaveIds': self.enclave_ids, 'created': self.created, 'updated': self.updated, } # id field might not be present if self.id is not None: report_dict['id'] = self.id else: report_dict['id'] = None return report_dict
[ "def", "to_dict", "(", "self", ",", "remove_nones", "=", "False", ")", ":", "if", "remove_nones", ":", "report_dict", "=", "super", "(", ")", ".", "to_dict", "(", "remove_nones", "=", "True", ")", "else", ":", "report_dict", "=", "{", "'title'", ":", "self", ".", "title", ",", "'reportBody'", ":", "self", ".", "body", ",", "'timeBegan'", ":", "self", ".", "time_began", ",", "'externalUrl'", ":", "self", ".", "external_url", ",", "'distributionType'", ":", "self", ".", "_get_distribution_type", "(", ")", ",", "'externalTrackingId'", ":", "self", ".", "external_id", ",", "'enclaveIds'", ":", "self", ".", "enclave_ids", ",", "'created'", ":", "self", ".", "created", ",", "'updated'", ":", "self", ".", "updated", ",", "}", "# id field might not be present", "if", "self", ".", "id", "is", "not", "None", ":", "report_dict", "[", "'id'", "]", "=", "self", ".", "id", "else", ":", "report_dict", "[", "'id'", "]", "=", "None", "return", "report_dict" ]
Creates a dictionary representation of the object. :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``. :return: A dictionary representation of the report.
[ "Creates", "a", "dictionary", "representation", "of", "the", "object", "." ]
python
train
34.466667
CalebBell/ht
ht/conduction.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conduction.py#L234-L269
def k_to_R_value(k, SI=True): r'''Returns the R-value of a substance given its thermal conductivity, Will return R-value in SI units unless SI is false. SI units are m^2 K/(W*inch); Imperial units of R-value are ft^2 deg F*h/(BTU*inch). Parameters ---------- k : float Thermal conductivity of a substance [W/m/K] SI : bool, optional Whether to use the SI conversion or not Returns ------- R_value : float R-value of a substance [m^2 K/(W*inch) or ft^2 deg F*h/(BTU*inch)] Notes ----- Provides the reverse conversion of R_value_to_k. Examples -------- >>> k_to_R_value(R_value_to_k(0.12)), k_to_R_value(R_value_to_k(0.71, SI=False), SI=False) (0.11999999999999998, 0.7099999999999999) References ---------- .. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition. Berlin; New York:: Springer, 2010. ''' r = k_to_thermal_resistivity(k) if SI: return r*inch else: return r/(foot**2*degree_Fahrenheit*hour/Btu/inch)
[ "def", "k_to_R_value", "(", "k", ",", "SI", "=", "True", ")", ":", "r", "=", "k_to_thermal_resistivity", "(", "k", ")", "if", "SI", ":", "return", "r", "*", "inch", "else", ":", "return", "r", "/", "(", "foot", "**", "2", "*", "degree_Fahrenheit", "*", "hour", "/", "Btu", "/", "inch", ")" ]
r'''Returns the R-value of a substance given its thermal conductivity, Will return R-value in SI units unless SI is false. SI units are m^2 K/(W*inch); Imperial units of R-value are ft^2 deg F*h/(BTU*inch). Parameters ---------- k : float Thermal conductivity of a substance [W/m/K] SI : bool, optional Whether to use the SI conversion or not Returns ------- R_value : float R-value of a substance [m^2 K/(W*inch) or ft^2 deg F*h/(BTU*inch)] Notes ----- Provides the reverse conversion of R_value_to_k. Examples -------- >>> k_to_R_value(R_value_to_k(0.12)), k_to_R_value(R_value_to_k(0.71, SI=False), SI=False) (0.11999999999999998, 0.7099999999999999) References ---------- .. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition. Berlin; New York:: Springer, 2010.
[ "r", "Returns", "the", "R", "-", "value", "of", "a", "substance", "given", "its", "thermal", "conductivity", "Will", "return", "R", "-", "value", "in", "SI", "units", "unless", "SI", "is", "false", ".", "SI", "units", "are", "m^2", "K", "/", "(", "W", "*", "inch", ")", ";", "Imperial", "units", "of", "R", "-", "value", "are", "ft^2", "deg", "F", "*", "h", "/", "(", "BTU", "*", "inch", ")", "." ]
python
train
28.638889
explosion/spaCy
spacy/_ml.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L84-L99
def with_cpu(ops, model): """Wrap a model that should run on CPU, transferring inputs and outputs as necessary.""" model.to_cpu() def with_cpu_forward(inputs, drop=0.0): cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop) gpu_outputs = _to_device(ops, cpu_outputs) def with_cpu_backprop(d_outputs, sgd=None): cpu_d_outputs = _to_cpu(d_outputs) return backprop(cpu_d_outputs, sgd=sgd) return gpu_outputs, with_cpu_backprop return wrap(with_cpu_forward, model)
[ "def", "with_cpu", "(", "ops", ",", "model", ")", ":", "model", ".", "to_cpu", "(", ")", "def", "with_cpu_forward", "(", "inputs", ",", "drop", "=", "0.0", ")", ":", "cpu_outputs", ",", "backprop", "=", "model", ".", "begin_update", "(", "_to_cpu", "(", "inputs", ")", ",", "drop", "=", "drop", ")", "gpu_outputs", "=", "_to_device", "(", "ops", ",", "cpu_outputs", ")", "def", "with_cpu_backprop", "(", "d_outputs", ",", "sgd", "=", "None", ")", ":", "cpu_d_outputs", "=", "_to_cpu", "(", "d_outputs", ")", "return", "backprop", "(", "cpu_d_outputs", ",", "sgd", "=", "sgd", ")", "return", "gpu_outputs", ",", "with_cpu_backprop", "return", "wrap", "(", "with_cpu_forward", ",", "model", ")" ]
Wrap a model that should run on CPU, transferring inputs and outputs as necessary.
[ "Wrap", "a", "model", "that", "should", "run", "on", "CPU", "transferring", "inputs", "and", "outputs", "as", "necessary", "." ]
python
train
33.875
inasafe/inasafe
safe/gui/tools/extent_selector_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/extent_selector_dialog.py#L321-L331
def bookmarks_index_changed(self): """Update the UI when the bookmarks combobox has changed.""" index = self.bookmarks_list.currentIndex() if index >= 0: self.tool.reset() rectangle = self.bookmarks_list.itemData(index) self.tool.set_rectangle(rectangle) self.canvas.setExtent(rectangle) self.ok_button.setEnabled(True) else: self.ok_button.setDisabled(True)
[ "def", "bookmarks_index_changed", "(", "self", ")", ":", "index", "=", "self", ".", "bookmarks_list", ".", "currentIndex", "(", ")", "if", "index", ">=", "0", ":", "self", ".", "tool", ".", "reset", "(", ")", "rectangle", "=", "self", ".", "bookmarks_list", ".", "itemData", "(", "index", ")", "self", ".", "tool", ".", "set_rectangle", "(", "rectangle", ")", "self", ".", "canvas", ".", "setExtent", "(", "rectangle", ")", "self", ".", "ok_button", ".", "setEnabled", "(", "True", ")", "else", ":", "self", ".", "ok_button", ".", "setDisabled", "(", "True", ")" ]
Update the UI when the bookmarks combobox has changed.
[ "Update", "the", "UI", "when", "the", "bookmarks", "combobox", "has", "changed", "." ]
python
train
41.090909
apache/incubator-mxnet
python/mxnet/contrib/autograd.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/autograd.py#L195-L230
def grad(func, argnum=None): """Return function that computes gradient of arguments. Parameters ---------- func: a python function The forward (loss) function. argnum: an int or a list of int The index of argument to calculate gradient for. Returns ------- grad_func: a python function A function that would compute the gradient of arguments. Examples -------- >>> # autograd supports dynamic graph which is changed >>> # every instance >>> def func(x): >>> r = random.randint(0, 1) >>> if r % 2: >>> return x**2 >>> else: >>> return x/3 >>> # use `grad(func)` to get the gradient function >>> for x in range(10): >>> grad_func = grad(func) >>> inputs = nd.array([[1, 2, 3], [4, 5, 6]]) >>> grad_vals = grad_func(inputs) """ grad_with_loss_func = grad_and_loss(func, argnum) @functools.wraps(grad_with_loss_func) def wrapped(*args): return grad_with_loss_func(*args)[0] return wrapped
[ "def", "grad", "(", "func", ",", "argnum", "=", "None", ")", ":", "grad_with_loss_func", "=", "grad_and_loss", "(", "func", ",", "argnum", ")", "@", "functools", ".", "wraps", "(", "grad_with_loss_func", ")", "def", "wrapped", "(", "*", "args", ")", ":", "return", "grad_with_loss_func", "(", "*", "args", ")", "[", "0", "]", "return", "wrapped" ]
Return function that computes gradient of arguments. Parameters ---------- func: a python function The forward (loss) function. argnum: an int or a list of int The index of argument to calculate gradient for. Returns ------- grad_func: a python function A function that would compute the gradient of arguments. Examples -------- >>> # autograd supports dynamic graph which is changed >>> # every instance >>> def func(x): >>> r = random.randint(0, 1) >>> if r % 2: >>> return x**2 >>> else: >>> return x/3 >>> # use `grad(func)` to get the gradient function >>> for x in range(10): >>> grad_func = grad(func) >>> inputs = nd.array([[1, 2, 3], [4, 5, 6]]) >>> grad_vals = grad_func(inputs)
[ "Return", "function", "that", "computes", "gradient", "of", "arguments", "." ]
python
train
28.777778
Azure/azure-uamqp-python
uamqp/message.py
https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/message.py#L495-L525
def gather(self): """Return all the messages represented by this object. This will convert the batch data into individual Message objects, which may be one or more if multi_messages is set to `True`. :rtype: list[~uamqp.message.Message] """ if self._multi_messages: return self._multi_message_generator() new_message = self._create_batch_message() message_size = new_message.get_message_encoded_size() + self.size_offset body_size = 0 for data in self._body_gen: message_bytes = None try: if not data.application_properties: # Message-like object data.application_properties = self.application_properties message_bytes = data.encode_message() except AttributeError: # raw data wrap_message = Message(body=data, application_properties=self.application_properties) message_bytes = wrap_message.encode_message() body_size += len(message_bytes) if (body_size + message_size) > self.max_message_length: raise ValueError( "Data set too large for a single message." "Set multi_messages to True to split data across multiple messages.") new_message._body.append(message_bytes) # pylint: disable=protected-access new_message.on_send_complete = self.on_send_complete return [new_message]
[ "def", "gather", "(", "self", ")", ":", "if", "self", ".", "_multi_messages", ":", "return", "self", ".", "_multi_message_generator", "(", ")", "new_message", "=", "self", ".", "_create_batch_message", "(", ")", "message_size", "=", "new_message", ".", "get_message_encoded_size", "(", ")", "+", "self", ".", "size_offset", "body_size", "=", "0", "for", "data", "in", "self", ".", "_body_gen", ":", "message_bytes", "=", "None", "try", ":", "if", "not", "data", ".", "application_properties", ":", "# Message-like object", "data", ".", "application_properties", "=", "self", ".", "application_properties", "message_bytes", "=", "data", ".", "encode_message", "(", ")", "except", "AttributeError", ":", "# raw data", "wrap_message", "=", "Message", "(", "body", "=", "data", ",", "application_properties", "=", "self", ".", "application_properties", ")", "message_bytes", "=", "wrap_message", ".", "encode_message", "(", ")", "body_size", "+=", "len", "(", "message_bytes", ")", "if", "(", "body_size", "+", "message_size", ")", ">", "self", ".", "max_message_length", ":", "raise", "ValueError", "(", "\"Data set too large for a single message.\"", "\"Set multi_messages to True to split data across multiple messages.\"", ")", "new_message", ".", "_body", ".", "append", "(", "message_bytes", ")", "# pylint: disable=protected-access", "new_message", ".", "on_send_complete", "=", "self", ".", "on_send_complete", "return", "[", "new_message", "]" ]
Return all the messages represented by this object. This will convert the batch data into individual Message objects, which may be one or more if multi_messages is set to `True`. :rtype: list[~uamqp.message.Message]
[ "Return", "all", "the", "messages", "represented", "by", "this", "object", ".", "This", "will", "convert", "the", "batch", "data", "into", "individual", "Message", "objects", "which", "may", "be", "one", "or", "more", "if", "multi_messages", "is", "set", "to", "True", "." ]
python
train
47.516129
dbtsai/python-mimeparse
mimeparse.py
https://github.com/dbtsai/python-mimeparse/blob/cf605c0994149b1a1936b3a8a597203fe3fbb62e/mimeparse.py#L155-L184
def best_match(supported, header): """Return mime-type with the highest quality ('q') from list of candidates. Takes a list of supported mime-types and finds the best match for all the media-ranges listed in header. The value of header must be a string that conforms to the format of the HTTP Accept: header. The value of 'supported' is a list of mime-types. The list of supported mime-types should be sorted in order of increasing desirability, in case of a situation where there is a tie. >>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1') 'text/xml' :rtype: str """ split_header = _filter_blank(header.split(',')) parsed_header = [parse_media_range(r) for r in split_header] weighted_matches = [] pos = 0 for mime_type in supported: weighted_matches.append(( quality_and_fitness_parsed(mime_type, parsed_header), pos, mime_type )) pos += 1 weighted_matches.sort() return weighted_matches[-1][0][0] and weighted_matches[-1][2] or ''
[ "def", "best_match", "(", "supported", ",", "header", ")", ":", "split_header", "=", "_filter_blank", "(", "header", ".", "split", "(", "','", ")", ")", "parsed_header", "=", "[", "parse_media_range", "(", "r", ")", "for", "r", "in", "split_header", "]", "weighted_matches", "=", "[", "]", "pos", "=", "0", "for", "mime_type", "in", "supported", ":", "weighted_matches", ".", "append", "(", "(", "quality_and_fitness_parsed", "(", "mime_type", ",", "parsed_header", ")", ",", "pos", ",", "mime_type", ")", ")", "pos", "+=", "1", "weighted_matches", ".", "sort", "(", ")", "return", "weighted_matches", "[", "-", "1", "]", "[", "0", "]", "[", "0", "]", "and", "weighted_matches", "[", "-", "1", "]", "[", "2", "]", "or", "''" ]
Return mime-type with the highest quality ('q') from list of candidates. Takes a list of supported mime-types and finds the best match for all the media-ranges listed in header. The value of header must be a string that conforms to the format of the HTTP Accept: header. The value of 'supported' is a list of mime-types. The list of supported mime-types should be sorted in order of increasing desirability, in case of a situation where there is a tie. >>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1') 'text/xml' :rtype: str
[ "Return", "mime", "-", "type", "with", "the", "highest", "quality", "(", "q", ")", "from", "list", "of", "candidates", "." ]
python
train
36.3
unbservices/clams
clams/__init__.py
https://github.com/unbservices/clams/blob/2ae0a36eb8f82a153d27f74ef37688f976952789/clams/__init__.py#L360-L363
def _attach_arguments(self): """Add the registered arguments to the parser.""" for arg in self.arguments: self.parser.add_argument(*arg[0], **arg[1])
[ "def", "_attach_arguments", "(", "self", ")", ":", "for", "arg", "in", "self", ".", "arguments", ":", "self", ".", "parser", ".", "add_argument", "(", "*", "arg", "[", "0", "]", ",", "*", "*", "arg", "[", "1", "]", ")" ]
Add the registered arguments to the parser.
[ "Add", "the", "registered", "arguments", "to", "the", "parser", "." ]
python
train
43.5
angr/angr
angr/analyses/reassembler.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/reassembler.py#L1223-L1236
def desymbolize(self): """ We believe this was a pointer and symbolized it before. Now we want to desymbolize it. The following actions are performed: - Reload content from memory - Mark the sort as 'unknown' :return: None """ self.sort = 'unknown' content = self.binary.fast_memory_load(self.addr, self.size, bytes) self.content = [ content ]
[ "def", "desymbolize", "(", "self", ")", ":", "self", ".", "sort", "=", "'unknown'", "content", "=", "self", ".", "binary", ".", "fast_memory_load", "(", "self", ".", "addr", ",", "self", ".", "size", ",", "bytes", ")", "self", ".", "content", "=", "[", "content", "]" ]
We believe this was a pointer and symbolized it before. Now we want to desymbolize it. The following actions are performed: - Reload content from memory - Mark the sort as 'unknown' :return: None
[ "We", "believe", "this", "was", "a", "pointer", "and", "symbolized", "it", "before", ".", "Now", "we", "want", "to", "desymbolize", "it", "." ]
python
train
29.5
pkgw/pwkit
pwkit/fk10.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/fk10.py#L808-L835
def find_rt_coefficients_tot_intens(self, depth0=None): """Figure out total-intensity emission and absorption coefficients for the current parameters. **Argument** *depth0* (default None) A first guess to use for a good integration depth, in cm. If None, the most recent value is used. **Return value** A tuple ``(j_I, alpha_I)``, where: *j_I* The total intensity emission coefficient, in erg/s/cm^3/Hz/sr. *alpha_I* The total intensity absorption coefficient, in cm^-1. See :meth:`find_rt_coefficients` for an explanation how this routine works. This version merely postprocesses the results from that method to convert the coefficients to refer to total intensity. """ j_O, alpha_O, j_X, alpha_X = self.find_rt_coefficients(depth0=depth0) j_I = j_O + j_X alpha_I = 0.5 * (alpha_O + alpha_X) # uhh... right? return (j_I, alpha_I)
[ "def", "find_rt_coefficients_tot_intens", "(", "self", ",", "depth0", "=", "None", ")", ":", "j_O", ",", "alpha_O", ",", "j_X", ",", "alpha_X", "=", "self", ".", "find_rt_coefficients", "(", "depth0", "=", "depth0", ")", "j_I", "=", "j_O", "+", "j_X", "alpha_I", "=", "0.5", "*", "(", "alpha_O", "+", "alpha_X", ")", "# uhh... right?", "return", "(", "j_I", ",", "alpha_I", ")" ]
Figure out total-intensity emission and absorption coefficients for the current parameters. **Argument** *depth0* (default None) A first guess to use for a good integration depth, in cm. If None, the most recent value is used. **Return value** A tuple ``(j_I, alpha_I)``, where: *j_I* The total intensity emission coefficient, in erg/s/cm^3/Hz/sr. *alpha_I* The total intensity absorption coefficient, in cm^-1. See :meth:`find_rt_coefficients` for an explanation how this routine works. This version merely postprocesses the results from that method to convert the coefficients to refer to total intensity.
[ "Figure", "out", "total", "-", "intensity", "emission", "and", "absorption", "coefficients", "for", "the", "current", "parameters", "." ]
python
train
34.928571
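A small, self-contained sketch of the post-processing rule this docstring describes (total-intensity coefficients built from the O- and X-mode ones). The standalone helper and the numbers are illustrative only and are not part of the pwkit API.

def to_total_intensity(j_O, alpha_O, j_X, alpha_X):
    # Emission coefficients add; absorption is the mean of the two modes,
    # matching the combination used in find_rt_coefficients_tot_intens above.
    j_I = j_O + j_X
    alpha_I = 0.5 * (alpha_O + alpha_X)
    return j_I, alpha_I

# Made-up O/X-mode values in the docstring's units (erg/s/cm^3/Hz/sr and cm^-1):
j_I, alpha_I = to_total_intensity(1.2e-20, 3.0e-9, 0.8e-20, 2.0e-9)
print(j_I, alpha_I)  # 2e-20, 2.5e-09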
GNS3/gns3-server
gns3server/controller/project.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/project.py#L730-L833
def open(self): """ Load topology elements """ if self._status == "opened": return self.reset() self._loading = True self._status = "opened" path = self._topology_file() if not os.path.exists(path): self._loading = False return try: shutil.copy(path, path + ".backup") except OSError: pass try: project_data = load_topology(path) #load meta of project keys_to_load = [ "auto_start", "auto_close", "auto_open", "scene_height", "scene_width", "zoom", "show_layers", "snap_to_grid", "show_grid", "show_interface_labels" ] for key in keys_to_load: val = project_data.get(key, None) if val is not None: setattr(self, key, val) topology = project_data["topology"] for compute in topology.get("computes", []): yield from self.controller.add_compute(**compute) for node in topology.get("nodes", []): compute = self.controller.get_compute(node.pop("compute_id")) name = node.pop("name") node_id = node.pop("node_id", str(uuid.uuid4())) yield from self.add_node(compute, name, node_id, dump=False, **node) for link_data in topology.get("links", []): if 'link_id' not in link_data.keys(): # skip the link continue link = yield from self.add_link(link_id=link_data["link_id"]) if "filters" in link_data: yield from link.update_filters(link_data["filters"]) for node_link in link_data["nodes"]: node = self.get_node(node_link["node_id"]) port = node.get_port(node_link["adapter_number"], node_link["port_number"]) if port is None: log.warning("Port {}/{} for {} not found".format(node_link["adapter_number"], node_link["port_number"], node.name)) continue if port.link is not None: log.warning("Port {}/{} is already connected to link ID {}".format(node_link["adapter_number"], node_link["port_number"], port.link.id)) continue yield from link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False) if len(link.nodes) != 2: # a link should have 2 attached nodes, this can happen with corrupted projects yield from self.delete_link(link.id, force_delete=True) for drawing_data in topology.get("drawings", []): yield from self.add_drawing(dump=False, **drawing_data) self.dump() # We catch all error to be able to rollback the .gns3 to the previous state except Exception as e: for compute in list(self._project_created_on_compute): try: yield from compute.post("/projects/{}/close".format(self._id)) # We don't care if a compute is down at this step except (ComputeError, aiohttp.web.HTTPNotFound, aiohttp.web.HTTPConflict, aiohttp.ServerDisconnectedError): pass try: if os.path.exists(path + ".backup"): shutil.copy(path + ".backup", path) except (PermissionError, OSError): pass self._status = "closed" self._loading = False if isinstance(e, ComputeError): raise aiohttp.web.HTTPConflict(text=str(e)) else: raise e try: os.remove(path + ".backup") except OSError: pass self._loading = False # Should we start the nodes when project is open if self._auto_start: # Start all in the background without waiting for completion # we ignore errors because we want to let the user open # their project and fix it asyncio.async(self.start_all())
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "_status", "==", "\"opened\"", ":", "return", "self", ".", "reset", "(", ")", "self", ".", "_loading", "=", "True", "self", ".", "_status", "=", "\"opened\"", "path", "=", "self", ".", "_topology_file", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "self", ".", "_loading", "=", "False", "return", "try", ":", "shutil", ".", "copy", "(", "path", ",", "path", "+", "\".backup\"", ")", "except", "OSError", ":", "pass", "try", ":", "project_data", "=", "load_topology", "(", "path", ")", "#load meta of project", "keys_to_load", "=", "[", "\"auto_start\"", ",", "\"auto_close\"", ",", "\"auto_open\"", ",", "\"scene_height\"", ",", "\"scene_width\"", ",", "\"zoom\"", ",", "\"show_layers\"", ",", "\"snap_to_grid\"", ",", "\"show_grid\"", ",", "\"show_interface_labels\"", "]", "for", "key", "in", "keys_to_load", ":", "val", "=", "project_data", ".", "get", "(", "key", ",", "None", ")", "if", "val", "is", "not", "None", ":", "setattr", "(", "self", ",", "key", ",", "val", ")", "topology", "=", "project_data", "[", "\"topology\"", "]", "for", "compute", "in", "topology", ".", "get", "(", "\"computes\"", ",", "[", "]", ")", ":", "yield", "from", "self", ".", "controller", ".", "add_compute", "(", "*", "*", "compute", ")", "for", "node", "in", "topology", ".", "get", "(", "\"nodes\"", ",", "[", "]", ")", ":", "compute", "=", "self", ".", "controller", ".", "get_compute", "(", "node", ".", "pop", "(", "\"compute_id\"", ")", ")", "name", "=", "node", ".", "pop", "(", "\"name\"", ")", "node_id", "=", "node", ".", "pop", "(", "\"node_id\"", ",", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", "yield", "from", "self", ".", "add_node", "(", "compute", ",", "name", ",", "node_id", ",", "dump", "=", "False", ",", "*", "*", "node", ")", "for", "link_data", "in", "topology", ".", "get", "(", "\"links\"", ",", "[", "]", ")", ":", "if", "'link_id'", "not", "in", "link_data", ".", "keys", "(", ")", ":", "# skip the link", "continue", "link", "=", "yield", "from", "self", ".", "add_link", "(", "link_id", "=", "link_data", "[", "\"link_id\"", "]", ")", "if", "\"filters\"", "in", "link_data", ":", "yield", "from", "link", ".", "update_filters", "(", "link_data", "[", "\"filters\"", "]", ")", "for", "node_link", "in", "link_data", "[", "\"nodes\"", "]", ":", "node", "=", "self", ".", "get_node", "(", "node_link", "[", "\"node_id\"", "]", ")", "port", "=", "node", ".", "get_port", "(", "node_link", "[", "\"adapter_number\"", "]", ",", "node_link", "[", "\"port_number\"", "]", ")", "if", "port", "is", "None", ":", "log", ".", "warning", "(", "\"Port {}/{} for {} not found\"", ".", "format", "(", "node_link", "[", "\"adapter_number\"", "]", ",", "node_link", "[", "\"port_number\"", "]", ",", "node", ".", "name", ")", ")", "continue", "if", "port", ".", "link", "is", "not", "None", ":", "log", ".", "warning", "(", "\"Port {}/{} is already connected to link ID {}\"", ".", "format", "(", "node_link", "[", "\"adapter_number\"", "]", ",", "node_link", "[", "\"port_number\"", "]", ",", "port", ".", "link", ".", "id", ")", ")", "continue", "yield", "from", "link", ".", "add_node", "(", "node", ",", "node_link", "[", "\"adapter_number\"", "]", ",", "node_link", "[", "\"port_number\"", "]", ",", "label", "=", "node_link", ".", "get", "(", "\"label\"", ")", ",", "dump", "=", "False", ")", "if", "len", "(", "link", ".", "nodes", ")", "!=", "2", ":", "# a link should have 2 attached nodes, this can happen with corrupted projects", "yield", 
"from", "self", ".", "delete_link", "(", "link", ".", "id", ",", "force_delete", "=", "True", ")", "for", "drawing_data", "in", "topology", ".", "get", "(", "\"drawings\"", ",", "[", "]", ")", ":", "yield", "from", "self", ".", "add_drawing", "(", "dump", "=", "False", ",", "*", "*", "drawing_data", ")", "self", ".", "dump", "(", ")", "# We catch all error to be able to rollback the .gns3 to the previous state", "except", "Exception", "as", "e", ":", "for", "compute", "in", "list", "(", "self", ".", "_project_created_on_compute", ")", ":", "try", ":", "yield", "from", "compute", ".", "post", "(", "\"/projects/{}/close\"", ".", "format", "(", "self", ".", "_id", ")", ")", "# We don't care if a compute is down at this step", "except", "(", "ComputeError", ",", "aiohttp", ".", "web", ".", "HTTPNotFound", ",", "aiohttp", ".", "web", ".", "HTTPConflict", ",", "aiohttp", ".", "ServerDisconnectedError", ")", ":", "pass", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "path", "+", "\".backup\"", ")", ":", "shutil", ".", "copy", "(", "path", "+", "\".backup\"", ",", "path", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "pass", "self", ".", "_status", "=", "\"closed\"", "self", ".", "_loading", "=", "False", "if", "isinstance", "(", "e", ",", "ComputeError", ")", ":", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "str", "(", "e", ")", ")", "else", ":", "raise", "e", "try", ":", "os", ".", "remove", "(", "path", "+", "\".backup\"", ")", "except", "OSError", ":", "pass", "self", ".", "_loading", "=", "False", "# Should we start the nodes when project is open", "if", "self", ".", "_auto_start", ":", "# Start all in the background without waiting for completion", "# we ignore errors because we want to let the user open", "# their project and fix it", "asyncio", ".", "async", "(", "self", ".", "start_all", "(", ")", ")" ]
Load topology elements
[ "Load", "topology", "elements" ]
python
train
41.740385
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/loader.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/loader.py#L113-L129
def load_file_to_str(path): # type: (str) -> str """ Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file """ with open(path, 'rt') as f: string = f.read().replace(linesep, '') if not string: raise LoadError('%s file is empty!' % path) return string
[ "def", "load_file_to_str", "(", "path", ")", ":", "# type: (str) -> str", "with", "open", "(", "path", ",", "'rt'", ")", "as", "f", ":", "string", "=", "f", ".", "read", "(", ")", ".", "replace", "(", "linesep", ",", "''", ")", "if", "not", "string", ":", "raise", "LoadError", "(", "'%s file is empty!'", "%", "path", ")", "return", "string" ]
Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file
[ "Load", "file", "into", "a", "string", "removing", "newlines" ]
python
train
21.294118
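A quick usage sketch for load_file_to_str. The import path is taken from the record's path field but should be treated as an assumption, and the newline-stripping behaviour shown assumes a Unix os.linesep.

from hdx.utilities.loader import load_file_to_str  # assumed import path

with open('example.txt', 'w') as f:
    f.write('first line\nsecond line\n')

text = load_file_to_str('example.txt')
print(text)  # 'first linesecond line' -- os.linesep occurrences are removed
# An empty file would raise LoadError instead of returning ''.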
7sDream/zhihu-py3
zhihu/answer.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/answer.py#L362-L374
def refresh(self): """刷新 Answer object 的属性. 例如赞同数增加了, 先调用 ``refresh()`` 再访问 upvote_num属性, 可获得更新后的赞同数. :return: None """ super().refresh() self._html = None self._upvote_num = None self._content = None self._collect_num = None self._comment_num = None
[ "def", "refresh", "(", "self", ")", ":", "super", "(", ")", ".", "refresh", "(", ")", "self", ".", "_html", "=", "None", "self", ".", "_upvote_num", "=", "None", "self", ".", "_content", "=", "None", "self", ".", "_collect_num", "=", "None", "self", ".", "_comment_num", "=", "None" ]
刷新 Answer object 的属性. 例如赞同数增加了, 先调用 ``refresh()`` 再访问 upvote_num属性, 可获得更新后的赞同数. :return: None
[ "刷新", "Answer", "object", "的属性", ".", "例如赞同数增加了", "先调用", "refresh", "()", "再访问", "upvote_num属性", "可获得更新后的赞同数", ".", ":", "return", ":", "None" ]
python
train
25.923077
pyviz/holoviews
holoviews/core/options.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/options.py#L1252-L1266
def transfer_options(cls, obj, new_obj, backend=None): """ Transfers options for all backends from one object to another. Drops any options defined in the supplied drop list. """ backend = cls.current_backend if backend is None else backend type_name = type(new_obj).__name__ group = type_name if obj.group == type(obj).__name__ else obj.group spec = '.'.join([s for s in (type_name, group, obj.label) if s]) options = [] for group in Options._option_groups: opts = cls.lookup_options(backend, obj, group) if opts and opts.kwargs: options.append(Options(group, **opts.kwargs)) if options: StoreOptions.set_options(new_obj, {spec: options}, backend)
[ "def", "transfer_options", "(", "cls", ",", "obj", ",", "new_obj", ",", "backend", "=", "None", ")", ":", "backend", "=", "cls", ".", "current_backend", "if", "backend", "is", "None", "else", "backend", "type_name", "=", "type", "(", "new_obj", ")", ".", "__name__", "group", "=", "type_name", "if", "obj", ".", "group", "==", "type", "(", "obj", ")", ".", "__name__", "else", "obj", ".", "group", "spec", "=", "'.'", ".", "join", "(", "[", "s", "for", "s", "in", "(", "type_name", ",", "group", ",", "obj", ".", "label", ")", "if", "s", "]", ")", "options", "=", "[", "]", "for", "group", "in", "Options", ".", "_option_groups", ":", "opts", "=", "cls", ".", "lookup_options", "(", "backend", ",", "obj", ",", "group", ")", "if", "opts", "and", "opts", ".", "kwargs", ":", "options", ".", "append", "(", "Options", "(", "group", ",", "*", "*", "opts", ".", "kwargs", ")", ")", "if", "options", ":", "StoreOptions", ".", "set_options", "(", "new_obj", ",", "{", "spec", ":", "options", "}", ",", "backend", ")" ]
Transfers options for all backends from one object to another. Drops any options defined in the supplied drop list.
[ "Transfers", "options", "for", "all", "backends", "from", "one", "object", "to", "another", ".", "Drops", "any", "options", "defined", "in", "the", "supplied", "drop", "list", "." ]
python
train
50.533333
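A hedged usage sketch for transfer_options. It assumes the classmethod is exposed on holoviews' Store (consistent with the lookup_options/StoreOptions calls in the body) and that a plotting backend has been loaded so the styling option is valid.

import holoviews as hv
from holoviews.core.options import Store

hv.extension('bokeh')                             # a backend must be active for styling options
curve_a = hv.Curve([1, 2, 3]).opts(color='red')   # options recorded against curve_a
curve_b = hv.Curve([3, 2, 1])                     # no custom options yet
Store.transfer_options(curve_a, curve_b)          # curve_b now carries curve_a's options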
debrouwere/google-analytics
googleanalytics/commands/query.py
https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/commands/query.py#L102-L130
def query(scope, blueprint, debug, output, with_metadata, realtime, **description): """ e.g. googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \ query pageviews \ --start yesterday --limit -10 --sort -pageviews \ --dimensions pagepath \ --debug """ if realtime: description['type'] = 'realtime' if blueprint: queries = from_blueprint(scope, blueprint) else: if not isinstance(scope, ga.account.Profile): raise ValueError("Account and webproperty needed for query.") queries = from_args(scope, **description) for query in queries: if debug: click.echo(query.build()) report = query.serialize(format=output, with_metadata=with_metadata) click.echo(report)
[ "def", "query", "(", "scope", ",", "blueprint", ",", "debug", ",", "output", ",", "with_metadata", ",", "realtime", ",", "*", "*", "description", ")", ":", "if", "realtime", ":", "description", "[", "'type'", "]", "=", "'realtime'", "if", "blueprint", ":", "queries", "=", "from_blueprint", "(", "scope", ",", "blueprint", ")", "else", ":", "if", "not", "isinstance", "(", "scope", ",", "ga", ".", "account", ".", "Profile", ")", ":", "raise", "ValueError", "(", "\"Account and webproperty needed for query.\"", ")", "queries", "=", "from_args", "(", "scope", ",", "*", "*", "description", ")", "for", "query", "in", "queries", ":", "if", "debug", ":", "click", ".", "echo", "(", "query", ".", "build", "(", ")", ")", "report", "=", "query", ".", "serialize", "(", "format", "=", "output", ",", "with_metadata", "=", "with_metadata", ")", "click", ".", "echo", "(", "report", ")" ]
e.g. googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \ query pageviews \ --start yesterday --limit -10 --sort -pageviews \ --dimensions pagepath \ --debug
[ "e", ".", "g", "." ]
python
train
29
google/grumpy
third_party/stdlib/getopt.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/getopt.py#L51-L92
def getopt(args, shortopts, longopts = []): """getopt(args, options[, long_options]) -> opts, args Parses command line options and parameter list. args is the argument list to be parsed, without the leading reference to the running program. Typically, this means "sys.argv[1:]". shortopts is the string of option letters that the script wants to recognize, with options that require an argument followed by a colon (i.e., the same format that Unix getopt() uses). If specified, longopts is a list of strings with the names of the long options which should be supported. The leading '--' characters should not be included in the option name. Options which require an argument should be followed by an equal sign ('='). The return value consists of two elements: the first is a list of (option, value) pairs; the second is the list of program arguments left after the option list was stripped (this is a trailing slice of the first argument). Each option-and-value pair returned has the option as its first element, prefixed with a hyphen (e.g., '-x'), and the option argument as its second element, or an empty string if the option has no argument. The options occur in the list in the same order in which they were found, thus allowing multiple occurrences. Long and short options may be mixed. """ opts = [] if type(longopts) == type(""): longopts = [longopts] else: longopts = list(longopts) while args and args[0].startswith('-') and args[0] != '-': if args[0] == '--': args = args[1:] break if args[0].startswith('--'): opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) else: opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) return opts, args
[ "def", "getopt", "(", "args", ",", "shortopts", ",", "longopts", "=", "[", "]", ")", ":", "opts", "=", "[", "]", "if", "type", "(", "longopts", ")", "==", "type", "(", "\"\"", ")", ":", "longopts", "=", "[", "longopts", "]", "else", ":", "longopts", "=", "list", "(", "longopts", ")", "while", "args", "and", "args", "[", "0", "]", ".", "startswith", "(", "'-'", ")", "and", "args", "[", "0", "]", "!=", "'-'", ":", "if", "args", "[", "0", "]", "==", "'--'", ":", "args", "=", "args", "[", "1", ":", "]", "break", "if", "args", "[", "0", "]", ".", "startswith", "(", "'--'", ")", ":", "opts", ",", "args", "=", "do_longs", "(", "opts", ",", "args", "[", "0", "]", "[", "2", ":", "]", ",", "longopts", ",", "args", "[", "1", ":", "]", ")", "else", ":", "opts", ",", "args", "=", "do_shorts", "(", "opts", ",", "args", "[", "0", "]", "[", "1", ":", "]", ",", "shortopts", ",", "args", "[", "1", ":", "]", ")", "return", "opts", ",", "args" ]
getopt(args, options[, long_options]) -> opts, args Parses command line options and parameter list. args is the argument list to be parsed, without the leading reference to the running program. Typically, this means "sys.argv[1:]". shortopts is the string of option letters that the script wants to recognize, with options that require an argument followed by a colon (i.e., the same format that Unix getopt() uses). If specified, longopts is a list of strings with the names of the long options which should be supported. The leading '--' characters should not be included in the option name. Options which require an argument should be followed by an equal sign ('='). The return value consists of two elements: the first is a list of (option, value) pairs; the second is the list of program arguments left after the option list was stripped (this is a trailing slice of the first argument). Each option-and-value pair returned has the option as its first element, prefixed with a hyphen (e.g., '-x'), and the option argument as its second element, or an empty string if the option has no argument. The options occur in the list in the same order in which they were found, thus allowing multiple occurrences. Long and short options may be mixed.
[ "getopt", "(", "args", "options", "[", "long_options", "]", ")", "-", ">", "opts", "args" ]
python
valid
43.809524
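A usage sketch for the getopt function documented above. Since this is a vendored copy of the standard-library module, the example drives it through the stdlib import, and the argument vector is made up.

import getopt

argv = ['-a', '-bvalue', '--long=opt', 'leftover']
opts, remaining = getopt.getopt(argv, 'ab:', ['long='])
print(opts)       # [('-a', ''), ('-b', 'value'), ('--long', 'opt')]
print(remaining)  # ['leftover']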
saltstack/salt
salt/modules/system.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/system.py#L337-L405
def set_system_date_time(years=None, months=None, days=None, hours=None, minutes=None, seconds=None, utc_offset=None): ''' Set the system date and time. Each argument is an element of the date, but not required. If an element is not passed, the current system value for that element will be used. For example, if you don't pass the year, the current system year will be used. (Used by set_system_date and set_system_time) Updates hardware clock, if present, in addition to software (kernel) clock. :param int years: Years digit, ie: 2015 :param int months: Months digit: 1 - 12 :param int days: Days digit: 1 - 31 :param int hours: Hours digit: 0 - 23 :param int minutes: Minutes digit: 0 - 59 :param int seconds: Seconds digit: 0 - 59 :param str utc_offset: The utc offset in 4 digit (+0600) format with an optional sign (+/-). Will default to None which will use the local timezone. To set the time based off of UTC use "'+0000'". Note: if being passed through the command line will need to be quoted twice to allow negative offsets. :return: True if successful. Otherwise False. :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_system_date_time 2015 5 12 11 37 53 "'-0500'" ''' # Get the current date/time date_time = _get_offset_time(utc_offset) # Check for passed values. If not passed, use current values if years is None: years = date_time.year if months is None: months = date_time.month if days is None: days = date_time.day if hours is None: hours = date_time.hour if minutes is None: minutes = date_time.minute if seconds is None: seconds = date_time.second try: new_datetime = datetime(years, months, days, hours, minutes, seconds, 0, date_time.tzinfo) except ValueError as err: raise SaltInvocationError(err.message) if not _date_bin_set_datetime(new_datetime): return False if has_settable_hwclock(): # Now that we've successfully set the software clock, we should # update hardware clock for time to persist though reboot. return _swclock_to_hwclock() return True
[ "def", "set_system_date_time", "(", "years", "=", "None", ",", "months", "=", "None", ",", "days", "=", "None", ",", "hours", "=", "None", ",", "minutes", "=", "None", ",", "seconds", "=", "None", ",", "utc_offset", "=", "None", ")", ":", "# Get the current date/time", "date_time", "=", "_get_offset_time", "(", "utc_offset", ")", "# Check for passed values. If not passed, use current values", "if", "years", "is", "None", ":", "years", "=", "date_time", ".", "year", "if", "months", "is", "None", ":", "months", "=", "date_time", ".", "month", "if", "days", "is", "None", ":", "days", "=", "date_time", ".", "day", "if", "hours", "is", "None", ":", "hours", "=", "date_time", ".", "hour", "if", "minutes", "is", "None", ":", "minutes", "=", "date_time", ".", "minute", "if", "seconds", "is", "None", ":", "seconds", "=", "date_time", ".", "second", "try", ":", "new_datetime", "=", "datetime", "(", "years", ",", "months", ",", "days", ",", "hours", ",", "minutes", ",", "seconds", ",", "0", ",", "date_time", ".", "tzinfo", ")", "except", "ValueError", "as", "err", ":", "raise", "SaltInvocationError", "(", "err", ".", "message", ")", "if", "not", "_date_bin_set_datetime", "(", "new_datetime", ")", ":", "return", "False", "if", "has_settable_hwclock", "(", ")", ":", "# Now that we've successfully set the software clock, we should", "# update hardware clock for time to persist though reboot.", "return", "_swclock_to_hwclock", "(", ")", "return", "True" ]
Set the system date and time. Each argument is an element of the date, but not required. If an element is not passed, the current system value for that element will be used. For example, if you don't pass the year, the current system year will be used. (Used by set_system_date and set_system_time) Updates hardware clock, if present, in addition to software (kernel) clock. :param int years: Years digit, ie: 2015 :param int months: Months digit: 1 - 12 :param int days: Days digit: 1 - 31 :param int hours: Hours digit: 0 - 23 :param int minutes: Minutes digit: 0 - 59 :param int seconds: Seconds digit: 0 - 59 :param str utc_offset: The utc offset in 4 digit (+0600) format with an optional sign (+/-). Will default to None which will use the local timezone. To set the time based off of UTC use "'+0000'". Note: if being passed through the command line will need to be quoted twice to allow negative offsets. :return: True if successful. Otherwise False. :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_system_date_time 2015 5 12 11 37 53 "'-0500'"
[ "Set", "the", "system", "date", "and", "time", ".", "Each", "argument", "is", "an", "element", "of", "the", "date", "but", "not", "required", ".", "If", "an", "element", "is", "not", "passed", "the", "current", "system", "value", "for", "that", "element", "will", "be", "used", ".", "For", "example", "if", "you", "don", "t", "pass", "the", "year", "the", "current", "system", "year", "will", "be", "used", ".", "(", "Used", "by", "set_system_date", "and", "set_system_time", ")" ]
python
train
34.681159
ic-labs/django-icekit
icekit/abstract_models.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/abstract_models.py#L32-L37
def save(self, *args, **kwargs): """ Update ``self.modified``. """ self.modified = timezone.now() super(AbstractBaseModel, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "modified", "=", "timezone", ".", "now", "(", ")", "super", "(", "AbstractBaseModel", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Update ``self.modified``.
[ "Update", "self", ".", "modified", "." ]
python
train
30.833333
wandb/client
wandb/vendor/prompt_toolkit/layout/containers.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1414-L1506
def _scroll_when_linewrapping(self, ui_content, width, height, cli): """ Scroll to make sure the cursor position is visible and that we maintain the requested scroll offset. Set `self.horizontal_scroll/vertical_scroll`. """ scroll_offsets_bottom = self.scroll_offsets.bottom scroll_offsets_top = self.scroll_offsets.top # We don't have horizontal scrolling. self.horizontal_scroll = 0 # If the current line consumes more than the whole window height, # then we have to scroll vertically inside this line. (We don't take # the scroll offsets into account for this.) # Also, ignore the scroll offsets in this case. Just set the vertical # scroll to this line. if ui_content.get_height_for_line(ui_content.cursor_position.y, width) > height - scroll_offsets_top: # Calculate the height of the text before the cursor, with the line # containing the cursor included, and the character belowe the # cursor included as well. line = explode_tokens(ui_content.get_line(ui_content.cursor_position.y)) text_before_cursor = token_list_to_text(line[:ui_content.cursor_position.x + 1]) text_before_height = UIContent.get_height_for_text(text_before_cursor, width) # Adjust scroll offset. self.vertical_scroll = ui_content.cursor_position.y self.vertical_scroll_2 = min(text_before_height - 1, self.vertical_scroll_2) self.vertical_scroll_2 = max(0, text_before_height - height, self.vertical_scroll_2) return else: self.vertical_scroll_2 = 0 # Current line doesn't consume the whole height. Take scroll offsets into account. def get_min_vertical_scroll(): # Make sure that the cursor line is not below the bottom. # (Calculate how many lines can be shown between the cursor and the .) used_height = 0 prev_lineno = ui_content.cursor_position.y for lineno in range(ui_content.cursor_position.y, -1, -1): used_height += ui_content.get_height_for_line(lineno, width) if used_height > height - scroll_offsets_bottom: return prev_lineno else: prev_lineno = lineno return 0 def get_max_vertical_scroll(): # Make sure that the cursor line is not above the top. prev_lineno = ui_content.cursor_position.y used_height = 0 for lineno in range(ui_content.cursor_position.y - 1, -1, -1): used_height += ui_content.get_height_for_line(lineno, width) if used_height > scroll_offsets_top: return prev_lineno else: prev_lineno = lineno return prev_lineno def get_topmost_visible(): """ Calculate the upper most line that can be visible, while the bottom is still visible. We should not allow scroll more than this if `allow_scroll_beyond_bottom` is false. """ prev_lineno = ui_content.line_count - 1 used_height = 0 for lineno in range(ui_content.line_count - 1, -1, -1): used_height += ui_content.get_height_for_line(lineno, width) if used_height > height: return prev_lineno else: prev_lineno = lineno return prev_lineno # Scroll vertically. (Make sure that the whole line which contains the # cursor is visible. topmost_visible = get_topmost_visible() # Note: the `min(topmost_visible, ...)` is to make sure that we # don't require scrolling up because of the bottom scroll offset, # when we are at the end of the document. self.vertical_scroll = max(self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll())) self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll()) # Disallow scrolling beyond bottom? if not self.allow_scroll_beyond_bottom(cli): self.vertical_scroll = min(self.vertical_scroll, topmost_visible)
[ "def", "_scroll_when_linewrapping", "(", "self", ",", "ui_content", ",", "width", ",", "height", ",", "cli", ")", ":", "scroll_offsets_bottom", "=", "self", ".", "scroll_offsets", ".", "bottom", "scroll_offsets_top", "=", "self", ".", "scroll_offsets", ".", "top", "# We don't have horizontal scrolling.", "self", ".", "horizontal_scroll", "=", "0", "# If the current line consumes more than the whole window height,", "# then we have to scroll vertically inside this line. (We don't take", "# the scroll offsets into account for this.)", "# Also, ignore the scroll offsets in this case. Just set the vertical", "# scroll to this line.", "if", "ui_content", ".", "get_height_for_line", "(", "ui_content", ".", "cursor_position", ".", "y", ",", "width", ")", ">", "height", "-", "scroll_offsets_top", ":", "# Calculate the height of the text before the cursor, with the line", "# containing the cursor included, and the character belowe the", "# cursor included as well.", "line", "=", "explode_tokens", "(", "ui_content", ".", "get_line", "(", "ui_content", ".", "cursor_position", ".", "y", ")", ")", "text_before_cursor", "=", "token_list_to_text", "(", "line", "[", ":", "ui_content", ".", "cursor_position", ".", "x", "+", "1", "]", ")", "text_before_height", "=", "UIContent", ".", "get_height_for_text", "(", "text_before_cursor", ",", "width", ")", "# Adjust scroll offset.", "self", ".", "vertical_scroll", "=", "ui_content", ".", "cursor_position", ".", "y", "self", ".", "vertical_scroll_2", "=", "min", "(", "text_before_height", "-", "1", ",", "self", ".", "vertical_scroll_2", ")", "self", ".", "vertical_scroll_2", "=", "max", "(", "0", ",", "text_before_height", "-", "height", ",", "self", ".", "vertical_scroll_2", ")", "return", "else", ":", "self", ".", "vertical_scroll_2", "=", "0", "# Current line doesn't consume the whole height. Take scroll offsets into account.", "def", "get_min_vertical_scroll", "(", ")", ":", "# Make sure that the cursor line is not below the bottom.", "# (Calculate how many lines can be shown between the cursor and the .)", "used_height", "=", "0", "prev_lineno", "=", "ui_content", ".", "cursor_position", ".", "y", "for", "lineno", "in", "range", "(", "ui_content", ".", "cursor_position", ".", "y", ",", "-", "1", ",", "-", "1", ")", ":", "used_height", "+=", "ui_content", ".", "get_height_for_line", "(", "lineno", ",", "width", ")", "if", "used_height", ">", "height", "-", "scroll_offsets_bottom", ":", "return", "prev_lineno", "else", ":", "prev_lineno", "=", "lineno", "return", "0", "def", "get_max_vertical_scroll", "(", ")", ":", "# Make sure that the cursor line is not above the top.", "prev_lineno", "=", "ui_content", ".", "cursor_position", ".", "y", "used_height", "=", "0", "for", "lineno", "in", "range", "(", "ui_content", ".", "cursor_position", ".", "y", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "used_height", "+=", "ui_content", ".", "get_height_for_line", "(", "lineno", ",", "width", ")", "if", "used_height", ">", "scroll_offsets_top", ":", "return", "prev_lineno", "else", ":", "prev_lineno", "=", "lineno", "return", "prev_lineno", "def", "get_topmost_visible", "(", ")", ":", "\"\"\"\n Calculate the upper most line that can be visible, while the bottom\n is still visible. 
We should not allow scroll more than this if\n `allow_scroll_beyond_bottom` is false.\n \"\"\"", "prev_lineno", "=", "ui_content", ".", "line_count", "-", "1", "used_height", "=", "0", "for", "lineno", "in", "range", "(", "ui_content", ".", "line_count", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "used_height", "+=", "ui_content", ".", "get_height_for_line", "(", "lineno", ",", "width", ")", "if", "used_height", ">", "height", ":", "return", "prev_lineno", "else", ":", "prev_lineno", "=", "lineno", "return", "prev_lineno", "# Scroll vertically. (Make sure that the whole line which contains the", "# cursor is visible.", "topmost_visible", "=", "get_topmost_visible", "(", ")", "# Note: the `min(topmost_visible, ...)` is to make sure that we", "# don't require scrolling up because of the bottom scroll offset,", "# when we are at the end of the document.", "self", ".", "vertical_scroll", "=", "max", "(", "self", ".", "vertical_scroll", ",", "min", "(", "topmost_visible", ",", "get_min_vertical_scroll", "(", ")", ")", ")", "self", ".", "vertical_scroll", "=", "min", "(", "self", ".", "vertical_scroll", ",", "get_max_vertical_scroll", "(", ")", ")", "# Disallow scrolling beyond bottom?", "if", "not", "self", ".", "allow_scroll_beyond_bottom", "(", "cli", ")", ":", "self", ".", "vertical_scroll", "=", "min", "(", "self", ".", "vertical_scroll", ",", "topmost_visible", ")" ]
Scroll to make sure the cursor position is visible and that we maintain the requested scroll offset. Set `self.horizontal_scroll/vertical_scroll`.
[ "Scroll", "to", "make", "sure", "the", "cursor", "position", "is", "visible", "and", "that", "we", "maintain", "the", "requested", "scroll", "offset", "." ]
python
train
45.709677
hoh/Hereby
hereby.py
https://github.com/hoh/Hereby/blob/a5f8bcdcb667e1fe1e64f542162e15ec31741505/hereby.py#L38-L40
def abspath(self, path): """Return absolute path for a path relative to the current file.""" return os.path.abspath(os.path.join(os.path.dirname(self.path), path))
[ "def", "abspath", "(", "self", ",", "path", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "path", ")", ",", "path", ")", ")" ]
Return absolute path for a path relative to the current file.
[ "Return", "absolute", "path", "for", "a", "path", "relative", "to", "the", "current", "file", "." ]
python
train
59
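A stand-alone illustration of the path-resolution rule implemented above (the argument is resolved relative to the directory containing self.path). It uses os.path directly rather than the package's own object, whose constructor is not shown here; the file paths are placeholders.

import os

base_file = '/home/user/project/script.py'  # stand-in for self.path
resolved = os.path.abspath(os.path.join(os.path.dirname(base_file), 'data/config.json'))
print(resolved)  # /home/user/project/data/config.json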
fermiPy/fermipy
fermipy/gtanalysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L2569-L2717
def profile(self, name, parName, logemin=None, logemax=None, reoptimize=False, xvals=None, npts=None, savestate=True, **kwargs): """Profile the likelihood for the given source and parameter. Parameters ---------- name : str Source name. parName : str Parameter name. reoptimize : bool Re-fit nuisance parameters at each step in the scan. Note that enabling this option will only re-fit parameters that were free when the method was executed. Returns ------- lnlprofile : dict Dictionary containing results of likelihood scan. """ # Find the source name = self.roi.get_source_by_name(name).name par = self.like.normPar(name) parName = self.like.normPar(name).getName() idx = self.like.par_index(name, parName) bounds = self.like.model[idx].getBounds() value = self.like.model[idx].getValue() loge_bounds = self.loge_bounds optimizer = kwargs.get('optimizer', self.config['optimizer']) if savestate: saved_state = self._latch_state() # If parameter is fixed temporarily free it par.setFree(True) if optimizer['optimizer'] == 'NEWTON': self._create_fitcache() if logemin is not None or logemax is not None: loge_bounds = self.set_energy_range(logemin, logemax) else: loge_bounds = self.loge_bounds loglike0 = -self.like() if xvals is None: err = par.error() val = par.getValue() if err <= 0 or val <= 3 * err: xvals = 10 ** np.linspace(-2.0, 2.0, 51) if val < xvals[0]: xvals = np.insert(xvals, val, 0) else: xvals = np.linspace(0, 1, 25) xvals = np.concatenate((-1.0 * xvals[1:][::-1], xvals)) xvals = val * 10 ** xvals if np.isnan(xvals).any(): raise RuntimeError( "Parameter scan points for %s::%s include infinite value." % (name, parName)) # Update parameter bounds to encompass scan range try: self.like[idx].setBounds(min(min(xvals), value, bounds[0]), max(max(xvals), value, bounds[1])) except RuntimeError: self.logger.warning( "Caught failure on setBounds for %s::%s." 
% (name, parName)) o = {'xvals': xvals, 'npred': np.zeros(len(xvals)), 'npred_wt': np.zeros(len(xvals)), 'dnde': np.zeros(len(xvals)), 'flux': np.zeros(len(xvals)), 'eflux': np.zeros(len(xvals)), 'dloglike': np.zeros(len(xvals)), 'loglike': np.zeros(len(xvals)) } if reoptimize and hasattr(self.like.components[0].logLike, 'setUpdateFixedWeights'): for c in self.components: c.like.logLike.setUpdateFixedWeights(False) for i, x in enumerate(xvals): try: self.like[idx] = x except RuntimeError: self.logger.warning( "Caught failure on set for %s::%s: %.2f" % (name, parName, x)) if self.like.nFreeParams() > 1 and reoptimize: # Only reoptimize if not all frozen self.like.freeze(idx) fit_output = self._fit(errors=False, **optimizer) loglike1 = fit_output['loglike'] self.like.thaw(idx) else: loglike1 = -self.like() flux = self.like[name].flux(10 ** loge_bounds[0], 10 ** loge_bounds[1]) eflux = self.like[name].energyFlux(10 ** loge_bounds[0], 10 ** loge_bounds[1]) prefactor = self.like[idx] o['dloglike'][i] = loglike1 - loglike0 o['loglike'][i] = loglike1 o['dnde'][i] = prefactor.getTrueValue() o['flux'][i] = flux o['eflux'][i] = eflux cs = self.model_counts_spectrum(name, loge_bounds[0], loge_bounds[1], summed=True) o['npred'][i] += np.sum(cs) cs_wt = self.model_counts_spectrum(name, loge_bounds[0], loge_bounds[1], summed=True, weighted=True) o['npred_wt'][i] += np.sum(cs_wt) self.like[idx] = value if reoptimize and hasattr(self.like.components[0].logLike, 'setUpdateFixedWeights'): for c in self.components: c.like.logLike.setUpdateFixedWeights(True) # Restore model parameters to original values if savestate: saved_state.restore() self.like[idx].setBounds(*bounds) if logemin is not None or logemax is not None: self.set_energy_range(*loge_bounds) return o
[ "def", "profile", "(", "self", ",", "name", ",", "parName", ",", "logemin", "=", "None", ",", "logemax", "=", "None", ",", "reoptimize", "=", "False", ",", "xvals", "=", "None", ",", "npts", "=", "None", ",", "savestate", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Find the source", "name", "=", "self", ".", "roi", ".", "get_source_by_name", "(", "name", ")", ".", "name", "par", "=", "self", ".", "like", ".", "normPar", "(", "name", ")", "parName", "=", "self", ".", "like", ".", "normPar", "(", "name", ")", ".", "getName", "(", ")", "idx", "=", "self", ".", "like", ".", "par_index", "(", "name", ",", "parName", ")", "bounds", "=", "self", ".", "like", ".", "model", "[", "idx", "]", ".", "getBounds", "(", ")", "value", "=", "self", ".", "like", ".", "model", "[", "idx", "]", ".", "getValue", "(", ")", "loge_bounds", "=", "self", ".", "loge_bounds", "optimizer", "=", "kwargs", ".", "get", "(", "'optimizer'", ",", "self", ".", "config", "[", "'optimizer'", "]", ")", "if", "savestate", ":", "saved_state", "=", "self", ".", "_latch_state", "(", ")", "# If parameter is fixed temporarily free it", "par", ".", "setFree", "(", "True", ")", "if", "optimizer", "[", "'optimizer'", "]", "==", "'NEWTON'", ":", "self", ".", "_create_fitcache", "(", ")", "if", "logemin", "is", "not", "None", "or", "logemax", "is", "not", "None", ":", "loge_bounds", "=", "self", ".", "set_energy_range", "(", "logemin", ",", "logemax", ")", "else", ":", "loge_bounds", "=", "self", ".", "loge_bounds", "loglike0", "=", "-", "self", ".", "like", "(", ")", "if", "xvals", "is", "None", ":", "err", "=", "par", ".", "error", "(", ")", "val", "=", "par", ".", "getValue", "(", ")", "if", "err", "<=", "0", "or", "val", "<=", "3", "*", "err", ":", "xvals", "=", "10", "**", "np", ".", "linspace", "(", "-", "2.0", ",", "2.0", ",", "51", ")", "if", "val", "<", "xvals", "[", "0", "]", ":", "xvals", "=", "np", ".", "insert", "(", "xvals", ",", "val", ",", "0", ")", "else", ":", "xvals", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "25", ")", "xvals", "=", "np", ".", "concatenate", "(", "(", "-", "1.0", "*", "xvals", "[", "1", ":", "]", "[", ":", ":", "-", "1", "]", ",", "xvals", ")", ")", "xvals", "=", "val", "*", "10", "**", "xvals", "if", "np", ".", "isnan", "(", "xvals", ")", ".", "any", "(", ")", ":", "raise", "RuntimeError", "(", "\"Parameter scan points for %s::%s include infinite value.\"", "%", "(", "name", ",", "parName", ")", ")", "# Update parameter bounds to encompass scan range", "try", ":", "self", ".", "like", "[", "idx", "]", ".", "setBounds", "(", "min", "(", "min", "(", "xvals", ")", ",", "value", ",", "bounds", "[", "0", "]", ")", ",", "max", "(", "max", "(", "xvals", ")", ",", "value", ",", "bounds", "[", "1", "]", ")", ")", "except", "RuntimeError", ":", "self", ".", "logger", ".", "warning", "(", "\"Caught failure on setBounds for %s::%s.\"", "%", "(", "name", ",", "parName", ")", ")", "o", "=", "{", "'xvals'", ":", "xvals", ",", "'npred'", ":", "np", ".", "zeros", "(", "len", "(", "xvals", ")", ")", ",", "'npred_wt'", ":", "np", ".", "zeros", "(", "len", "(", "xvals", ")", ")", ",", "'dnde'", ":", "np", ".", "zeros", "(", "len", "(", "xvals", ")", ")", ",", "'flux'", ":", "np", ".", "zeros", "(", "len", "(", "xvals", ")", ")", ",", "'eflux'", ":", "np", ".", "zeros", "(", "len", "(", "xvals", ")", ")", ",", "'dloglike'", ":", "np", ".", "zeros", "(", "len", "(", "xvals", ")", ")", ",", "'loglike'", ":", "np", ".", "zeros", "(", "len", "(", "xvals", ")", ")", "}", "if", "reoptimize", "and", "hasattr", 
"(", "self", ".", "like", ".", "components", "[", "0", "]", ".", "logLike", ",", "'setUpdateFixedWeights'", ")", ":", "for", "c", "in", "self", ".", "components", ":", "c", ".", "like", ".", "logLike", ".", "setUpdateFixedWeights", "(", "False", ")", "for", "i", ",", "x", "in", "enumerate", "(", "xvals", ")", ":", "try", ":", "self", ".", "like", "[", "idx", "]", "=", "x", "except", "RuntimeError", ":", "self", ".", "logger", ".", "warning", "(", "\"Caught failure on set for %s::%s: %.2f\"", "%", "(", "name", ",", "parName", ",", "x", ")", ")", "if", "self", ".", "like", ".", "nFreeParams", "(", ")", ">", "1", "and", "reoptimize", ":", "# Only reoptimize if not all frozen", "self", ".", "like", ".", "freeze", "(", "idx", ")", "fit_output", "=", "self", ".", "_fit", "(", "errors", "=", "False", ",", "*", "*", "optimizer", ")", "loglike1", "=", "fit_output", "[", "'loglike'", "]", "self", ".", "like", ".", "thaw", "(", "idx", ")", "else", ":", "loglike1", "=", "-", "self", ".", "like", "(", ")", "flux", "=", "self", ".", "like", "[", "name", "]", ".", "flux", "(", "10", "**", "loge_bounds", "[", "0", "]", ",", "10", "**", "loge_bounds", "[", "1", "]", ")", "eflux", "=", "self", ".", "like", "[", "name", "]", ".", "energyFlux", "(", "10", "**", "loge_bounds", "[", "0", "]", ",", "10", "**", "loge_bounds", "[", "1", "]", ")", "prefactor", "=", "self", ".", "like", "[", "idx", "]", "o", "[", "'dloglike'", "]", "[", "i", "]", "=", "loglike1", "-", "loglike0", "o", "[", "'loglike'", "]", "[", "i", "]", "=", "loglike1", "o", "[", "'dnde'", "]", "[", "i", "]", "=", "prefactor", ".", "getTrueValue", "(", ")", "o", "[", "'flux'", "]", "[", "i", "]", "=", "flux", "o", "[", "'eflux'", "]", "[", "i", "]", "=", "eflux", "cs", "=", "self", ".", "model_counts_spectrum", "(", "name", ",", "loge_bounds", "[", "0", "]", ",", "loge_bounds", "[", "1", "]", ",", "summed", "=", "True", ")", "o", "[", "'npred'", "]", "[", "i", "]", "+=", "np", ".", "sum", "(", "cs", ")", "cs_wt", "=", "self", ".", "model_counts_spectrum", "(", "name", ",", "loge_bounds", "[", "0", "]", ",", "loge_bounds", "[", "1", "]", ",", "summed", "=", "True", ",", "weighted", "=", "True", ")", "o", "[", "'npred_wt'", "]", "[", "i", "]", "+=", "np", ".", "sum", "(", "cs_wt", ")", "self", ".", "like", "[", "idx", "]", "=", "value", "if", "reoptimize", "and", "hasattr", "(", "self", ".", "like", ".", "components", "[", "0", "]", ".", "logLike", ",", "'setUpdateFixedWeights'", ")", ":", "for", "c", "in", "self", ".", "components", ":", "c", ".", "like", ".", "logLike", ".", "setUpdateFixedWeights", "(", "True", ")", "# Restore model parameters to original values", "if", "savestate", ":", "saved_state", ".", "restore", "(", ")", "self", ".", "like", "[", "idx", "]", ".", "setBounds", "(", "*", "bounds", ")", "if", "logemin", "is", "not", "None", "or", "logemax", "is", "not", "None", ":", "self", ".", "set_energy_range", "(", "*", "loge_bounds", ")", "return", "o" ]
Profile the likelihood for the given source and parameter. Parameters ---------- name : str Source name. parName : str Parameter name. reoptimize : bool Re-fit nuisance parameters at each step in the scan. Note that enabling this option will only re-fit parameters that were free when the method was executed. Returns ------- lnlprofile : dict Dictionary containing results of likelihood scan.
[ "Profile", "the", "likelihood", "for", "the", "given", "source", "and", "parameter", "." ]
python
train
34.369128
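A small sketch of how the dictionary returned by profile() might be post-processed. Building the GTAnalysis object itself is elided, so synthetic arrays stand in for the 'xvals' and 'dloglike' entries described in the docstring, and a rough 68% interval is read off from the delta-log-likelihood.

import numpy as np

# Synthetic stand-in for profile()'s return value: a parabolic likelihood scan.
xvals = np.linspace(0.5, 1.5, 41)
scan = {'xvals': xvals, 'dloglike': -0.5 * ((xvals - 1.0) / 0.1) ** 2}

# Scan points within 0.5 of the best delta-log-likelihood bracket the ~68% interval.
ok = scan['dloglike'] >= scan['dloglike'].max() - 0.5
print(scan['xvals'][ok].min(), scan['xvals'][ok].max())  # roughly 0.9 and 1.1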
tanghaibao/goatools
goatools/go_enrichment.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L307-L319
def get_results_msg(self, results, study): """Return summary for GOEA results.""" # To convert msg list to string: "\n".join(msg) msg = [] if results: fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}" stu_items, num_gos_stu = self.get_item_cnt(results, "study_items") pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items") stu_txt = fmt.format(N=len(stu_items), M=num_gos_stu, NT=len(set(study))) pop_txt = fmt.format(N=len(pop_items), M=num_gos_pop, NT=self.pop_n) msg.append("{POP} population items".format(POP=pop_txt)) msg.append("{STU} study items".format(STU=stu_txt)) return msg
[ "def", "get_results_msg", "(", "self", ",", "results", ",", "study", ")", ":", "# To convert msg list to string: \"\\n\".join(msg)", "msg", "=", "[", "]", "if", "results", ":", "fmt", "=", "\"{M:6,} GO terms are associated with {N:6,} of {NT:6,}\"", "stu_items", ",", "num_gos_stu", "=", "self", ".", "get_item_cnt", "(", "results", ",", "\"study_items\"", ")", "pop_items", ",", "num_gos_pop", "=", "self", ".", "get_item_cnt", "(", "results", ",", "\"pop_items\"", ")", "stu_txt", "=", "fmt", ".", "format", "(", "N", "=", "len", "(", "stu_items", ")", ",", "M", "=", "num_gos_stu", ",", "NT", "=", "len", "(", "set", "(", "study", ")", ")", ")", "pop_txt", "=", "fmt", ".", "format", "(", "N", "=", "len", "(", "pop_items", ")", ",", "M", "=", "num_gos_pop", ",", "NT", "=", "self", ".", "pop_n", ")", "msg", ".", "append", "(", "\"{POP} population items\"", ".", "format", "(", "POP", "=", "pop_txt", ")", ")", "msg", ".", "append", "(", "\"{STU} study items\"", ".", "format", "(", "STU", "=", "stu_txt", ")", ")", "return", "msg" ]
Return summary for GOEA results.
[ "Return", "summary", "for", "GOEA", "results", "." ]
python
train
55.307692
mcocdawc/chemcoord
src/chemcoord/cartesian_coordinates/_cartesian_class_core.py
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/_cartesian_class_core.py#L1273-L1342
def reindex_similar(self, other, n_sphere=4): """Reindex ``other`` to be similarly indexed as ``self``. Returns a reindexed copy of ``other`` that minimizes the distance for each atom to itself in the same chemical environemt from ``self`` to ``other``. Read more about the definition of the chemical environment in :func:`Cartesian.partition_chem_env` .. note:: It is necessary to align ``self`` and other before applying this method. This can be done via :meth:`~Cartesian.align`. .. note:: It is probably necessary to improve the result using :meth:`~Cartesian.change_numbering()`. Args: other (Cartesian): n_sphere (int): Wrapper around the argument for :meth:`~Cartesian.partition_chem_env`. Returns: Cartesian: Reindexed version of other """ def make_subset_similar(m1, subset1, m2, subset2, index_dct): """Changes index_dct INPLACE""" coords = ['x', 'y', 'z'] index1 = list(subset1) for m1_i in index1: dist_m2_to_m1_i = m2.get_distance_to(m1.loc[m1_i, coords], subset2, sort=True) m2_i = dist_m2_to_m1_i.index[0] dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance'] m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords] counter = itertools.count() found = False while not found: if m2_i in index_dct.keys(): old_m1_pos = m1.loc[index_dct[m2_i], coords] if dist_new < np.linalg.norm(m2_pos_i - old_m1_pos): index1.append(index_dct[m2_i]) index_dct[m2_i] = m1_i found = True else: m2_i = dist_m2_to_m1_i.index[next(counter)] dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance'] m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords] else: index_dct[m2_i] = m1_i found = True return index_dct molecule1 = self.copy() molecule2 = other.copy() partition1 = molecule1.partition_chem_env(n_sphere) partition2 = molecule2.partition_chem_env(n_sphere) index_dct = {} for key in partition1: message = ('You have chemically different molecules, regarding ' 'the topology of their connectivity.') assert len(partition1[key]) == len(partition2[key]), message index_dct = make_subset_similar(molecule1, partition1[key], molecule2, partition2[key], index_dct) molecule2.index = [index_dct[i] for i in molecule2.index] return molecule2.loc[molecule1.index]
[ "def", "reindex_similar", "(", "self", ",", "other", ",", "n_sphere", "=", "4", ")", ":", "def", "make_subset_similar", "(", "m1", ",", "subset1", ",", "m2", ",", "subset2", ",", "index_dct", ")", ":", "\"\"\"Changes index_dct INPLACE\"\"\"", "coords", "=", "[", "'x'", ",", "'y'", ",", "'z'", "]", "index1", "=", "list", "(", "subset1", ")", "for", "m1_i", "in", "index1", ":", "dist_m2_to_m1_i", "=", "m2", ".", "get_distance_to", "(", "m1", ".", "loc", "[", "m1_i", ",", "coords", "]", ",", "subset2", ",", "sort", "=", "True", ")", "m2_i", "=", "dist_m2_to_m1_i", ".", "index", "[", "0", "]", "dist_new", "=", "dist_m2_to_m1_i", ".", "loc", "[", "m2_i", ",", "'distance'", "]", "m2_pos_i", "=", "dist_m2_to_m1_i", ".", "loc", "[", "m2_i", ",", "coords", "]", "counter", "=", "itertools", ".", "count", "(", ")", "found", "=", "False", "while", "not", "found", ":", "if", "m2_i", "in", "index_dct", ".", "keys", "(", ")", ":", "old_m1_pos", "=", "m1", ".", "loc", "[", "index_dct", "[", "m2_i", "]", ",", "coords", "]", "if", "dist_new", "<", "np", ".", "linalg", ".", "norm", "(", "m2_pos_i", "-", "old_m1_pos", ")", ":", "index1", ".", "append", "(", "index_dct", "[", "m2_i", "]", ")", "index_dct", "[", "m2_i", "]", "=", "m1_i", "found", "=", "True", "else", ":", "m2_i", "=", "dist_m2_to_m1_i", ".", "index", "[", "next", "(", "counter", ")", "]", "dist_new", "=", "dist_m2_to_m1_i", ".", "loc", "[", "m2_i", ",", "'distance'", "]", "m2_pos_i", "=", "dist_m2_to_m1_i", ".", "loc", "[", "m2_i", ",", "coords", "]", "else", ":", "index_dct", "[", "m2_i", "]", "=", "m1_i", "found", "=", "True", "return", "index_dct", "molecule1", "=", "self", ".", "copy", "(", ")", "molecule2", "=", "other", ".", "copy", "(", ")", "partition1", "=", "molecule1", ".", "partition_chem_env", "(", "n_sphere", ")", "partition2", "=", "molecule2", ".", "partition_chem_env", "(", "n_sphere", ")", "index_dct", "=", "{", "}", "for", "key", "in", "partition1", ":", "message", "=", "(", "'You have chemically different molecules, regarding '", "'the topology of their connectivity.'", ")", "assert", "len", "(", "partition1", "[", "key", "]", ")", "==", "len", "(", "partition2", "[", "key", "]", ")", ",", "message", "index_dct", "=", "make_subset_similar", "(", "molecule1", ",", "partition1", "[", "key", "]", ",", "molecule2", ",", "partition2", "[", "key", "]", ",", "index_dct", ")", "molecule2", ".", "index", "=", "[", "index_dct", "[", "i", "]", "for", "i", "in", "molecule2", ".", "index", "]", "return", "molecule2", ".", "loc", "[", "molecule1", ".", "index", "]" ]
Reindex ``other`` to be similarly indexed as ``self``. Returns a reindexed copy of ``other`` that minimizes the distance for each atom to itself in the same chemical environemt from ``self`` to ``other``. Read more about the definition of the chemical environment in :func:`Cartesian.partition_chem_env` .. note:: It is necessary to align ``self`` and other before applying this method. This can be done via :meth:`~Cartesian.align`. .. note:: It is probably necessary to improve the result using :meth:`~Cartesian.change_numbering()`. Args: other (Cartesian): n_sphere (int): Wrapper around the argument for :meth:`~Cartesian.partition_chem_env`. Returns: Cartesian: Reindexed version of other
[ "Reindex", "other", "to", "be", "similarly", "indexed", "as", "self", "." ]
python
train
43
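A hedged call sketch for reindex_similar. The xyz file names are placeholders, the read_xyz constructor is taken from chemcoord's documented Cartesian API but should be treated as an assumption, and per the notes in the docstring the two structures should already be aligned.

import chemcoord as cc

m1 = cc.Cartesian.read_xyz('conformer_a.xyz')  # placeholder files
m2 = cc.Cartesian.read_xyz('conformer_b.xyz')  # same molecule, scrambled atom order
# Structures are assumed to be aligned already (see the docstring's note on Cartesian.align).
m2_like_m1 = m1.reindex_similar(m2, n_sphere=4)
# m2_like_m1 now uses m1's atom indices for chemically equivalent atoms.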
dcos/shakedown
shakedown/dcos/file.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/file.py#L69-L78
def copy_file_to_master( file_path, remote_path='.', username=None, key_path=None ): """ Copy a file to the Mesos master """ return copy_file(shakedown.master_ip(), file_path, remote_path, username, key_path)
[ "def", "copy_file_to_master", "(", "file_path", ",", "remote_path", "=", "'.'", ",", "username", "=", "None", ",", "key_path", "=", "None", ")", ":", "return", "copy_file", "(", "shakedown", ".", "master_ip", "(", ")", ",", "file_path", ",", "remote_path", ",", "username", ",", "key_path", ")" ]
Copy a file to the Mesos master
[ "Copy", "a", "file", "to", "the", "Mesos", "master" ]
python
train
24.4
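A hedged usage sketch for copy_file_to_master. It assumes the function is importable from the top-level shakedown package, that a DC/OS cluster is configured and reachable, and the file names are placeholders.

from shakedown import copy_file_to_master  # assumed top-level re-export

# Default: copy into the master's home directory with the default SSH identity.
copy_file_to_master('bootstrap.sh')
# Or specify the remote path, user and key explicitly:
copy_file_to_master('bootstrap.sh', remote_path='/tmp/bootstrap.sh',
                    username='core', key_path='~/.ssh/id_rsa')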
thriftrw/thriftrw-python
thriftrw/compile/compiler.py
https://github.com/thriftrw/thriftrw-python/blob/4f2f71acd7a0ac716c9ea5cdcea2162aa561304a/thriftrw/compile/compiler.py#L164-L278
def compile(self, name, contents, path=None): """Compile the given Thrift document into a Python module. The generated module contains, .. py:attribute:: __services__ A collection of generated classes for all services defined in the thrift file. .. versionchanged:: 1.0 Renamed from ``services`` to ``__services__``. .. py:attribute:: __types__ A collection of generated types for all types defined in the thrift file. .. versionchanged:: 1.0 Renamed from ``types`` to ``__types__``. .. py:attribute:: __includes__ A collection of modules included by this module. .. versionadded:: 1.0 .. py:attribute:: __constants__ A mapping of constant name to value for all constants defined in the thrift file. .. versionchanged:: 1.0 Renamed from ``constants`` to ``__constants__``. .. py:attribute:: __thrift_source__ Contents of the .thrift file from which this module was compiled. .. versionadded:: 1.1 .. py:function:: dumps(obj) Serializes the given object using the protocol the compiler was instantiated with. .. py:function:: loads(cls, payload) Deserializes an object of type ``cls`` from ``payload`` using the protocol the compiler was instantiated with. .. py:function:: dumps.message(obj, seqid=0) Serializes the given request or response into a :py:class:`~thriftrw.wire.Message` using the protocol that the compiler was instantiated with. See :ref:`calling-apache-thrift`. .. versionadded:: 1.0 .. py:function:: loads.message(service, payload) Deserializes a :py:class:`~thriftrw.wire.Message` from ``payload`` using the protocol the compiler was instantiated with. A request or response of a method defined in the given service is parsed in the message body. See :ref:`calling-apache-thrift`. .. versionadded:: 1.0 And one class each for every struct, union, exception, enum, and service defined in the IDL. Service classes have references to :py:class:`thriftrw.spec.ServiceFunction` objects for each method defined in the service. :param str name: Name of the Thrift document. This will be the name of the generated module. :param str contents: Thrift document to compile :param str path: Path to the Thrift file being compiled. If not specified, imports from within the Thrift file will be disallowed. :returns: ModuleSpec of the generated module. """ assert name if path: path = os.path.abspath(path) if path in self._module_specs: return self._module_specs[path] module_spec = ModuleSpec(name, self.protocol, path, contents) if path: self._module_specs[path] = module_spec program = self.parser.parse(contents) header_processor = HeaderProcessor(self, module_spec, self.include_as) for header in program.headers: header.apply(header_processor) generator = Generator(module_spec.scope, strict=self.strict) for definition in program.definitions: generator.process(definition) return module_spec
[ "def", "compile", "(", "self", ",", "name", ",", "contents", ",", "path", "=", "None", ")", ":", "assert", "name", "if", "path", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "path", "in", "self", ".", "_module_specs", ":", "return", "self", ".", "_module_specs", "[", "path", "]", "module_spec", "=", "ModuleSpec", "(", "name", ",", "self", ".", "protocol", ",", "path", ",", "contents", ")", "if", "path", ":", "self", ".", "_module_specs", "[", "path", "]", "=", "module_spec", "program", "=", "self", ".", "parser", ".", "parse", "(", "contents", ")", "header_processor", "=", "HeaderProcessor", "(", "self", ",", "module_spec", ",", "self", ".", "include_as", ")", "for", "header", "in", "program", ".", "headers", ":", "header", ".", "apply", "(", "header_processor", ")", "generator", "=", "Generator", "(", "module_spec", ".", "scope", ",", "strict", "=", "self", ".", "strict", ")", "for", "definition", "in", "program", ".", "definitions", ":", "generator", ".", "process", "(", "definition", ")", "return", "module_spec" ]
Compile the given Thrift document into a Python module. The generated module contains, .. py:attribute:: __services__ A collection of generated classes for all services defined in the thrift file. .. versionchanged:: 1.0 Renamed from ``services`` to ``__services__``. .. py:attribute:: __types__ A collection of generated types for all types defined in the thrift file. .. versionchanged:: 1.0 Renamed from ``types`` to ``__types__``. .. py:attribute:: __includes__ A collection of modules included by this module. .. versionadded:: 1.0 .. py:attribute:: __constants__ A mapping of constant name to value for all constants defined in the thrift file. .. versionchanged:: 1.0 Renamed from ``constants`` to ``__constants__``. .. py:attribute:: __thrift_source__ Contents of the .thrift file from which this module was compiled. .. versionadded:: 1.1 .. py:function:: dumps(obj) Serializes the given object using the protocol the compiler was instantiated with. .. py:function:: loads(cls, payload) Deserializes an object of type ``cls`` from ``payload`` using the protocol the compiler was instantiated with. .. py:function:: dumps.message(obj, seqid=0) Serializes the given request or response into a :py:class:`~thriftrw.wire.Message` using the protocol that the compiler was instantiated with. See :ref:`calling-apache-thrift`. .. versionadded:: 1.0 .. py:function:: loads.message(service, payload) Deserializes a :py:class:`~thriftrw.wire.Message` from ``payload`` using the protocol the compiler was instantiated with. A request or response of a method defined in the given service is parsed in the message body. See :ref:`calling-apache-thrift`. .. versionadded:: 1.0 And one class each for every struct, union, exception, enum, and service defined in the IDL. Service classes have references to :py:class:`thriftrw.spec.ServiceFunction` objects for each method defined in the service. :param str name: Name of the Thrift document. This will be the name of the generated module. :param str contents: Thrift document to compile :param str path: Path to the Thrift file being compiled. If not specified, imports from within the Thrift file will be disallowed. :returns: ModuleSpec of the generated module.
[ "Compile", "the", "given", "Thrift", "document", "into", "a", "Python", "module", "." ]
python
train
30.521739
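A hedged usage sketch for the compile() record above; `compiler` is assumed to be an already-constructed Compiler (its constructor is not part of this record), and the small IDL string is made up.

```python
idl = '''
struct Point {
    1: required double x
    2: required double y
}
'''

# Returns a ModuleSpec describing the generated module; compiling the same
# path again returns the cached spec instead of re-parsing the document.
module_spec = compiler.compile('point_types', idl, path='/tmp/point.thrift')
```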
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/jar.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/jar.py#L92-L197
def Jar(env, target = None, source = [], *args, **kw): """ A pseudo-Builder wrapper around the separate Jar sources{File,Dir} Builders. """ # jar target should not be a list so assume they passed # no target and want implicit target to be made and the arg # was actaully the list of sources if SCons.Util.is_List(target) and source == []: SCons.Warnings.Warning("Making implicit target jar file, " + "and treating the list as sources") source = target target = None # mutiple targets pass so build each target the same from the # same source #TODO Maybe this should only be done once, and the result copied # for each target since it should result in the same? if SCons.Util.is_List(target) and SCons.Util.is_List(source): jars = [] for single_target in target: jars += env.Jar( target = single_target, source = source, *args, **kw) return jars # they passed no target so make a target implicitly if target == None: try: # make target from the first source file target = os.path.splitext(str(source[0]))[0] + env.subst('$JARSUFFIX') except: # something strange is happening but attempt anyways SCons.Warnings.Warning("Could not make implicit target from sources, using directory") target = os.path.basename(str(env.Dir('.'))) + env.subst('$JARSUFFIX') # make lists out of our target and sources if not SCons.Util.is_List(target): target = [target] if not SCons.Util.is_List(source): source = [source] # setup for checking through all the sources and handle accordingly java_class_suffix = env.subst('$JAVACLASSSUFFIX') java_suffix = env.subst('$JAVASUFFIX') target_classes = [] # function for determining what to do with a file and not a directory # if its already a class file then it can be used as a # source for jar, otherwise turn it into a class file then # return the source def file_to_class(s): if(str(_my_normcase(s)).endswith(java_suffix)): return env.JavaClassFile(source = s, *args, **kw) else: return [env.fs.File(s)] # In the case that we are passed just string to a node which is directory # but does not exist, we need to check all the current targets to see if # that directory is going to exist so we can add it as a source to Jar builder def get_all_targets(env, node='.'): def get_all_targets_iter(env, node): if node.has_builder(): yield node for kid in node.all_children(): for kid in get_all_targets(env, kid): yield kid node = env.arg2nodes(node, env.fs.Entry)[0] return list(get_all_targets_iter(env, node)) # loop through the sources and handle each accordingly # the goal here is to get all the source files into a class # file or a directory that contains class files for s in source: s = env.subst(s) if isinstance(s, SCons.Node.FS.Base): if isinstance(s, SCons.Node.FS.File): # found a file so make sure its a class file target_classes.extend(file_to_class(s)) else: # found a dir so make sure its a dir of class files target_classes.extend(env.JavaClassDir(source = env.fs.Dir(s), *args, **kw)) else: if os.path.isfile(s): # found a file that exists on the FS, make sure its a class file target_classes.extend(file_to_class(s)) elif os.path.isdir(s): # found a dir on the FS, add it as a dir of class files target_classes.append(env.fs.Dir(s)) elif s[-len(java_suffix):] == java_suffix or s[-len(java_class_suffix):] == java_class_suffix: # found a file that may not exists and is only a string # so add it after converting it to a class file target_classes.extend(file_to_class(s)) else: # found a swig file so add it after converting it to class files if(os.path.splitext(str(s))[1] == ".i"): 
target_classes.extend(env.JavaClassFile(source = s, *args, **kw)) else: # found a directory that does not yet exist, but can exist as a node # check the target nodes to make sure it will be built, then add # it as a source for node in get_all_targets(env): if(s in str(node) and os.path.splitext(str(node))[1] == ""): target_classes.append(node) # at this point all our sources have been converted to classes or directories of class # so pass it to the Jar builder return env.JarFile(target = target, source = target_classes, *args, **kw)
[ "def", "Jar", "(", "env", ",", "target", "=", "None", ",", "source", "=", "[", "]", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "# jar target should not be a list so assume they passed", "# no target and want implicit target to be made and the arg", "# was actaully the list of sources", "if", "SCons", ".", "Util", ".", "is_List", "(", "target", ")", "and", "source", "==", "[", "]", ":", "SCons", ".", "Warnings", ".", "Warning", "(", "\"Making implicit target jar file, \"", "+", "\"and treating the list as sources\"", ")", "source", "=", "target", "target", "=", "None", "# mutiple targets pass so build each target the same from the ", "# same source", "#TODO Maybe this should only be done once, and the result copied", "# for each target since it should result in the same?", "if", "SCons", ".", "Util", ".", "is_List", "(", "target", ")", "and", "SCons", ".", "Util", ".", "is_List", "(", "source", ")", ":", "jars", "=", "[", "]", "for", "single_target", "in", "target", ":", "jars", "+=", "env", ".", "Jar", "(", "target", "=", "single_target", ",", "source", "=", "source", ",", "*", "args", ",", "*", "*", "kw", ")", "return", "jars", "# they passed no target so make a target implicitly", "if", "target", "==", "None", ":", "try", ":", "# make target from the first source file", "target", "=", "os", ".", "path", ".", "splitext", "(", "str", "(", "source", "[", "0", "]", ")", ")", "[", "0", "]", "+", "env", ".", "subst", "(", "'$JARSUFFIX'", ")", "except", ":", "# something strange is happening but attempt anyways", "SCons", ".", "Warnings", ".", "Warning", "(", "\"Could not make implicit target from sources, using directory\"", ")", "target", "=", "os", ".", "path", ".", "basename", "(", "str", "(", "env", ".", "Dir", "(", "'.'", ")", ")", ")", "+", "env", ".", "subst", "(", "'$JARSUFFIX'", ")", "# make lists out of our target and sources", "if", "not", "SCons", ".", "Util", ".", "is_List", "(", "target", ")", ":", "target", "=", "[", "target", "]", "if", "not", "SCons", ".", "Util", ".", "is_List", "(", "source", ")", ":", "source", "=", "[", "source", "]", "# setup for checking through all the sources and handle accordingly", "java_class_suffix", "=", "env", ".", "subst", "(", "'$JAVACLASSSUFFIX'", ")", "java_suffix", "=", "env", ".", "subst", "(", "'$JAVASUFFIX'", ")", "target_classes", "=", "[", "]", "# function for determining what to do with a file and not a directory", "# if its already a class file then it can be used as a", "# source for jar, otherwise turn it into a class file then", "# return the source", "def", "file_to_class", "(", "s", ")", ":", "if", "(", "str", "(", "_my_normcase", "(", "s", ")", ")", ".", "endswith", "(", "java_suffix", ")", ")", ":", "return", "env", ".", "JavaClassFile", "(", "source", "=", "s", ",", "*", "args", ",", "*", "*", "kw", ")", "else", ":", "return", "[", "env", ".", "fs", ".", "File", "(", "s", ")", "]", "# In the case that we are passed just string to a node which is directory", "# but does not exist, we need to check all the current targets to see if", "# that directory is going to exist so we can add it as a source to Jar builder", "def", "get_all_targets", "(", "env", ",", "node", "=", "'.'", ")", ":", "def", "get_all_targets_iter", "(", "env", ",", "node", ")", ":", "if", "node", ".", "has_builder", "(", ")", ":", "yield", "node", "for", "kid", "in", "node", ".", "all_children", "(", ")", ":", "for", "kid", "in", "get_all_targets", "(", "env", ",", "kid", ")", ":", "yield", "kid", "node", "=", "env", ".", "arg2nodes", "(", "node", ",", 
"env", ".", "fs", ".", "Entry", ")", "[", "0", "]", "return", "list", "(", "get_all_targets_iter", "(", "env", ",", "node", ")", ")", "# loop through the sources and handle each accordingly", "# the goal here is to get all the source files into a class", "# file or a directory that contains class files", "for", "s", "in", "source", ":", "s", "=", "env", ".", "subst", "(", "s", ")", "if", "isinstance", "(", "s", ",", "SCons", ".", "Node", ".", "FS", ".", "Base", ")", ":", "if", "isinstance", "(", "s", ",", "SCons", ".", "Node", ".", "FS", ".", "File", ")", ":", "# found a file so make sure its a class file", "target_classes", ".", "extend", "(", "file_to_class", "(", "s", ")", ")", "else", ":", "# found a dir so make sure its a dir of class files", "target_classes", ".", "extend", "(", "env", ".", "JavaClassDir", "(", "source", "=", "env", ".", "fs", ".", "Dir", "(", "s", ")", ",", "*", "args", ",", "*", "*", "kw", ")", ")", "else", ":", "if", "os", ".", "path", ".", "isfile", "(", "s", ")", ":", "# found a file that exists on the FS, make sure its a class file", "target_classes", ".", "extend", "(", "file_to_class", "(", "s", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "s", ")", ":", "# found a dir on the FS, add it as a dir of class files", "target_classes", ".", "append", "(", "env", ".", "fs", ".", "Dir", "(", "s", ")", ")", "elif", "s", "[", "-", "len", "(", "java_suffix", ")", ":", "]", "==", "java_suffix", "or", "s", "[", "-", "len", "(", "java_class_suffix", ")", ":", "]", "==", "java_class_suffix", ":", "# found a file that may not exists and is only a string", "# so add it after converting it to a class file", "target_classes", ".", "extend", "(", "file_to_class", "(", "s", ")", ")", "else", ":", "# found a swig file so add it after converting it to class files", "if", "(", "os", ".", "path", ".", "splitext", "(", "str", "(", "s", ")", ")", "[", "1", "]", "==", "\".i\"", ")", ":", "target_classes", ".", "extend", "(", "env", ".", "JavaClassFile", "(", "source", "=", "s", ",", "*", "args", ",", "*", "*", "kw", ")", ")", "else", ":", "# found a directory that does not yet exist, but can exist as a node", "# check the target nodes to make sure it will be built, then add", "# it as a source", "for", "node", "in", "get_all_targets", "(", "env", ")", ":", "if", "(", "s", "in", "str", "(", "node", ")", "and", "os", ".", "path", ".", "splitext", "(", "str", "(", "node", ")", ")", "[", "1", "]", "==", "\"\"", ")", ":", "target_classes", ".", "append", "(", "node", ")", "# at this point all our sources have been converted to classes or directories of class", "# so pass it to the Jar builder", "return", "env", ".", "JarFile", "(", "target", "=", "target", ",", "source", "=", "target_classes", ",", "*", "args", ",", "*", "*", "kw", ")" ]
A pseudo-Builder wrapper around the separate Jar sources{File,Dir} Builders.
[ "A", "pseudo", "-", "Builder", "wrapper", "around", "the", "separate", "Jar", "sources", "{", "File", "Dir", "}", "Builders", "." ]
python
train
46.40566
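A hedged SConstruct sketch for the Jar pseudo-builder above; the tool names and paths are placeholders, and `Environment` is the usual SCons construction environment available in an SConstruct file.

```python
# Mixed sources are allowed: .java files are compiled to class files first,
# existing directories are treated as directories of class files, and .i
# files go through the SWIG-to-class branch shown in the record.
env = Environment(tools=['javac', 'jar'])
env.Jar(target='app.jar', source=['src/Main.java', 'build/classes'])
```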
ishepard/pydriller
pydriller/git_repository.py
https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L102-L109
def get_commit(self, commit_id: str) -> Commit: """ Get the specified commit. :param str commit_id: hash of the commit to analyze :return: Commit """ return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
[ "def", "get_commit", "(", "self", ",", "commit_id", ":", "str", ")", "->", "Commit", ":", "return", "Commit", "(", "self", ".", "repo", ".", "commit", "(", "commit_id", ")", ",", "self", ".", "path", ",", "self", ".", "main_branch", ")" ]
Get the specified commit. :param str commit_id: hash of the commit to analyze :return: Commit
[ "Get", "the", "specified", "commit", "." ]
python
train
32.875
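A short usage sketch for get_commit(); it assumes the enclosing class is pydriller's GitRepository (per the record's path) and that it is importable from the package root. Attribute names on the returned Commit may differ between versions, and the hash is a placeholder.

```python
from pydriller import GitRepository

gr = GitRepository('/path/to/some/repo')
commit = gr.get_commit('ab1234cdef5678901234567890abcdef12345678')
print(commit.msg, commit.author.name)
```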
qubell/contrib-python-qubell-client
qubell/api/tools/__init__.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/tools/__init__.py#L145-L191
def dump(node): """ Dump initialized object structure to yaml """ from qubell.api.private.platform import Auth, QubellPlatform from qubell.api.private.organization import Organization from qubell.api.private.application import Application from qubell.api.private.instance import Instance from qubell.api.private.revision import Revision from qubell.api.private.environment import Environment from qubell.api.private.zone import Zone from qubell.api.private.manifest import Manifest # Exclude keys from dump # Format: { 'ClassName': ['fields', 'to', 'exclude']} exclusion_list = { Auth: ['cookies'], QubellPlatform:['auth', ], Organization: ['auth', 'organizationId', 'zone'], Application: ['auth', 'applicationId', 'organization'], Instance: ['auth', 'instanceId', 'application'], Manifest: ['name', 'content'], Revision: ['auth', 'revisionId'], Environment: ['auth', 'environmentId', 'organization'], Zone: ['auth', 'zoneId', 'organization'], } def obj_presenter(dumper, obj): for x in exclusion_list.keys(): if isinstance(obj, x): # Find class fields = obj.__dict__.copy() for excl_item in exclusion_list[x]: try: fields.pop(excl_item) except: log.warn('No item %s in object %s' % (excl_item, x)) return dumper.represent_mapping('tag:yaml.org,2002:map', fields) return dumper.represent_mapping('tag:yaml.org,2002:map', obj.__dict__) noalias_dumper = yaml.dumper.Dumper noalias_dumper.ignore_aliases = lambda self, data: True yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:str', value)) yaml.add_multi_representer(object, obj_presenter) serialized = yaml.dump(node, default_flow_style=False, Dumper=noalias_dumper) return serialized
[ "def", "dump", "(", "node", ")", ":", "from", "qubell", ".", "api", ".", "private", ".", "platform", "import", "Auth", ",", "QubellPlatform", "from", "qubell", ".", "api", ".", "private", ".", "organization", "import", "Organization", "from", "qubell", ".", "api", ".", "private", ".", "application", "import", "Application", "from", "qubell", ".", "api", ".", "private", ".", "instance", "import", "Instance", "from", "qubell", ".", "api", ".", "private", ".", "revision", "import", "Revision", "from", "qubell", ".", "api", ".", "private", ".", "environment", "import", "Environment", "from", "qubell", ".", "api", ".", "private", ".", "zone", "import", "Zone", "from", "qubell", ".", "api", ".", "private", ".", "manifest", "import", "Manifest", "# Exclude keys from dump", "# Format: { 'ClassName': ['fields', 'to', 'exclude']}", "exclusion_list", "=", "{", "Auth", ":", "[", "'cookies'", "]", ",", "QubellPlatform", ":", "[", "'auth'", ",", "]", ",", "Organization", ":", "[", "'auth'", ",", "'organizationId'", ",", "'zone'", "]", ",", "Application", ":", "[", "'auth'", ",", "'applicationId'", ",", "'organization'", "]", ",", "Instance", ":", "[", "'auth'", ",", "'instanceId'", ",", "'application'", "]", ",", "Manifest", ":", "[", "'name'", ",", "'content'", "]", ",", "Revision", ":", "[", "'auth'", ",", "'revisionId'", "]", ",", "Environment", ":", "[", "'auth'", ",", "'environmentId'", ",", "'organization'", "]", ",", "Zone", ":", "[", "'auth'", ",", "'zoneId'", ",", "'organization'", "]", ",", "}", "def", "obj_presenter", "(", "dumper", ",", "obj", ")", ":", "for", "x", "in", "exclusion_list", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "obj", ",", "x", ")", ":", "# Find class", "fields", "=", "obj", ".", "__dict__", ".", "copy", "(", ")", "for", "excl_item", "in", "exclusion_list", "[", "x", "]", ":", "try", ":", "fields", ".", "pop", "(", "excl_item", ")", "except", ":", "log", ".", "warn", "(", "'No item %s in object %s'", "%", "(", "excl_item", ",", "x", ")", ")", "return", "dumper", ".", "represent_mapping", "(", "'tag:yaml.org,2002:map'", ",", "fields", ")", "return", "dumper", ".", "represent_mapping", "(", "'tag:yaml.org,2002:map'", ",", "obj", ".", "__dict__", ")", "noalias_dumper", "=", "yaml", ".", "dumper", ".", "Dumper", "noalias_dumper", ".", "ignore_aliases", "=", "lambda", "self", ",", "data", ":", "True", "yaml", ".", "add_representer", "(", "unicode", ",", "lambda", "dumper", ",", "value", ":", "dumper", ".", "represent_scalar", "(", "u'tag:yaml.org,2002:str'", ",", "value", ")", ")", "yaml", ".", "add_multi_representer", "(", "object", ",", "obj_presenter", ")", "serialized", "=", "yaml", ".", "dump", "(", "node", ",", "default_flow_style", "=", "False", ",", "Dumper", "=", "noalias_dumper", ")", "return", "serialized" ]
Dump initialized object structure to yaml
[ "Dump", "initialized", "object", "structure", "to", "yaml" ]
python
train
41.744681
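A hedged sketch for dump(); `platform` is assumed to be an initialized QubellPlatform, though any of the classes in the exclusion table should serialize, since unknown objects fall through to the generic representer.

```python
from qubell.api.tools import dump

serialized = dump(platform)  # YAML with auth/ids stripped per the exclusion list
print(serialized)
```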
AmesCornish/buttersink
buttersink/ioctl.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/ioctl.py#L259-L263
def read(self, structure): """ Read and advance. """ start = self.offset self.skip(structure.size) return structure.read(self.buf, start)
[ "def", "read", "(", "self", ",", "structure", ")", ":", "start", "=", "self", ".", "offset", "self", ".", "skip", "(", "structure", ".", "size", ")", "return", "structure", ".", "read", "(", "self", ".", "buf", ",", "start", ")" ]
Read and advance.
[ "Read", "and", "advance", "." ]
python
train
33
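The record above is a cursor-style reader. The sketch below is not buttersink code; it is a standalone illustration of the same read-and-advance pattern, using struct.Struct in place of buttersink's structure objects.

```python
import struct


class Cursor(object):
    """Read successive structures from a buffer, advancing an offset."""

    def __init__(self, buf):
        self.buf = buf
        self.offset = 0

    def read(self, fmt):
        s = struct.Struct(fmt)
        start = self.offset
        self.offset += s.size          # advance, as in the record's skip()
        return s.unpack_from(self.buf, start)


cur = Cursor(b'\x01\x00\x00\x00\x02\x00\x00\x00')
first, = cur.read('<I')   # 1
second, = cur.read('<I')  # 2
```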
vladcalin/gemstone
gemstone/core/microservice.py
https://github.com/vladcalin/gemstone/blob/325a49d17621b9d45ffd2b5eca6f0de284de8ba4/gemstone/core/microservice.py#L385-L393
def _add_extra_handlers(self, handlers): """ Adds the extra handler (defined by the user) :param handlers: a list of :py:class:`tornado.web.RequestHandler` instances. :return: """ extra_handlers = [(h[0], h[1], {"microservice": self}) for h in self.extra_handlers] handlers.extend(extra_handlers)
[ "def", "_add_extra_handlers", "(", "self", ",", "handlers", ")", ":", "extra_handlers", "=", "[", "(", "h", "[", "0", "]", ",", "h", "[", "1", "]", ",", "{", "\"microservice\"", ":", "self", "}", ")", "for", "h", "in", "self", ".", "extra_handlers", "]", "handlers", ".", "extend", "(", "extra_handlers", ")" ]
Adds the extra handler (defined by the user) :param handlers: a list of :py:class:`tornado.web.RequestHandler` instances. :return:
[ "Adds", "the", "extra", "handler", "(", "defined", "by", "the", "user", ")" ]
python
train
38.333333
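A hedged sketch of the extra_handlers shape this private method consumes; MicroService and StatusHandler are assumed names used only for illustration, but the (url pattern, handler class) tuple layout follows directly from the record.

```python
class MyService(MicroService):
    name = "my-service"
    # Each tuple is expanded to (pattern, handler, {"microservice": self})
    # before being handed to Tornado.
    extra_handlers = [
        (r"/status", StatusHandler),
    ]
```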
rodluger/everest
everest/basecamp.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L52-L55
def unmasked(self, depth=0.01): """Return the unmasked overfitting metric for a given transit depth.""" return 1 - (np.hstack(self._O2) + np.hstack(self._O3) / depth) / np.hstack(self._O1)
[ "def", "unmasked", "(", "self", ",", "depth", "=", "0.01", ")", ":", "return", "1", "-", "(", "np", ".", "hstack", "(", "self", ".", "_O2", ")", "+", "np", ".", "hstack", "(", "self", ".", "_O3", ")", "/", "depth", ")", "/", "np", ".", "hstack", "(", "self", ".", "_O1", ")" ]
Return the unmasked overfitting metric for a given transit depth.
[ "Return", "the", "unmasked", "overfitting", "metric", "for", "a", "given", "transit", "depth", "." ]
python
train
55.25
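A standalone numeric illustration (not everest code) of the overfitting metric in the record, unmasked = 1 - (O2 + O3 / depth) / O1, evaluated element-wise over the concatenated per-chunk arrays; the numbers are made up.

```python
import numpy as np

O1 = np.array([2.0, 4.0])
O2 = np.array([1.0, 1.0])
O3 = np.array([0.005, 0.01])
depth = 0.01

metric = 1 - (O2 + O3 / depth) / O1   # array([0.25, 0.5])
```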
klahnakoski/mo-logs
mo_logs/startup.py
https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/startup.py#L60-L87
def read_settings(filename=None, defs=None): """ :param filename: Force load a file :param defs: arguments you want to accept :param default_filename: A config file from an environment variable (a fallback config file, if no other provided) :return: """ # READ SETTINGS defs = listwrap(defs) defs.append({ "name": ["--config", "--settings", "--settings-file", "--settings_file"], "help": "path to JSON file with settings", "type": str, "dest": "filename", "default": None, "required": False }) args = argparse(defs) args.filename = coalesce(filename, args.filename, "./config.json") settings_file = File(args.filename) if not settings_file.exists: Log.error("Can not read configuration file {{filename}}", { "filename": settings_file.abspath }) settings = mo_json_config.get_file(settings_file) settings.args = args return settings
[ "def", "read_settings", "(", "filename", "=", "None", ",", "defs", "=", "None", ")", ":", "# READ SETTINGS", "defs", "=", "listwrap", "(", "defs", ")", "defs", ".", "append", "(", "{", "\"name\"", ":", "[", "\"--config\"", ",", "\"--settings\"", ",", "\"--settings-file\"", ",", "\"--settings_file\"", "]", ",", "\"help\"", ":", "\"path to JSON file with settings\"", ",", "\"type\"", ":", "str", ",", "\"dest\"", ":", "\"filename\"", ",", "\"default\"", ":", "None", ",", "\"required\"", ":", "False", "}", ")", "args", "=", "argparse", "(", "defs", ")", "args", ".", "filename", "=", "coalesce", "(", "filename", ",", "args", ".", "filename", ",", "\"./config.json\"", ")", "settings_file", "=", "File", "(", "args", ".", "filename", ")", "if", "not", "settings_file", ".", "exists", ":", "Log", ".", "error", "(", "\"Can not read configuration file {{filename}}\"", ",", "{", "\"filename\"", ":", "settings_file", ".", "abspath", "}", ")", "settings", "=", "mo_json_config", ".", "get_file", "(", "settings_file", ")", "settings", ".", "args", "=", "args", "return", "settings" ]
:param filename: Force load a file :param defs: arguments you want to accept :param default_filename: A config file from an environment variable (a fallback config file, if no other provided) :return:
[ ":", "param", "filename", ":", "Force", "load", "a", "file", ":", "param", "defs", ":", "arguments", "you", "want", "to", "accept", ":", "param", "default_filename", ":", "A", "config", "file", "from", "an", "environment", "variable", "(", "a", "fallback", "config", "file", "if", "no", "other", "provided", ")", ":", "return", ":" ]
python
train
33.892857
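A hedged usage sketch for read_settings(); the import path mirrors the record's module path, and the extra definition simply reuses the key layout of the built-in --config definition — it is illustrative, not a documented flag.

```python
from mo_logs import startup

settings = startup.read_settings(defs=[{
    "name": ["--service-name"],
    "help": "name of the service",
    "type": str,
    "dest": "service_name",
    "default": "demo",
    "required": False,
}])
# Falls back to ./config.json when no --config/--settings flag is given.
print(settings.args.filename)
```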
gwastro/pycbc
pycbc/inference/io/base_mcmc.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/io/base_mcmc.py#L148-L181
def write_acls(self, acls): """Writes the given autocorrelation lengths. The ACL of each parameter is saved to ``[sampler_group]/acls/{param}']``. The maximum over all the parameters is saved to the file's 'acl' attribute. Parameters ---------- acls : dict A dictionary of ACLs keyed by the parameter. Returns ------- ACL The maximum of the acls that was written to the file. """ group = self.sampler_group + '/acls/{}' # write the individual acls for param in acls: try: # we need to use the write_direct function because it's # apparently the only way to update scalars in h5py self[group.format(param)].write_direct( numpy.array(acls[param])) except KeyError: # dataset doesn't exist yet self[group.format(param)] = acls[param] # write the maximum over all params acl = numpy.array(acls.values()).max() self[self.sampler_group].attrs['acl'] = acl # set the default thin interval to be the acl (if it is finite) if numpy.isfinite(acl): self.thin_interval = int(numpy.ceil(acl))
[ "def", "write_acls", "(", "self", ",", "acls", ")", ":", "group", "=", "self", ".", "sampler_group", "+", "'/acls/{}'", "# write the individual acls", "for", "param", "in", "acls", ":", "try", ":", "# we need to use the write_direct function because it's", "# apparently the only way to update scalars in h5py", "self", "[", "group", ".", "format", "(", "param", ")", "]", ".", "write_direct", "(", "numpy", ".", "array", "(", "acls", "[", "param", "]", ")", ")", "except", "KeyError", ":", "# dataset doesn't exist yet", "self", "[", "group", ".", "format", "(", "param", ")", "]", "=", "acls", "[", "param", "]", "# write the maximum over all params", "acl", "=", "numpy", ".", "array", "(", "acls", ".", "values", "(", ")", ")", ".", "max", "(", ")", "self", "[", "self", ".", "sampler_group", "]", ".", "attrs", "[", "'acl'", "]", "=", "acl", "# set the default thin interval to be the acl (if it is finite)", "if", "numpy", ".", "isfinite", "(", "acl", ")", ":", "self", ".", "thin_interval", "=", "int", "(", "numpy", ".", "ceil", "(", "acl", ")", ")" ]
Writes the given autocorrelation lengths. The ACL of each parameter is saved to ``[sampler_group]/acls/{param}']``. The maximum over all the parameters is saved to the file's 'acl' attribute. Parameters ---------- acls : dict A dictionary of ACLs keyed by the parameter. Returns ------- ACL The maximum of the acls that was written to the file.
[ "Writes", "the", "given", "autocorrelation", "lengths", "." ]
python
train
37.117647
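A hedged sketch for write_acls(); `fp` is assumed to be an open PyCBC inference file object that mixes in the class this method belongs to, and the parameter names and values are placeholders.

```python
acls = {'mass1': 12.3, 'mass2': 9.8, 'distance': 41.0}
fp.write_acls(acls)
# Per-parameter values land in [sampler_group]/acls/<param>, the maximum is
# stored as the group's 'acl' attribute and, if finite, becomes the default
# thin_interval.
```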
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py#L178-L233
def update_cluster_topology(self, assignment): """Modify the cluster-topology with given assignment. Change the replica set of partitions as in given assignment. :param assignment: dict representing actions to be used to update the current cluster-topology :raises: InvalidBrokerIdError when broker-id is invalid :raises: InvalidPartitionError when partition-name is invalid """ try: for partition_name, replica_ids in six.iteritems(assignment): try: new_replicas = [self.brokers[b_id] for b_id in replica_ids] except KeyError: self.log.error( "Invalid replicas %s for topic-partition %s-%s.", ', '.join([str(id) for id in replica_ids]), partition_name[0], partition_name[1], ) raise InvalidBrokerIdError( "Invalid replicas {0}.".format( ', '.join([str(id) for id in replica_ids]) ), ) try: partition = self.partitions[partition_name] old_replicas = [broker for broker in partition.replicas] # No change needed. Save ourself some CPU time. # Replica order matters as the first one is the leader. if new_replicas == old_replicas: continue # Remove old partitions from broker # This also updates partition replicas for broker in old_replicas: broker.remove_partition(partition) # Add new partition to brokers for broker in new_replicas: broker.add_partition(partition) except KeyError: self.log.error( "Invalid topic-partition %s-%s.", partition_name[0], partition_name[1], ) raise InvalidPartitionError( "Invalid topic-partition {0}-{1}." .format(partition_name[0], partition_name[1]), ) except KeyError: self.log.error("Could not parse given assignment {0}".format(assignment)) raise
[ "def", "update_cluster_topology", "(", "self", ",", "assignment", ")", ":", "try", ":", "for", "partition_name", ",", "replica_ids", "in", "six", ".", "iteritems", "(", "assignment", ")", ":", "try", ":", "new_replicas", "=", "[", "self", ".", "brokers", "[", "b_id", "]", "for", "b_id", "in", "replica_ids", "]", "except", "KeyError", ":", "self", ".", "log", ".", "error", "(", "\"Invalid replicas %s for topic-partition %s-%s.\"", ",", "', '", ".", "join", "(", "[", "str", "(", "id", ")", "for", "id", "in", "replica_ids", "]", ")", ",", "partition_name", "[", "0", "]", ",", "partition_name", "[", "1", "]", ",", ")", "raise", "InvalidBrokerIdError", "(", "\"Invalid replicas {0}.\"", ".", "format", "(", "', '", ".", "join", "(", "[", "str", "(", "id", ")", "for", "id", "in", "replica_ids", "]", ")", ")", ",", ")", "try", ":", "partition", "=", "self", ".", "partitions", "[", "partition_name", "]", "old_replicas", "=", "[", "broker", "for", "broker", "in", "partition", ".", "replicas", "]", "# No change needed. Save ourself some CPU time.", "# Replica order matters as the first one is the leader.", "if", "new_replicas", "==", "old_replicas", ":", "continue", "# Remove old partitions from broker", "# This also updates partition replicas", "for", "broker", "in", "old_replicas", ":", "broker", ".", "remove_partition", "(", "partition", ")", "# Add new partition to brokers", "for", "broker", "in", "new_replicas", ":", "broker", ".", "add_partition", "(", "partition", ")", "except", "KeyError", ":", "self", ".", "log", ".", "error", "(", "\"Invalid topic-partition %s-%s.\"", ",", "partition_name", "[", "0", "]", ",", "partition_name", "[", "1", "]", ",", ")", "raise", "InvalidPartitionError", "(", "\"Invalid topic-partition {0}-{1}.\"", ".", "format", "(", "partition_name", "[", "0", "]", ",", "partition_name", "[", "1", "]", ")", ",", ")", "except", "KeyError", ":", "self", ".", "log", ".", "error", "(", "\"Could not parse given assignment {0}\"", ".", "format", "(", "assignment", ")", ")", "raise" ]
Modify the cluster-topology with given assignment. Change the replica set of partitions as in given assignment. :param assignment: dict representing actions to be used to update the current cluster-topology :raises: InvalidBrokerIdError when broker-id is invalid :raises: InvalidPartitionError when partition-name is invalid
[ "Modify", "the", "cluster", "-", "topology", "with", "given", "assignment", "." ]
python
train
43.732143
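A hedged sketch for update_cluster_topology(); `ct` is assumed to be a populated ClusterTopology. Keys are (topic, partition) tuples — matching partition_name[0]/partition_name[1] in the record — and values list the desired replica broker ids, leader first.

```python
assignment = {
    ('example_topic', 0): [1, 2, 3],
    ('example_topic', 1): [2, 3, 1],
}
ct.update_cluster_topology(assignment)   # raises on unknown brokers/partitions
```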