Dataset schema (one row per column):

| column | dtype | range / values |
| --- | --- | --- |
| repo | string | lengths 7–55 |
| path | string | lengths 4–223 |
| url | string | lengths 87–315 |
| code | string | lengths 75–104k |
| code_tokens | list | |
| docstring | string | lengths 1–46.9k |
| docstring_tokens | list | |
| language | string | 1 distinct value |
| partition | string | 3 distinct values |
| avg_line_len | float64 | 7.91–980 |
quantum5/2048
_2048/manager.py
https://github.com/quantum5/2048/blob/93ada2e3026eaf154e1bbee943d0500c9253e66f/_2048/manager.py#L88-L91
```python
def new_game(self):
    """Creates a new game of 2048."""
    self.game = self.game_class(self, self.screen)
    self.save()
```
[ "def", "new_game", "(", "self", ")", ":", "self", ".", "game", "=", "self", ".", "game_class", "(", "self", ",", "self", ".", "screen", ")", "self", ".", "save", "(", ")" ]
Creates a new game of 2048.
[ "Creates", "a", "new", "game", "of", "2048", "." ]
language: python · partition: train · avg_line_len: 33.25
Esri/ArcREST
src/arcrest/common/geometry.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/geometry.py#L130-L134
```python
def asArcPyObject(self):
    """ returns the Point as an ESRI arcpy.Point object """
    if arcpyFound == False:
        raise Exception("ArcPy is required to use this function")
    return arcpy.AsShape(self.asDictionary, True)
```
[ "def", "asArcPyObject", "(", "self", ")", ":", "if", "arcpyFound", "==", "False", ":", "raise", "Exception", "(", "\"ArcPy is required to use this function\"", ")", "return", "arcpy", ".", "AsShape", "(", "self", ".", "asDictionary", ",", "True", ")" ]
returns the Point as an ESRI arcpy.Point object
[ "returns", "the", "Point", "as", "an", "ESRI", "arcpy", ".", "Point", "object" ]
language: python · partition: train · avg_line_len: 48
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiNetworkIPv6.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiNetworkIPv6.py#L35-L44
```python
def get_by_id(self, id_networkv6):
    """Get IPv6 network

    :param id_networkv6: ID for NetworkIPv6
    :return: IPv6 Network
    """
    # The upstream source reads 'api/networkv4/%s/' here and documents the
    # parameter as id_networkv4 -- an apparent copy-paste slip in an IPv6
    # client; corrected to networkv6.
    uri = 'api/networkv6/%s/' % id_networkv6
    return super(ApiNetworkIPv6, self).get(uri)
```
[ "def", "get_by_id", "(", "self", ",", "id_networkv6", ")", ":", "uri", "=", "'api/networkv4/%s/'", "%", "id_networkv6", "return", "super", "(", "ApiNetworkIPv6", ",", "self", ")", ".", "get", "(", "uri", ")" ]
Get IPv6 network :param id_networkv4: ID for NetworkIPv6 :return: IPv6 Network
[ "Get", "IPv6", "network" ]
language: python · partition: train · avg_line_len: 24.7
pypa/pipenv
pipenv/vendor/jinja2/environment.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/environment.py#L1108-L1115
```python
def get_corresponding_lineno(self, lineno):
    """Return the source line number of a line number in the
    generated bytecode as they are not in sync.
    """
    for template_line, code_line in reversed(self.debug_info):
        if code_line <= lineno:
            return template_line
    return 1
```
[ "def", "get_corresponding_lineno", "(", "self", ",", "lineno", ")", ":", "for", "template_line", ",", "code_line", "in", "reversed", "(", "self", ".", "debug_info", ")", ":", "if", "code_line", "<=", "lineno", ":", "return", "template_line", "return", "1" ]
Return the source line number of a line number in the generated bytecode as they are not in sync.
[ "Return", "the", "source", "line", "number", "of", "a", "line", "number", "in", "the", "generated", "bytecode", "as", "they", "are", "not", "in", "sync", "." ]
language: python · partition: train · avg_line_len: 40.25
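The lookup walks `debug_info` (pairs of template line and generated-code line, sorted by the latter) from the end and returns the first template line whose generated code starts at or before the requested line. A minimal standalone sketch, with an invented `debug_info` sample:

```python
# (template_line, code_line) pairs, sorted by code_line; values invented
debug_info = [(1, 5), (4, 9), (7, 15)]

def corresponding_lineno(debug_info, lineno):
    # scan from the highest generated line downward
    for template_line, code_line in reversed(debug_info):
        if code_line <= lineno:
            return template_line
    return 1  # before any mapping: default to the first template line

assert corresponding_lineno(debug_info, 10) == 4  # lines 9-14 came from template line 4
assert corresponding_lineno(debug_info, 3) == 1
```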
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L686-L698
```python
def rover_yaw_rate(VFR_HUD, SERVO_OUTPUT_RAW):
    '''return yaw rate in degrees/second given steering_angle and speed'''
    max_wheel_turn = 35
    speed = VFR_HUD.groundspeed
    # assume 1100 to 1900 PWM on steering
    steering_angle = max_wheel_turn * (SERVO_OUTPUT_RAW.servo1_raw - 1500) / 400.0
    if abs(steering_angle) < 1.0e-6 or abs(speed) < 1.0e-6:
        return 0
    d = rover_turn_circle(SERVO_OUTPUT_RAW)
    c = pi * d
    t = c / speed
    rate = 360.0 / t
    return rate
```
[ "def", "rover_yaw_rate", "(", "VFR_HUD", ",", "SERVO_OUTPUT_RAW", ")", ":", "max_wheel_turn", "=", "35", "speed", "=", "VFR_HUD", ".", "groundspeed", "# assume 1100 to 1900 PWM on steering", "steering_angle", "=", "max_wheel_turn", "*", "(", "SERVO_OUTPUT_RAW", ".", "servo1_raw", "-", "1500", ")", "/", "400.0", "if", "abs", "(", "steering_angle", ")", "<", "1.0e-6", "or", "abs", "(", "speed", ")", "<", "1.0e-6", ":", "return", "0", "d", "=", "rover_turn_circle", "(", "SERVO_OUTPUT_RAW", ")", "c", "=", "pi", "*", "d", "t", "=", "c", "/", "speed", "rate", "=", "360.0", "/", "t", "return", "rate" ]
return yaw rate in degrees/second given steering_angle and speed
[ "return", "yaw", "rate", "in", "degrees", "/", "second", "given", "steering_angle", "and", "speed" ]
language: python · partition: train · avg_line_len: 36.846154
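The yaw-rate arithmetic is plain circle geometry: the rover needs `t = pi * d / speed` seconds to drive its turn circle of diameter `d`, and one circle is 360 degrees of heading change. A standalone sketch of just that step (`rover_turn_circle` is not shown in the record, so the diameter here is an invented input):

```python
from math import pi

def yaw_rate_deg_per_s(speed, turn_circle_diameter):
    # time for one full circle = circumference / speed; one circle = 360 deg
    t = pi * turn_circle_diameter / speed
    return 360.0 / t

print(yaw_rate_deg_per_s(3.0, 10.0))  # ~34.4 deg/s for a 10 m circle at 3 m/s
```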
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1778-L1788
```python
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
    """Load cookies from a file."""
    if filename is None:
        if self.filename is not None:
            filename = self.filename
        else:
            raise ValueError(MISSING_FILENAME_TEXT)
    f = open(filename)
    try:
        self._really_load(f, filename, ignore_discard, ignore_expires)
    finally:
        f.close()
```
[ "def", "load", "(", "self", ",", "filename", "=", "None", ",", "ignore_discard", "=", "False", ",", "ignore_expires", "=", "False", ")", ":", "if", "filename", "is", "None", ":", "if", "self", ".", "filename", "is", "not", "None", ":", "filename", "=", "self", ".", "filename", "else", ":", "raise", "ValueError", "(", "MISSING_FILENAME_TEXT", ")", "f", "=", "open", "(", "filename", ")", "try", ":", "self", ".", "_really_load", "(", "f", ",", "filename", ",", "ignore_discard", ",", "ignore_expires", ")", "finally", ":", "f", ".", "close", "(", ")" ]
Load cookies from a file.
[ "Load", "cookies", "from", "a", "file", "." ]
language: python · partition: train · avg_line_len: 37.545455
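This `load` lives on a `FileCookieJar`-style class; a hedged usage sketch with the standard library's `MozillaCookieJar` subclass, which inherits the same behaviour ('cookies.txt' is a placeholder path that must already exist, in Netscape format):

```python
from http.cookiejar import MozillaCookieJar

jar = MozillaCookieJar('cookies.txt')  # filename is stored for later load()/save()
jar.load(ignore_discard=True, ignore_expires=True)  # no argument: falls back to self.filename
for cookie in jar:
    print(cookie.name, cookie.domain)
```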
numenta/nupic
src/nupic/swarming/exp_generator/experiment_generator.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/exp_generator/experiment_generator.py#L272-L296
```python
def _indentLines(str, indentLevels=1, indentFirstLine=True):
    """ Indent all lines in the given string

    str: input string
    indentLevels: number of levels of indentation to apply
    indentFirstLine: if False, the 1st line will not be indented

    Returns: The result string with all lines indented
    """
    indent = _ONE_INDENT * indentLevels
    lines = str.splitlines(True)
    result = ''
    if len(lines) > 0 and not indentFirstLine:
        first = 1
        result += lines[0]
    else:
        first = 0
    for line in lines[first:]:
        result += indent + line
    return result
```
[ "def", "_indentLines", "(", "str", ",", "indentLevels", "=", "1", ",", "indentFirstLine", "=", "True", ")", ":", "indent", "=", "_ONE_INDENT", "*", "indentLevels", "lines", "=", "str", ".", "splitlines", "(", "True", ")", "result", "=", "''", "if", "len", "(", "lines", ")", ">", "0", "and", "not", "indentFirstLine", ":", "first", "=", "1", "result", "+=", "lines", "[", "0", "]", "else", ":", "first", "=", "0", "for", "line", "in", "lines", "[", "first", ":", "]", ":", "result", "+=", "indent", "+", "line", "return", "result" ]
Indent all lines in the given string str: input string indentLevels: number of levels of indentation to apply indentFirstLine: if False, the 1st line will not be indented Returns: The result string with all lines indented
[ "Indent", "all", "lines", "in", "the", "given", "string" ]
language: python · partition: valid · avg_line_len: 22.44
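The same `splitlines(True)`-based logic as a self-contained sketch (`ONE_INDENT` is an assumed two-space stand-in for the module's private `_ONE_INDENT` constant, whose value is not shown in the record):

```python
ONE_INDENT = '  '  # assumed value; the real constant is not shown

def indent_lines(text, levels=1, indent_first=True):
    indent = ONE_INDENT * levels
    lines = text.splitlines(True)  # keepends=True, so newlines survive
    first = 0 if (indent_first or not lines) else 1
    head = lines[0] if first == 1 else ''
    return head + ''.join(indent + line for line in lines[first:])

print(indent_lines('a:\nb\n', levels=2, indent_first=False), end='')
# prints 'a:' unindented, then 'b' indented four spaces
```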
pkgw/pwkit
pwkit/synphot.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/synphot.py#L474-L482
```python
def register_halfmaxes(self, telescope, band, lower, upper):
    """Register precomputed half-max points."""
    if (telescope, band) in self._halfmaxes:
        raise AlreadyDefinedError('half-max points for %s/%s already '
                                  'defined', telescope, band)
    self._note(telescope, band)
    self._halfmaxes[telescope, band] = (lower, upper)
    return self
```
[ "def", "register_halfmaxes", "(", "self", ",", "telescope", ",", "band", ",", "lower", ",", "upper", ")", ":", "if", "(", "telescope", ",", "band", ")", "in", "self", ".", "_halfmaxes", ":", "raise", "AlreadyDefinedError", "(", "'half-max points for %s/%s already '", "'defined'", ",", "telescope", ",", "band", ")", "self", ".", "_note", "(", "telescope", ",", "band", ")", "self", ".", "_halfmaxes", "[", "telescope", ",", "band", "]", "=", "(", "lower", ",", "upper", ")", "return", "self" ]
Register precomputed half-max points.
[ "Register", "precomputed", "half", "-", "max", "points", "." ]
language: python · partition: train · avg_line_len: 45.333333
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L927-L949
```python
def cp_files(self, source, target, delete_source=False):
    '''Copy files

    This function can handle multiple files if source S3 URL has
    wildcard characters. It also handles recursive mode by copying all
    files and keep the directory structure.
    '''
    pool = ThreadPool(ThreadUtil, self.opt)
    source = self.source_expand(source)

    if target[-1] == PATH_SEP:
        for src in source:
            self.cp_single_file(pool, src,
                                os.path.join(target, self.get_basename(S3URL(src).path)),
                                delete_source)
    else:
        if len(source) > 1:
            raise Failure('Target "%s" is not a directory (with a trailing slash).' % target)
        # Copy file if it exists otherwise do nothing
        elif len(source) == 1:
            self.cp_single_file(pool, source[0], target, delete_source)
        else:
            # Source expand may return empty list only if ignore-empty-source is set to true
            pass

    pool.join()
```
[ "def", "cp_files", "(", "self", ",", "source", ",", "target", ",", "delete_source", "=", "False", ")", ":", "pool", "=", "ThreadPool", "(", "ThreadUtil", ",", "self", ".", "opt", ")", "source", "=", "self", ".", "source_expand", "(", "source", ")", "if", "target", "[", "-", "1", "]", "==", "PATH_SEP", ":", "for", "src", "in", "source", ":", "self", ".", "cp_single_file", "(", "pool", ",", "src", ",", "os", ".", "path", ".", "join", "(", "target", ",", "self", ".", "get_basename", "(", "S3URL", "(", "src", ")", ".", "path", ")", ")", ",", "delete_source", ")", "else", ":", "if", "len", "(", "source", ")", ">", "1", ":", "raise", "Failure", "(", "'Target \"%s\" is not a directory (with a trailing slash).'", "%", "target", ")", "# Copy file if it exists otherwise do nothing", "elif", "len", "(", "source", ")", "==", "1", ":", "self", ".", "cp_single_file", "(", "pool", ",", "source", "[", "0", "]", ",", "target", ",", "delete_source", ")", "else", ":", "# Source expand may return empty list only if ignore-empty-source is set to true", "pass", "pool", ".", "join", "(", ")" ]
Copy files This function can handle multiple files if source S3 URL has wildcard characters. It also handles recursive mode by copying all files and keep the directory structure.
[ "Copy", "files", "This", "function", "can", "handle", "multiple", "files", "if", "source", "S3", "URL", "has", "wildcard", "characters", ".", "It", "also", "handles", "recursive", "mode", "by", "copying", "all", "files", "and", "keep", "the", "directory", "structure", "." ]
language: python · partition: test · avg_line_len: 39.565217
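The dispatch rule is worth isolating: a trailing separator marks the target as a directory (each source is copied under its basename), otherwise more than one source is an error. A simplified sketch using plain `os.path` in place of s4cmd's `S3URL`/`ThreadPool` machinery (`plan_copy` is a hypothetical name):

```python
import os

def plan_copy(sources, target, sep='/'):
    if target.endswith(sep):  # directory target: one copy per source
        return [(s, os.path.join(target, os.path.basename(s))) for s in sources]
    if len(sources) > 1:
        raise ValueError('Target "%s" is not a directory (with a trailing slash).' % target)
    return [(sources[0], target)] if sources else []  # empty only with ignore-empty-source

print(plan_copy(['s3://b/x.txt', 's3://b/y.txt'], '/tmp/out/'))
```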
hammerlab/cohorts
cohorts/cohort.py
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1171-L1193
```python
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs):
    """Plot an ROC curve for benefit and a given variable

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    bootstrap_samples : int, optional
        Number of bootstrap samples to use to compute the AUC
    ax : Axes, default None
        Axes to plot on

    Returns
    -------
    (mean_auc_score, plot): (float, matplotlib plot)
        Returns the average AUC for the given predictor over
        `bootstrap_samples` and the associated ROC curve
    """
    plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs)
    df = filter_not_null(df, "benefit")
    df = filter_not_null(df, plot_col)
    df.benefit = df.benefit.astype(bool)
    return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax)
```
[ "def", "plot_roc_curve", "(", "self", ",", "on", ",", "bootstrap_samples", "=", "100", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "plot_col", ",", "df", "=", "self", ".", "as_dataframe", "(", "on", ",", "return_cols", "=", "True", ",", "*", "*", "kwargs", ")", "df", "=", "filter_not_null", "(", "df", ",", "\"benefit\"", ")", "df", "=", "filter_not_null", "(", "df", ",", "plot_col", ")", "df", ".", "benefit", "=", "df", ".", "benefit", ".", "astype", "(", "bool", ")", "return", "roc_curve_plot", "(", "df", ",", "plot_col", ",", "\"benefit\"", ",", "bootstrap_samples", ",", "ax", "=", "ax", ")" ]
Plot an ROC curve for benefit and a given variable Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` bootstrap_samples : int, optional Number of boostrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_auc_score, plot): (float, matplotlib plot) Returns the average AUC for the given predictor over `bootstrap_samples` and the associated ROC curve
[ "Plot", "an", "ROC", "curve", "for", "benefit", "and", "a", "given", "variable" ]
language: python · partition: train · avg_line_len: 39.956522
Clarify/clarify_python
clarify_python/clarify.py
https://github.com/Clarify/clarify_python/blob/1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb/clarify_python/clarify.py#L829-L848
```python
def _get_headers(self):
    """Get all the headers we're going to need:

    1. Authorization
    2. Content-Type
    3. User-agent

    Note that the User-agent string contains the library name, the
    library version, and the python version. This will help us track
    what people are using, and where we should concentrate our
    development efforts."""

    user_agent = __api_lib_name__ + '/' + __version__ + '/' + PYTHON_VERSION
    headers = {'User-Agent': user_agent,
               'Content-Type': 'application/x-www-form-urlencoded'}
    if self.key:
        headers['Authorization'] = 'Bearer ' + self.key
    return headers
```
[ "def", "_get_headers", "(", "self", ")", ":", "user_agent", "=", "__api_lib_name__", "+", "'/'", "+", "__version__", "+", "'/'", "+", "PYTHON_VERSION", "headers", "=", "{", "'User-Agent'", ":", "user_agent", ",", "'Content-Type'", ":", "'application/x-www-form-urlencoded'", "}", "if", "self", ".", "key", ":", "headers", "[", "'Authorization'", "]", "=", "'Bearer '", "+", "self", ".", "key", "return", "headers" ]
Get all the headers we're going to need: 1. Authorization 2. Content-Type 3. User-agent Note that the User-agent string contains the library name, the libary version, and the python version. This will help us track what people are using, and where we should concentrate our development efforts.
[ "Get", "all", "the", "headers", "we", "re", "going", "to", "need", ":" ]
language: python · partition: train · avg_line_len: 34.45
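A self-contained version of the same header assembly (the library-name and version constants below are placeholder stand-ins for `__api_lib_name__`, `__version__`, and `PYTHON_VERSION`):

```python
import sys

LIB_NAME, LIB_VERSION = 'clarify_python', '0.0'  # placeholder values
PY_VERSION = '%d.%d.%d' % sys.version_info[:3]

def build_headers(api_key=None):
    headers = {
        'User-Agent': '/'.join([LIB_NAME, LIB_VERSION, PY_VERSION]),
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    if api_key:  # Authorization header only when a key is configured
        headers['Authorization'] = 'Bearer ' + api_key
    return headers

print(build_headers('secret-token'))
```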
yohell/python-tui
tui/__init__.py
https://github.com/yohell/python-tui/blob/de2e678e2f00e5940de52c000214dbcb8812a222/tui/__init__.py#L681-L703
```python
def parsestr(self, argsstr, usedname, location):
    """Parse a string lexically and store the result.

    ARGS:
    argsstr <str>: The string to parse.
    usedname <str>: The string used by the user to invoke the option.
    location <str>: A user friendly string describing where the parser
        got this data from.
    """
    try:
        value = self.format.parsestr(argsstr)
    except formats.BadNumberOfArguments, e:  # Python 2 'except' syntax, as in the source
        raise BadNumberOfArguments(usedname, e.required, e.supplied)
    except formats.BadArgument, e:
        raise BadArgument(usedname, e.argument, e.message)
    if self.recurring:
        self.value.append(value)
    else:
        self.value = value
    self.location = location
```
[ "def", "parsestr", "(", "self", ",", "argsstr", ",", "usedname", ",", "location", ")", ":", "try", ":", "value", "=", "self", ".", "format", ".", "parsestr", "(", "argsstr", ")", "except", "formats", ".", "BadNumberOfArguments", ",", "e", ":", "raise", "BadNumberOfArguments", "(", "usedname", ",", "e", ".", "required", ",", "e", ".", "supplied", ")", "except", "formats", ".", "BadArgument", ",", "e", ":", "raise", "BadArgument", "(", "usedname", ",", "e", ".", "argument", ",", "e", ".", "message", ")", "if", "self", ".", "recurring", ":", "self", ".", "value", ".", "append", "(", "value", ")", "else", ":", "self", ".", "value", "=", "value", "self", ".", "location", "=", "location" ]
Parse a string lexically and store the result. ARGS: argsstr <str>: The string to parse. usedname <str>: The string used by the user to invoke the option. location <str>: A user friendly sring describing where the parser got this data from.
[ "Parse", "a", "string", "lexically", "and", "store", "the", "result", ".", "ARGS", ":", "argsstr", "<str", ">", ":", "The", "string", "to", "parse", ".", "usedname", "<str", ">", ":", "The", "string", "used", "by", "the", "user", "to", "invoke", "the", "option", ".", "location", "<str", ">", ":", "A", "user", "friendly", "sring", "describing", "where", "the", "parser", "got", "this", "data", "from", "." ]
language: python · partition: valid · avg_line_len: 34.73913
LonamiWebs/Telethon
telethon/tl/core/gzippacked.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/core/gzippacked.py#L14-L25
```python
def gzip_if_smaller(content_related, data):
    """Calls bytes(request), and based on a certain threshold,
    optionally gzips the resulting data. If the gzipped data is
    smaller than the original byte array, this is returned instead.

    Note that this only applies to content related requests.
    """
    if content_related and len(data) > 512:
        gzipped = bytes(GzipPacked(data))
        return gzipped if len(gzipped) < len(data) else data
    else:
        return data
```
[ "def", "gzip_if_smaller", "(", "content_related", ",", "data", ")", ":", "if", "content_related", "and", "len", "(", "data", ")", ">", "512", ":", "gzipped", "=", "bytes", "(", "GzipPacked", "(", "data", ")", ")", "return", "gzipped", "if", "len", "(", "gzipped", ")", "<", "len", "(", "data", ")", "else", "data", "else", ":", "return", "data" ]
Calls bytes(request), and based on a certain threshold, optionally gzips the resulting data. If the gzipped data is smaller than the original byte array, this is returned instead. Note that this only applies to content related requests.
[ "Calls", "bytes", "(", "request", ")", "and", "based", "on", "a", "certain", "threshold", "optionally", "gzips", "the", "resulting", "data", ".", "If", "the", "gzipped", "data", "is", "smaller", "than", "the", "original", "byte", "array", "this", "is", "returned", "instead", "." ]
language: python · partition: train · avg_line_len: 43.583333
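The size-guard pattern in isolation, substituting the standard `gzip` module for Telethon's `GzipPacked` TL wrapper (the 512-byte threshold is kept, but the container format differs from Telethon's):

```python
import gzip

def gzip_if_smaller(data: bytes, threshold: int = 512) -> bytes:
    if len(data) <= threshold:
        return data              # small payloads are not worth compressing
    packed = gzip.compress(data)
    return packed if len(packed) < len(data) else data  # keep whichever is smaller

print(len(gzip_if_smaller(b'x' * 10_000)))  # far smaller than 10000
```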
cackharot/suds-py3
suds/wsdl.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/wsdl.py#L664-L675
```python
def resolveport(self, definitions):
    """
    Resolve port_type reference.
    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    """
    ref = qualify(self.type, self.root, definitions.tns)
    port_type = definitions.port_types.get(ref)
    if port_type is None:
        raise Exception("portType '%s', not-found" % self.type)
    else:
        self.type = port_type
```
[ "def", "resolveport", "(", "self", ",", "definitions", ")", ":", "ref", "=", "qualify", "(", "self", ".", "type", ",", "self", ".", "root", ",", "definitions", ".", "tns", ")", "port_type", "=", "definitions", ".", "port_types", ".", "get", "(", "ref", ")", "if", "port_type", "is", "None", ":", "raise", "Exception", "(", "\"portType '%s', not-found\"", "%", "self", ".", "type", ")", "else", ":", "self", ".", "type", "=", "port_type" ]
Resolve port_type reference. @param definitions: A definitions object. @type definitions: L{Definitions}
[ "Resolve", "port_type", "reference", "." ]
language: python · partition: train · avg_line_len: 36.333333
PBR/MQ2
MQ2/mapchart.py
https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/mapchart.py#L40-L99
```python
def _extrac_qtl(peak, block, headers):
    """ Given a row containing the peak of the QTL and all the rows of
    the linkage group of the said QTL (splitted per trait), determine
    the QTL interval and find the start and stop marker of the said
    interval. The interval is a LOD 2 interval. The approach is
    conservative in the way it takes the first and last marker within
    the interval.

    :arg peak, a list containing the row information for the peak marker
    :arg block, a hash containing per column, all the rows in the
        linkage group of this QTL, splitted per trait.
    :arg headers, the first row of the QTL matrix file, used to
        determine which block to look at for each trait process.
    """
    qtls = []
    if not peak:
        return qtls
    threshold = 2
    for trait in peak:
        blockcnt = headers.index(trait)
        local_block = block[blockcnt]
        lod2_threshold = float(peak[trait][-1]) - float(threshold)

        # Search QTL start
        cnt = local_block.index(peak[trait])
        start = local_block[cnt]
        while cnt >= 0:
            start = local_block[cnt]
            if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
                cnt = cnt - 1
                continue
            if float(local_block[cnt][-1]) < lod2_threshold:
                break
            cnt = cnt - 1

        # Search QTL end
        end = []
        cnt = local_block.index(peak[trait])
        end = local_block[cnt]
        while cnt < len(local_block):
            end = local_block[cnt]
            if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
                cnt += 1
                continue
            if float(local_block[cnt][-1]) < lod2_threshold:
                break
            cnt = cnt + 1

        qtl = QTL()
        qtl.trait = trait
        qtl.start_mk = start[0]
        qtl.start_position = start[2]
        qtl.peak_mk = peak[trait][0]
        qtl.peak_start_position = peak[trait][2]
        qtl.peak_stop_position = peak[trait][2]
        qtl.stop_mk = end[0]
        qtl.stop_position = end[2]
        qtls.append(qtl)
    return qtls
```
[ "def", "_extrac_qtl", "(", "peak", ",", "block", ",", "headers", ")", ":", "qtls", "=", "[", "]", "if", "not", "peak", ":", "return", "qtls", "threshold", "=", "2", "for", "trait", "in", "peak", ":", "blockcnt", "=", "headers", ".", "index", "(", "trait", ")", "local_block", "=", "block", "[", "blockcnt", "]", "lod2_threshold", "=", "float", "(", "peak", "[", "trait", "]", "[", "-", "1", "]", ")", "-", "float", "(", "threshold", ")", "# Search QTL start", "cnt", "=", "local_block", ".", "index", "(", "peak", "[", "trait", "]", ")", "start", "=", "local_block", "[", "cnt", "]", "while", "cnt", ">=", "0", ":", "start", "=", "local_block", "[", "cnt", "]", "if", "re", ".", "match", "(", "r'c\\d+\\.loc[\\d\\.]+'", ",", "local_block", "[", "cnt", "]", "[", "0", "]", ")", ":", "cnt", "=", "cnt", "-", "1", "continue", "if", "float", "(", "local_block", "[", "cnt", "]", "[", "-", "1", "]", ")", "<", "lod2_threshold", ":", "break", "cnt", "=", "cnt", "-", "1", "# Search QTL end", "end", "=", "[", "]", "cnt", "=", "local_block", ".", "index", "(", "peak", "[", "trait", "]", ")", "end", "=", "local_block", "[", "cnt", "]", "while", "cnt", "<", "len", "(", "local_block", ")", ":", "end", "=", "local_block", "[", "cnt", "]", "if", "re", ".", "match", "(", "r'c\\d+\\.loc[\\d\\.]+'", ",", "local_block", "[", "cnt", "]", "[", "0", "]", ")", ":", "cnt", "+=", "1", "continue", "if", "float", "(", "local_block", "[", "cnt", "]", "[", "-", "1", "]", ")", "<", "lod2_threshold", ":", "break", "cnt", "=", "cnt", "+", "1", "qtl", "=", "QTL", "(", ")", "qtl", ".", "trait", "=", "trait", "qtl", ".", "start_mk", "=", "start", "[", "0", "]", "qtl", ".", "start_position", "=", "start", "[", "2", "]", "qtl", ".", "peak_mk", "=", "peak", "[", "trait", "]", "[", "0", "]", "qtl", ".", "peak_start_position", "=", "peak", "[", "trait", "]", "[", "2", "]", "qtl", ".", "peak_stop_position", "=", "peak", "[", "trait", "]", "[", "2", "]", "qtl", ".", "stop_mk", "=", "end", "[", "0", "]", "qtl", ".", "stop_position", "=", "end", "[", "2", "]", "qtls", ".", "append", "(", "qtl", ")", "return", "qtls" ]
Given a row containing the peak of the QTL and all the rows of the linkage group of the said QTL (splitted per trait), determine the QTL interval and find the start and stop marker of the said interval. The interval is a LOD 2 interval. The approach is conservative in the way it takes the first and last marker within the interval. :arg peak, a list containing the row information for the peak marker :arg block, a hash containing per column, all the rows in the linkage group of this QTL, splitted per trait. :arg headers, the first row of the QTL matrix file, used to determine which block to look at for each trait process.
[ "Given", "a", "row", "containing", "the", "peak", "of", "the", "QTL", "and", "all", "the", "rows", "of", "the", "linkage", "group", "of", "the", "said", "QTL", "(", "splitted", "per", "trait", ")", "determine", "the", "QTL", "interval", "and", "find", "the", "start", "and", "stop", "marker", "of", "the", "said", "interval", ".", "The", "interval", "is", "a", "LOD", "2", "interval", ".", "The", "approach", "is", "conservative", "in", "the", "way", "it", "takes", "the", "first", "and", "last", "marker", "within", "the", "interval", "." ]
language: python · partition: train · avg_line_len: 34.75
gem/oq-engine
openquake/hazardlib/mfd/multi_mfd.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/mfd/multi_mfd.py#L121-L130
```python
def get_min_max_mag(self):
    """
    :returns: minimum and maximum magnitudes from the underlying MFDs
    """
    m1s, m2s = [], []
    for mfd in self:
        m1, m2 = mfd.get_min_max_mag()
        m1s.append(m1)
        m2s.append(m2)
    return min(m1s), max(m2s)
```
[ "def", "get_min_max_mag", "(", "self", ")", ":", "m1s", ",", "m2s", "=", "[", "]", ",", "[", "]", "for", "mfd", "in", "self", ":", "m1", ",", "m2", "=", "mfd", ".", "get_min_max_mag", "(", ")", "m1s", ".", "append", "(", "m1", ")", "m2s", ".", "append", "(", "m2", ")", "return", "min", "(", "m1s", ")", ",", "max", "(", "m2s", ")" ]
:returns: minumum and maximum magnitudes from the underlying MFDs
[ ":", "returns", ":", "minumum", "and", "maximum", "magnitudes", "from", "the", "underlying", "MFDs" ]
language: python · partition: train · avg_line_len: 29.7
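The aggregation is simply the min of the per-MFD minima and the max of the maxima; a toy sketch with stand-in MFD objects (values invented):

```python
class FakeMFD:  # stand-in for openquake's MFD classes
    def __init__(self, mmin, mmax):
        self._range = (mmin, mmax)
    def get_min_max_mag(self):
        return self._range

mfds = [FakeMFD(4.5, 6.0), FakeMFD(5.0, 7.2)]
m1s, m2s = zip(*(m.get_min_max_mag() for m in mfds))
print(min(m1s), max(m2s))  # -> 4.5 7.2
```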
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/io.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/io.py#L349-L354
```python
def show(self):
    """write my output to sys.stdout/err as appropriate"""
    sys.stdout.write(self.stdout)
    sys.stderr.write(self.stderr)
    sys.stdout.flush()
    sys.stderr.flush()
```
[ "def", "show", "(", "self", ")", ":", "sys", ".", "stdout", ".", "write", "(", "self", ".", "stdout", ")", "sys", ".", "stderr", ".", "write", "(", "self", ".", "stderr", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stderr", ".", "flush", "(", ")" ]
write my output to sys.stdout/err as appropriate
[ "write", "my", "output", "to", "sys", ".", "stdout", "/", "err", "as", "appropriate" ]
language: python · partition: test · avg_line_len: 33.833333
Kaggle/kaggle-api
kaggle/api/kaggle_api_extended.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2359-L2386
```python
def upload_complete(self, path, url, quiet):
    """ function to complete an upload to retrieve a path from a url

    Parameters
    ==========
    path: the path for the upload that is read in
    url: the url to send the POST to
    quiet: suppress verbose output (default is False)
    """
    file_size = os.path.getsize(path)
    try:
        with tqdm(
                total=file_size,
                unit='B',
                unit_scale=True,
                unit_divisor=1024,
                disable=quiet) as progress_bar:
            with io.open(path, 'rb', buffering=0) as fp:
                reader = TqdmBufferedReader(fp, progress_bar)
                session = requests.Session()
                retries = Retry(total=10, backoff_factor=0.5)
                adapter = HTTPAdapter(max_retries=retries)
                session.mount('http://', adapter)
                session.mount('https://', adapter)
                # note: the request is a PUT, despite the docstring's 'POST'
                response = session.put(url, data=reader)
    except Exception as error:
        print(error)
        return False
    return response.status_code == 200 or response.status_code == 201
```
[ "def", "upload_complete", "(", "self", ",", "path", ",", "url", ",", "quiet", ")", ":", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "path", ")", "try", ":", "with", "tqdm", "(", "total", "=", "file_size", ",", "unit", "=", "'B'", ",", "unit_scale", "=", "True", ",", "unit_divisor", "=", "1024", ",", "disable", "=", "quiet", ")", "as", "progress_bar", ":", "with", "io", ".", "open", "(", "path", ",", "'rb'", ",", "buffering", "=", "0", ")", "as", "fp", ":", "reader", "=", "TqdmBufferedReader", "(", "fp", ",", "progress_bar", ")", "session", "=", "requests", ".", "Session", "(", ")", "retries", "=", "Retry", "(", "total", "=", "10", ",", "backoff_factor", "=", "0.5", ")", "adapter", "=", "HTTPAdapter", "(", "max_retries", "=", "retries", ")", "session", ".", "mount", "(", "'http://'", ",", "adapter", ")", "session", ".", "mount", "(", "'https://'", ",", "adapter", ")", "response", "=", "session", ".", "put", "(", "url", ",", "data", "=", "reader", ")", "except", "Exception", "as", "error", ":", "print", "(", "error", ")", "return", "False", "return", "response", ".", "status_code", "==", "200", "or", "response", ".", "status_code", "==", "201" ]
function to complete an upload to retrieve a path from a url Parameters ========== path: the path for the upload that is read in url: the url to send the POST to quiet: suppress verbose output (default is False)
[ "function", "to", "complete", "an", "upload", "to", "retrieve", "a", "path", "from", "a", "url", "Parameters", "==========", "path", ":", "the", "path", "for", "the", "upload", "that", "is", "read", "in", "url", ":", "the", "url", "to", "send", "the", "POST", "to", "quiet", ":", "suppress", "verbose", "output", "(", "default", "is", "False", ")" ]
language: python · partition: train · avg_line_len: 43.607143
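The retry-mounted session is reusable on its own; a sketch of just that piece (these are real `requests`/`urllib3` APIs, but the function name is invented here):

```python
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_retrying_session(total=10, backoff_factor=0.5):
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(total=total, backoff_factor=backoff_factor))
    session.mount('http://', adapter)   # retry transient failures on both schemes
    session.mount('https://', adapter)
    return session

# session = make_retrying_session(); session.put(url, data=reader), as above
```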
yjzhang/uncurl_python
uncurl/dimensionality_reduction.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/dimensionality_reduction.py#L9-L28
```python
def diffusion_mds(means, weights, d, diffusion_rounds=10):
    """
    Dimensionality reduction using MDS, while running diffusion on W.

    Args:
        means (array): genes x clusters
        weights (array): clusters x cells
        d (int): desired dimensionality

    Returns:
        W_reduced (array): array of shape (d, cells)
    """
    for i in range(diffusion_rounds):
        weights = weights * weights
        weights = weights / weights.sum(0)
    X = dim_reduce(means, weights, d)
    if X.shape[0] == 2:
        return X.dot(weights)
    else:
        return X.T.dot(weights)
```
[ "def", "diffusion_mds", "(", "means", ",", "weights", ",", "d", ",", "diffusion_rounds", "=", "10", ")", ":", "for", "i", "in", "range", "(", "diffusion_rounds", ")", ":", "weights", "=", "weights", "*", "weights", "weights", "=", "weights", "/", "weights", ".", "sum", "(", "0", ")", "X", "=", "dim_reduce", "(", "means", ",", "weights", ",", "d", ")", "if", "X", ".", "shape", "[", "0", "]", "==", "2", ":", "return", "X", ".", "dot", "(", "weights", ")", "else", ":", "return", "X", ".", "T", ".", "dot", "(", "weights", ")" ]
Dimensionality reduction using MDS, while running diffusion on W. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells)
[ "Dimensionality", "reduction", "using", "MDS", "while", "running", "diffusion", "on", "W", "." ]
language: python · partition: train · avg_line_len: 28.5
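The diffusion step squares the clusters x cells weight matrix elementwise and renormalises each column (cell) to sum to 1, which sharpens each cell's assignment toward its dominant cluster. A small numpy demonstration with made-up weights:

```python
import numpy as np

weights = np.array([[0.6, 0.3],         # clusters x cells, columns sum to 1
                    [0.4, 0.7]])
for _ in range(10):                     # ten diffusion rounds, as the default above
    weights = weights * weights         # elementwise square
    weights = weights / weights.sum(0)  # renormalise each cell's column
print(weights.round(3))                 # columns collapse toward one-hot
```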
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L14341-L14424
```python
def unregister(self, cleanup_mode):
    """Unregisters a machine previously registered with
    :py:func:`IVirtualBox.register_machine` and optionally do additional
    cleanup before the machine is unregistered.

    This method does not delete any files. It only changes the machine
    configuration and the list of registered machines in the VirtualBox
    object. To delete the files which belonged to the machine, including the
    XML file of the machine itself, call :py:func:`delete_config`, optionally
    with the array of IMedium objects which was returned from this method.

    How thoroughly this method cleans up the machine configuration before
    unregistering the machine depends on the @a cleanupMode argument.

    With "UnregisterOnly", the machine will only be unregistered, but no
    additional cleanup will be performed. The call will fail if the machine
    is in "Saved" state or has any snapshots or any media attached (see
    :py:class:`IMediumAttachment`). It is the responsibility of the caller to
    delete all such configuration in this mode. In this mode, the API behaves
    like the former @c IVirtualBox::unregisterMachine() API which it replaces.

    With "DetachAllReturnNone", the call will succeed even if the machine is
    in "Saved" state or if it has snapshots or media attached. All media
    attached to the current machine state or in snapshots will be detached.
    No medium objects will be returned; all of the machine's media will
    remain open.

    With "DetachAllReturnHardDisksOnly", the call will behave like with
    "DetachAllReturnNone", except that all the hard disk medium objects which
    were detached from the machine will be returned as an array. This allows
    for quickly passing them to the :py:func:`delete_config` API for closing
    and deletion.

    With "Full", the call will behave like with
    "DetachAllReturnHardDisksOnly", except that all media will be returned in
    the array, including removable media like DVDs and floppies. This might
    be useful if the user wants to inspect in detail which media were
    attached to the machine. Be careful when passing the media array to
    :py:func:`delete_config` in that case because users will typically want
    to preserve ISO and RAW image files.

    A typical implementation will use "DetachAllReturnHardDisksOnly" and then
    pass the resulting IMedium array to :py:func:`delete_config`. This way,
    the machine is completely deleted with all its saved states and hard disk
    images, but images for removable drives (such as ISO and RAW files) will
    remain on disk.

    This API does not verify whether the media files returned in the array
    are still attached to other machines (i.e. shared between several
    machines). If such a shared image is passed to :py:func:`delete_config`
    however, closing the image will fail there and the image will be silently
    skipped.

    This API may, however, move media from this machine's media registry to
    other media registries (see :py:class:`IMedium` for details on media
    registries). For machines created with VirtualBox 4.0 or later, if media
    from this machine's media registry are also attached to another machine
    (shared attachments), each such medium will be moved to another machine's
    registry. This is because without this machine's media registry, the
    other machine cannot find its media any more and would become
    inaccessible.

    This API implicitly calls :py:func:`save_settings` to save all current
    machine settings before unregistering it. It may also silently call
    :py:func:`save_settings` on other machines if media are moved to other
    machines' media registries.

    After successful method invocation, the
    :py:class:`IMachineRegisteredEvent` event is fired.

    The call will fail if the machine is currently locked (see
    :py:class:`ISession`).

    If the given machine is inaccessible (see :py:func:`accessible`), it will
    be unregistered and fully uninitialized right afterwards. As a result,
    the returned machine object will be unusable and an attempt to call
    **any** method will return the "Object not ready" error.

    in cleanup_mode of type :class:`CleanupMode`
        How to clean up after the machine has been unregistered.

    return media of type :class:`IMedium`
        List of media detached from the machine, depending on the
        @a cleanupMode parameter.

    raises :class:`VBoxErrorInvalidObjectState`
        Machine is currently locked for a session.
    """
    if not isinstance(cleanup_mode, CleanupMode):
        raise TypeError("cleanup_mode can only be an instance of type CleanupMode")
    media = self._call("unregister", in_p=[cleanup_mode])
    media = [IMedium(a) for a in media]
    return media
```
[ "def", "unregister", "(", "self", ",", "cleanup_mode", ")", ":", "if", "not", "isinstance", "(", "cleanup_mode", ",", "CleanupMode", ")", ":", "raise", "TypeError", "(", "\"cleanup_mode can only be an instance of type CleanupMode\"", ")", "media", "=", "self", ".", "_call", "(", "\"unregister\"", ",", "in_p", "=", "[", "cleanup_mode", "]", ")", "media", "=", "[", "IMedium", "(", "a", ")", "for", "a", "in", "media", "]", "return", "media" ]
Unregisters a machine previously registered with :py:func:`IVirtualBox.register_machine` and optionally do additional cleanup before the machine is unregistered. This method does not delete any files. It only changes the machine configuration and the list of registered machines in the VirtualBox object. To delete the files which belonged to the machine, including the XML file of the machine itself, call :py:func:`delete_config` , optionally with the array of IMedium objects which was returned from this method. How thoroughly this method cleans up the machine configuration before unregistering the machine depends on the @a cleanupMode argument. With "UnregisterOnly", the machine will only be unregistered, but no additional cleanup will be performed. The call will fail if the machine is in "Saved" state or has any snapshots or any media attached (see :py:class:`IMediumAttachment` ). It is the responsibility of the caller to delete all such configuration in this mode. In this mode, the API behaves like the former @c IVirtualBox::unregisterMachine() API which it replaces. With "DetachAllReturnNone", the call will succeed even if the machine is in "Saved" state or if it has snapshots or media attached. All media attached to the current machine state or in snapshots will be detached. No medium objects will be returned; all of the machine's media will remain open. With "DetachAllReturnHardDisksOnly", the call will behave like with "DetachAllReturnNone", except that all the hard disk medium objects which were detached from the machine will be returned as an array. This allows for quickly passing them to the :py:func:`delete_config` API for closing and deletion. With "Full", the call will behave like with "DetachAllReturnHardDisksOnly", except that all media will be returned in the array, including removable media like DVDs and floppies. This might be useful if the user wants to inspect in detail which media were attached to the machine. Be careful when passing the media array to :py:func:`delete_config` in that case because users will typically want to preserve ISO and RAW image files. A typical implementation will use "DetachAllReturnHardDisksOnly" and then pass the resulting IMedium array to :py:func:`delete_config` . This way, the machine is completely deleted with all its saved states and hard disk images, but images for removable drives (such as ISO and RAW files) will remain on disk. This API does not verify whether the media files returned in the array are still attached to other machines (i.e. shared between several machines). If such a shared image is passed to :py:func:`delete_config` however, closing the image will fail there and the image will be silently skipped. This API may, however, move media from this machine's media registry to other media registries (see :py:class:`IMedium` for details on media registries). For machines created with VirtualBox 4.0 or later, if media from this machine's media registry are also attached to another machine (shared attachments), each such medium will be moved to another machine's registry. This is because without this machine's media registry, the other machine cannot find its media any more and would become inaccessible. This API implicitly calls :py:func:`save_settings` to save all current machine settings before unregistering it. It may also silently call :py:func:`save_settings` on other machines if media are moved to other machines' media registries. After successful method invocation, the :py:class:`IMachineRegisteredEvent` event is fired. 
The call will fail if the machine is currently locked (see :py:class:`ISession` ). If the given machine is inaccessible (see :py:func:`accessible` ), it will be unregistered and fully uninitialized right afterwards. As a result, the returned machine object will be unusable and an attempt to call **any** method will return the "Object not ready" error. in cleanup_mode of type :class:`CleanupMode` How to clean up after the machine has been unregistered. return media of type :class:`IMedium` List of media detached from the machine, depending on the @a cleanupMode parameter. raises :class:`VBoxErrorInvalidObjectState` Machine is currently locked for a session.
[ "Unregisters", "a", "machine", "previously", "registered", "with", ":", "py", ":", "func", ":", "IVirtualBox", ".", "register_machine", "and", "optionally", "do", "additional", "cleanup", "before", "the", "machine", "is", "unregistered", ".", "This", "method", "does", "not", "delete", "any", "files", ".", "It", "only", "changes", "the", "machine", "configuration", "and", "the", "list", "of", "registered", "machines", "in", "the", "VirtualBox", "object", ".", "To", "delete", "the", "files", "which", "belonged", "to", "the", "machine", "including", "the", "XML", "file", "of", "the", "machine", "itself", "call", ":", "py", ":", "func", ":", "delete_config", "optionally", "with", "the", "array", "of", "IMedium", "objects", "which", "was", "returned", "from", "this", "method", ".", "How", "thoroughly", "this", "method", "cleans", "up", "the", "machine", "configuration", "before", "unregistering", "the", "machine", "depends", "on", "the", "@a", "cleanupMode", "argument", ".", "With", "UnregisterOnly", "the", "machine", "will", "only", "be", "unregistered", "but", "no", "additional", "cleanup", "will", "be", "performed", ".", "The", "call", "will", "fail", "if", "the", "machine", "is", "in", "Saved", "state", "or", "has", "any", "snapshots", "or", "any", "media", "attached", "(", "see", ":", "py", ":", "class", ":", "IMediumAttachment", ")", ".", "It", "is", "the", "responsibility", "of", "the", "caller", "to", "delete", "all", "such", "configuration", "in", "this", "mode", ".", "In", "this", "mode", "the", "API", "behaves", "like", "the", "former", "@c", "IVirtualBox", "::", "unregisterMachine", "()", "API", "which", "it", "replaces", ".", "With", "DetachAllReturnNone", "the", "call", "will", "succeed", "even", "if", "the", "machine", "is", "in", "Saved", "state", "or", "if", "it", "has", "snapshots", "or", "media", "attached", ".", "All", "media", "attached", "to", "the", "current", "machine", "state", "or", "in", "snapshots", "will", "be", "detached", ".", "No", "medium", "objects", "will", "be", "returned", ";", "all", "of", "the", "machine", "s", "media", "will", "remain", "open", ".", "With", "DetachAllReturnHardDisksOnly", "the", "call", "will", "behave", "like", "with", "DetachAllReturnNone", "except", "that", "all", "the", "hard", "disk", "medium", "objects", "which", "were", "detached", "from", "the", "machine", "will", "be", "returned", "as", "an", "array", ".", "This", "allows", "for", "quickly", "passing", "them", "to", "the", ":", "py", ":", "func", ":", "delete_config", "API", "for", "closing", "and", "deletion", ".", "With", "Full", "the", "call", "will", "behave", "like", "with", "DetachAllReturnHardDisksOnly", "except", "that", "all", "media", "will", "be", "returned", "in", "the", "array", "including", "removable", "media", "like", "DVDs", "and", "floppies", ".", "This", "might", "be", "useful", "if", "the", "user", "wants", "to", "inspect", "in", "detail", "which", "media", "were", "attached", "to", "the", "machine", ".", "Be", "careful", "when", "passing", "the", "media", "array", "to", ":", "py", ":", "func", ":", "delete_config", "in", "that", "case", "because", "users", "will", "typically", "want", "to", "preserve", "ISO", "and", "RAW", "image", "files", ".", "A", "typical", "implementation", "will", "use", "DetachAllReturnHardDisksOnly", "and", "then", "pass", "the", "resulting", "IMedium", "array", "to", ":", "py", ":", "func", ":", "delete_config", ".", "This", "way", "the", "machine", "is", "completely", "deleted", "with", "all", "its", "saved", "states", "and", 
"hard", "disk", "images", "but", "images", "for", "removable", "drives", "(", "such", "as", "ISO", "and", "RAW", "files", ")", "will", "remain", "on", "disk", ".", "This", "API", "does", "not", "verify", "whether", "the", "media", "files", "returned", "in", "the", "array", "are", "still", "attached", "to", "other", "machines", "(", "i", ".", "e", ".", "shared", "between", "several", "machines", ")", ".", "If", "such", "a", "shared", "image", "is", "passed", "to", ":", "py", ":", "func", ":", "delete_config", "however", "closing", "the", "image", "will", "fail", "there", "and", "the", "image", "will", "be", "silently", "skipped", ".", "This", "API", "may", "however", "move", "media", "from", "this", "machine", "s", "media", "registry", "to", "other", "media", "registries", "(", "see", ":", "py", ":", "class", ":", "IMedium", "for", "details", "on", "media", "registries", ")", ".", "For", "machines", "created", "with", "VirtualBox", "4", ".", "0", "or", "later", "if", "media", "from", "this", "machine", "s", "media", "registry", "are", "also", "attached", "to", "another", "machine", "(", "shared", "attachments", ")", "each", "such", "medium", "will", "be", "moved", "to", "another", "machine", "s", "registry", ".", "This", "is", "because", "without", "this", "machine", "s", "media", "registry", "the", "other", "machine", "cannot", "find", "its", "media", "any", "more", "and", "would", "become", "inaccessible", ".", "This", "API", "implicitly", "calls", ":", "py", ":", "func", ":", "save_settings", "to", "save", "all", "current", "machine", "settings", "before", "unregistering", "it", ".", "It", "may", "also", "silently", "call", ":", "py", ":", "func", ":", "save_settings", "on", "other", "machines", "if", "media", "are", "moved", "to", "other", "machines", "media", "registries", ".", "After", "successful", "method", "invocation", "the", ":", "py", ":", "class", ":", "IMachineRegisteredEvent", "event", "is", "fired", ".", "The", "call", "will", "fail", "if", "the", "machine", "is", "currently", "locked", "(", "see", ":", "py", ":", "class", ":", "ISession", ")", ".", "If", "the", "given", "machine", "is", "inaccessible", "(", "see", ":", "py", ":", "func", ":", "accessible", ")", "it", "will", "be", "unregistered", "and", "fully", "uninitialized", "right", "afterwards", ".", "As", "a", "result", "the", "returned", "machine", "object", "will", "be", "unusable", "and", "an", "attempt", "to", "call", "**", "any", "**", "method", "will", "return", "the", "Object", "not", "ready", "error", "." ]
language: python · partition: train · avg_line_len: 60.654762
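Following the docstring's recommended pattern, a hedged usage sketch with the virtualbox package ('my-vm' is a placeholder machine name; the enum spelling follows virtualbox-python's snake_case convention, and `delete_config` is left commented out because it destroys files):

```python
import virtualbox
from virtualbox.library import CleanupMode

vbox = virtualbox.VirtualBox()
machine = vbox.find_machine('my-vm')  # placeholder name
media = machine.unregister(CleanupMode.detach_all_return_hard_disks_only)
# progress = machine.delete_config(media)  # would also delete the VM's files
```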
pycontribs/jira
jira/client.py
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L3738-L3745
```python
def removed_issues(self, board_id, sprint_id):
    """Return the removed issues for the sprint."""
    # the docstring originally read "completed issues", but the method name
    # and the 'puntedIssues' payload key both refer to removed issues
    r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s'
                            % (board_id, sprint_id),
                            base=self.AGILE_BASE_URL)
    issues = [Issue(self._options, self._session, raw_issues_json)
              for raw_issues_json in r_json['contents']['puntedIssues']]
    return issues
```
[ "def", "removed_issues", "(", "self", ",", "board_id", ",", "sprint_id", ")", ":", "r_json", "=", "self", ".", "_get_json", "(", "'rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s'", "%", "(", "board_id", ",", "sprint_id", ")", ",", "base", "=", "self", ".", "AGILE_BASE_URL", ")", "issues", "=", "[", "Issue", "(", "self", ".", "_options", ",", "self", ".", "_session", ",", "raw_issues_json", ")", "for", "raw_issues_json", "in", "r_json", "[", "'contents'", "]", "[", "'puntedIssues'", "]", "]", "return", "issues" ]
Return the completed issues for the sprint.
[ "Return", "the", "completed", "issues", "for", "the", "sprint", "." ]
language: python · partition: train · avg_line_len: 54.75
weld-project/weld
python/grizzly/grizzly/groupbyweld.py
https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/grizzly/grizzly/groupbyweld.py#L236-L255
```python
def get_column(self, column_name, column_type, index, verbose=True):
    """Summary

    Args:
        column_name (TYPE): Description
        column_type (TYPE): Description
        index (TYPE): Description

    Returns:
        TYPE: Description
    """
    return LazyOpResult(
        grizzly_impl.get_column(
            self.expr,
            self.weld_type,
            index
        ),
        column_type,
        1
    )
```
[ "def", "get_column", "(", "self", ",", "column_name", ",", "column_type", ",", "index", ",", "verbose", "=", "True", ")", ":", "return", "LazyOpResult", "(", "grizzly_impl", ".", "get_column", "(", "self", ".", "expr", ",", "self", ".", "weld_type", ",", "index", ")", ",", "column_type", ",", "1", ")" ]
Summary Args: column_name (TYPE): Description column_type (TYPE): Description index (TYPE): Description Returns: TYPE: Description
[ "Summary" ]
language: python · partition: train · avg_line_len: 24
googlefonts/ufo2ft
Lib/ufo2ft/util.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/util.py#L277-L369
```python
def calcCodePageRanges(unicodes):
    """ Given a set of Unicode codepoints (integers), calculate the
    corresponding OS/2 CodePage range bits.
    This is a direct translation of FontForge implementation:
    https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
    """
    codepageRanges = set()

    chars = [unichr(u) for u in unicodes]

    hasAscii = set(range(0x20, 0x7E)).issubset(unicodes)
    hasLineart = "┤" in chars

    for char in chars:
        if char == "Þ" and hasAscii:
            codepageRanges.add(0)           # Latin 1
        elif char == "Ľ" and hasAscii:
            codepageRanges.add(1)           # Latin 2: Eastern Europe
            if hasLineart:
                codepageRanges.add(58)      # Latin 2
        elif char == "Б":
            codepageRanges.add(2)           # Cyrillic
            if "Ѕ" in chars and hasLineart:
                codepageRanges.add(57)      # IBM Cyrillic
            if "╜" in chars and hasLineart:
                codepageRanges.add(49)      # MS-DOS Russian
        elif char == "Ά":
            codepageRanges.add(3)           # Greek
            if hasLineart and "½" in chars:
                codepageRanges.add(48)      # IBM Greek
            if hasLineart and "√" in chars:
                codepageRanges.add(60)      # Greek, former 437 G
        elif char == "İ" and hasAscii:
            codepageRanges.add(4)           # Turkish
            if hasLineart:
                codepageRanges.add(56)      # IBM turkish
        elif char == "א":
            codepageRanges.add(5)           # Hebrew
            if hasLineart and "√" in chars:
                codepageRanges.add(53)      # Hebrew
        elif char == "ر":
            codepageRanges.add(6)           # Arabic
            if "√" in chars:
                codepageRanges.add(51)      # Arabic
            if hasLineart:
                codepageRanges.add(61)      # Arabic; ASMO 708
        elif char == "ŗ" and hasAscii:
            codepageRanges.add(7)           # Windows Baltic
            if hasLineart:
                codepageRanges.add(59)      # MS-DOS Baltic
        elif char == "₫" and hasAscii:
            codepageRanges.add(8)           # Vietnamese
        elif char == "ๅ":
            codepageRanges.add(16)          # Thai
        elif char == "エ":
            codepageRanges.add(17)          # JIS/Japan
        elif char == "ㄅ":
            codepageRanges.add(18)          # Chinese: Simplified chars
        elif char == "ㄱ":
            codepageRanges.add(19)          # Korean wansung
        elif char == "央":
            codepageRanges.add(20)          # Chinese: Traditional chars
        elif char == "곴":
            codepageRanges.add(21)          # Korean Johab
        elif char == "♥" and hasAscii:
            codepageRanges.add(30)          # OEM Character Set
        # TODO: Symbol bit has a special meaning (check the spec), we need
        # to confirm if this is wanted by default.
        # elif unichr(0xF000) <= char <= unichr(0xF0FF):
        #     codepageRanges.add(31)        # Symbol Character Set
        elif char == "þ" and hasAscii and hasLineart:
            codepageRanges.add(54)          # MS-DOS Icelandic
        elif char == "╚" and hasAscii:
            codepageRanges.add(62)          # WE/Latin 1
            codepageRanges.add(63)          # US
        elif hasAscii and hasLineart and "√" in chars:
            if char == "Å":
                codepageRanges.add(50)      # MS-DOS Nordic
            elif char == "é":
                codepageRanges.add(52)      # MS-DOS Canadian French
            elif char == "õ":
                codepageRanges.add(55)      # MS-DOS Portuguese

    if hasAscii and "‰" in chars and "∑" in chars:
        codepageRanges.add(29)              # Macintosh Character Set (US Roman)

    # when no codepage ranges can be enabled, fall back to enabling bit 0
    # (Latin 1) so that the font works in MS Word:
    # https://github.com/googlei18n/fontmake/issues/468
    if not codepageRanges:
        codepageRanges.add(0)

    return codepageRanges
```
[ "def", "calcCodePageRanges", "(", "unicodes", ")", ":", "codepageRanges", "=", "set", "(", ")", "chars", "=", "[", "unichr", "(", "u", ")", "for", "u", "in", "unicodes", "]", "hasAscii", "=", "set", "(", "range", "(", "0x20", ",", "0x7E", ")", ")", ".", "issubset", "(", "unicodes", ")", "hasLineart", "=", "\"┤\" i", " c", "ars", "for", "char", "in", "chars", ":", "if", "char", "==", "\"Þ\" ", "nd ", "asAscii:", "", "codepageRanges", ".", "add", "(", "0", ")", "# Latin 1", "elif", "char", "==", "\"Ľ\" ", "nd ", "asAscii:", "", "codepageRanges", ".", "add", "(", "1", ")", "# Latin 2: Eastern Europe", "if", "hasLineart", ":", "codepageRanges", ".", "add", "(", "58", ")", "# Latin 2", "elif", "char", "==", "\"Б\":", "", "codepageRanges", ".", "add", "(", "2", ")", "# Cyrillic", "if", "\"Ѕ\" ", "n ", "hars ", "nd ", "asLineart:", "", "codepageRanges", ".", "add", "(", "57", ")", "# IBM Cyrillic", "if", "\"╜\" i", " c", "ars a", "d h", "sLineart:", "", "codepageRanges", ".", "add", "(", "49", ")", "# MS-DOS Russian", "elif", "char", "==", "\"Ά\":", "", "codepageRanges", ".", "add", "(", "3", ")", "# Greek", "if", "hasLineart", "and", "\"½\" ", "n ", "hars:", "", "codepageRanges", ".", "add", "(", "48", ")", "# IBM Greek", "if", "hasLineart", "and", "\"√\" i", " c", "ars:", "", "codepageRanges", ".", "add", "(", "60", ")", "# Greek, former 437 G", "elif", "char", "==", "\"İ\" ", "nd ", "asAscii:", "", "codepageRanges", ".", "add", "(", "4", ")", "# Turkish", "if", "hasLineart", ":", "codepageRanges", ".", "add", "(", "56", ")", "# IBM turkish", "elif", "char", "==", "\"א\":", "", "codepageRanges", ".", "add", "(", "5", ")", "# Hebrew", "if", "hasLineart", "and", "\"√\" i", " c", "ars:", "", "codepageRanges", ".", "add", "(", "53", ")", "# Hebrew", "elif", "char", "==", "\"ر\":", "", "codepageRanges", ".", "add", "(", "6", ")", "# Arabic", "if", "\"√\" i", " c", "ars:", "", "codepageRanges", ".", "add", "(", "51", ")", "# Arabic", "if", "hasLineart", ":", "codepageRanges", ".", "add", "(", "61", ")", "# Arabic; ASMO 708", "elif", "char", "==", "\"ŗ\" ", "nd ", "asAscii:", "", "codepageRanges", ".", "add", "(", "7", ")", "# Windows Baltic", "if", "hasLineart", ":", "codepageRanges", ".", "add", "(", "59", ")", "# MS-DOS Baltic", "elif", "char", "==", "\"₫\" a", "d h", "sAscii:", "", "codepageRanges", ".", "add", "(", "8", ")", "# Vietnamese", "elif", "char", "==", "\"ๅ\":", "", "codepageRanges", ".", "add", "(", "16", ")", "# Thai", "elif", "char", "==", "\"エ\":", "", "codepageRanges", ".", "add", "(", "17", ")", "# JIS/Japan", "elif", "char", "==", "\"ㄅ\":", "", "codepageRanges", ".", "add", "(", "18", ")", "# Chinese: Simplified chars", "elif", "char", "==", "\"ㄱ\":", "", "codepageRanges", ".", "add", "(", "19", ")", "# Korean wansung", "elif", "char", "==", "\"央\":", "", "codepageRanges", ".", "add", "(", "20", ")", "# Chinese: Traditional chars", "elif", "char", "==", "\"곴\":", "", "codepageRanges", ".", "add", "(", "21", ")", "# Korean Johab", "elif", "char", "==", "\"♥\" a", "d h", "sAscii:", "", "codepageRanges", ".", "add", "(", "30", ")", "# OEM Character Set", "# TODO: Symbol bit has a special meaning (check the spec), we need", "# to confirm if this is wanted by default.", "# elif unichr(0xF000) <= char <= unichr(0xF0FF):", "# codepageRanges.add(31) # Symbol Character Set", "elif", "char", "==", "\"þ\" ", "nd ", "asAscii ", "nd ", "asLineart:", "", "codepageRanges", ".", "add", "(", "54", ")", "# MS-DOS Icelandic", "elif", "char", "==", "\"╚\" a", "d h", "sAscii:", "", 
"codepageRanges", ".", "add", "(", "62", ")", "# WE/Latin 1", "codepageRanges", ".", "add", "(", "63", ")", "# US", "elif", "hasAscii", "and", "hasLineart", "and", "\"√\" i", " c", "ars:", "", "if", "char", "==", "\"Å\":", "", "codepageRanges", ".", "add", "(", "50", ")", "# MS-DOS Nordic", "elif", "char", "==", "\"é\":", "", "codepageRanges", ".", "add", "(", "52", ")", "# MS-DOS Canadian French", "elif", "char", "==", "\"õ\":", "", "codepageRanges", ".", "add", "(", "55", ")", "# MS-DOS Portuguese", "if", "hasAscii", "and", "\"‰\" i", " c", "ars a", "d \"", "\" in ", "ha", "s:", "", "codepageRanges", ".", "add", "(", "29", ")", "# Macintosh Character Set (US Roman)", "# when no codepage ranges can be enabled, fall back to enabling bit 0", "# (Latin 1) so that the font works in MS Word:", "# https://github.com/googlei18n/fontmake/issues/468", "if", "not", "codepageRanges", ":", "codepageRanges", ".", "add", "(", "0", ")", "return", "codepageRanges" ]
Given a set of Unicode codepoints (integers), calculate the corresponding OS/2 CodePage range bits. This is a direct translation of FontForge implementation: https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
[ "Given", "a", "set", "of", "Unicode", "codepoints", "(", "integers", ")", "calculate", "the", "corresponding", "OS", "/", "2", "CodePage", "range", "bits", ".", "This", "is", "a", "direct", "translation", "of", "FontForge", "implementation", ":", "https", ":", "//", "github", ".", "com", "/", "fontforge", "/", "fontforge", "/", "blob", "/", "7b2c074", "/", "fontforge", "/", "tottf", ".", "c#L3158" ]
language: python · partition: train · avg_line_len: 43.11828
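The detection idea in miniature: each codepage bit is keyed to a probe character that only fonts covering that codepage would contain. A simplified sketch with a four-entry probe table (the full function above also gates several bits on ASCII and line-art coverage, which this omits):

```python
PROBES = {0: '\u00de', 2: '\u0411', 3: '\u0386', 5: '\u05d0'}  # Þ Б Ά א

def codepage_bits(unicodes):
    chars = {chr(u) for u in unicodes}
    bits = {bit for bit, probe in PROBES.items() if probe in chars}
    return bits or {0}  # same Latin-1 fallback as above

print(codepage_bits({0x0411, 0x0386}))  # -> {2, 3}
```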
gplepage/gvar
src/gvar/__init__.py
https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L209-L320
```python
def chi2(g1, g2=None, svdcut=1e-12, nocorr=False):
    """ Compute chi**2 of ``g1-g2``.

    ``chi**2`` is a measure of whether the multi-dimensional Gaussian
    distributions ``g1`` and ``g2`` (dictionaries or arrays) agree with
    each other --- that is, do their means agree within errors for
    corresponding elements. The probability is high if
    ``chi2(g1,g2)/chi2.dof`` is of order 1 or smaller.

    Usually ``g1`` and ``g2`` are dictionaries with the same keys, where
    ``g1[k]`` and ``g2[k]`` are |GVar|\s or arrays of |GVar|\s having the
    same shape. Alternatively ``g1`` and ``g2`` can be |GVar|\s, or arrays
    of |GVar|\s having the same shape.

    One of ``g1`` or ``g2`` can contain numbers instead of |GVar|\s, in
    which case ``chi**2`` is a measure of the likelihood that the numbers
    came from the distribution specified by the other argument.

    One or the other of ``g1`` or ``g2`` can be missing keys, or missing
    elements from arrays. Only the parts of ``g1`` and ``g2`` that overlap
    are used. Also setting ``g2=None`` is equivalent to replacing its
    elements by zeros.

    ``chi**2`` is computed from the inverse of the covariance matrix of
    ``g1-g2``. The matrix inversion can be sensitive to roundoff errors.
    In such cases, SVD cuts can be applied by setting parameters
    ``svdcut``; see the documentation for :func:`gvar.svd`, which is used
    to apply the cut.

    The return value is the ``chi**2``. Extra attributes attached to this
    value give additional information:

    - **dof** --- Number of degrees of freedom (that is, the number of
      variables compared).

    - **Q** --- The probability that the ``chi**2`` could have been larger,
      by chance, even if ``g1`` and ``g2`` agree. Values smaller than 0.1
      or so suggest that they do not agree. Also called the *p-value*.
    """
    # customized class for answer
    class ans(float):
        def __new__(cls, chi2, dof, Q):
            return float.__new__(cls, chi2)
        def __init__(self, chi2, dof, Q):
            self.dof = dof
            self.Q = Q
            self.chi2 = chi2
    # leaving nocorr (turn off correlations) undocumented because I
    # suspect I will remove it
    if g2 is None:
        diff = BufferDict(g1).buf if hasattr(g1, 'keys') else numpy.asarray(g1).flatten()
    elif hasattr(g1, 'keys') and hasattr(g2, 'keys'):
        # g1 and g2 are dictionaries
        g1 = BufferDict(g1)
        g2 = BufferDict(g2)
        diff = BufferDict()
        keys = set(g1.keys())
        keys = keys.intersection(g2.keys())
        for k in keys:
            g1k = g1[k]
            g2k = g2[k]
            shape = tuple(
                [min(s1, s2) for s1, s2 in zip(numpy.shape(g1k), numpy.shape(g2k))]
                )
            diff[k] = numpy.zeros(shape, object)
            if len(shape) == 0:
                diff[k] = g1k - g2k
            else:
                for i in numpy.ndindex(shape):
                    diff[k][i] = g1k[i] - g2k[i]
        diff = diff.buf
    elif not hasattr(g1, 'keys') and not hasattr(g2, 'keys'):
        # g1 and g2 are arrays or scalars
        g1 = numpy.asarray(g1)
        g2 = numpy.asarray(g2)
        shape = tuple(
            [min(s1, s2) for s1, s2 in zip(numpy.shape(g1), numpy.shape(g2))]
            )
        diff = numpy.zeros(shape, object)
        if len(shape) == 0:
            diff = numpy.array(g1 - g2)
        else:
            for i in numpy.ndindex(shape):
                diff[i] = g1[i] - g2[i]
        diff = diff.flatten()
    else:
        # g1 and g2 are something else
        raise ValueError(
            'cannot compute chi**2 for types ' + str(type(g1)) + ' ' + str(type(g2))
            )
    dof = diff.size
    if dof == 0:
        return ans(0.0, 0, 0)
    if nocorr:
        # ignore correlations
        chi2 = numpy.sum(mean(diff) ** 2 / var(diff))
        dof = len(diff)
    else:
        diffmod, i_wgts = svd(diff, svdcut=svdcut, wgts=-1)
        diffmean = mean(diffmod)
        i, wgts = i_wgts[0]
        chi2 = 0.0
        if len(i) > 0:
            chi2 += numpy.sum((diffmean[i] * wgts) ** 2)
        for i, wgts in i_wgts[1:]:
            chi2 += numpy.sum(wgts.dot(diffmean[i]) ** 2)
        dof = sum(len(wgts) for i, wgts in i_wgts)
    Q = gammaQ(dof / 2., chi2 / 2.)
    return ans(chi2, dof=dof, Q=Q)
```
[ "def", "chi2", "(", "g1", ",", "g2", "=", "None", ",", "svdcut", "=", "1e-12", ",", "nocorr", "=", "False", ")", ":", "# customized class for answer", "class", "ans", "(", "float", ")", ":", "def", "__new__", "(", "cls", ",", "chi2", ",", "dof", ",", "Q", ")", ":", "return", "float", ".", "__new__", "(", "cls", ",", "chi2", ")", "def", "__init__", "(", "self", ",", "chi2", ",", "dof", ",", "Q", ")", ":", "self", ".", "dof", "=", "dof", "self", ".", "Q", "=", "Q", "self", ".", "chi2", "=", "chi2", "# leaving nocorr (turn off correlations) undocumented because I", "# suspect I will remove it", "if", "g2", "is", "None", ":", "diff", "=", "BufferDict", "(", "g1", ")", ".", "buf", "if", "hasattr", "(", "g1", ",", "'keys'", ")", "else", "numpy", ".", "asarray", "(", "g1", ")", ".", "flatten", "(", ")", "elif", "hasattr", "(", "g1", ",", "'keys'", ")", "and", "hasattr", "(", "g2", ",", "'keys'", ")", ":", "# g1 and g2 are dictionaries", "g1", "=", "BufferDict", "(", "g1", ")", "g2", "=", "BufferDict", "(", "g2", ")", "diff", "=", "BufferDict", "(", ")", "keys", "=", "set", "(", "g1", ".", "keys", "(", ")", ")", "keys", "=", "keys", ".", "intersection", "(", "g2", ".", "keys", "(", ")", ")", "for", "k", "in", "keys", ":", "g1k", "=", "g1", "[", "k", "]", "g2k", "=", "g2", "[", "k", "]", "shape", "=", "tuple", "(", "[", "min", "(", "s1", ",", "s2", ")", "for", "s1", ",", "s2", "in", "zip", "(", "numpy", ".", "shape", "(", "g1k", ")", ",", "numpy", ".", "shape", "(", "g2k", ")", ")", "]", ")", "diff", "[", "k", "]", "=", "numpy", ".", "zeros", "(", "shape", ",", "object", ")", "if", "len", "(", "shape", ")", "==", "0", ":", "diff", "[", "k", "]", "=", "g1k", "-", "g2k", "else", ":", "for", "i", "in", "numpy", ".", "ndindex", "(", "shape", ")", ":", "diff", "[", "k", "]", "[", "i", "]", "=", "g1k", "[", "i", "]", "-", "g2k", "[", "i", "]", "diff", "=", "diff", ".", "buf", "elif", "not", "hasattr", "(", "g1", ",", "'keys'", ")", "and", "not", "hasattr", "(", "g2", ",", "'keys'", ")", ":", "# g1 and g2 are arrays or scalars", "g1", "=", "numpy", ".", "asarray", "(", "g1", ")", "g2", "=", "numpy", ".", "asarray", "(", "g2", ")", "shape", "=", "tuple", "(", "[", "min", "(", "s1", ",", "s2", ")", "for", "s1", ",", "s2", "in", "zip", "(", "numpy", ".", "shape", "(", "g1", ")", ",", "numpy", ".", "shape", "(", "g2", ")", ")", "]", ")", "diff", "=", "numpy", ".", "zeros", "(", "shape", ",", "object", ")", "if", "len", "(", "shape", ")", "==", "0", ":", "diff", "=", "numpy", ".", "array", "(", "g1", "-", "g2", ")", "else", ":", "for", "i", "in", "numpy", ".", "ndindex", "(", "shape", ")", ":", "diff", "[", "i", "]", "=", "g1", "[", "i", "]", "-", "g2", "[", "i", "]", "diff", "=", "diff", ".", "flatten", "(", ")", "else", ":", "# g1 and g2 are something else", "raise", "ValueError", "(", "'cannot compute chi**2 for types '", "+", "str", "(", "type", "(", "g1", ")", ")", "+", "' '", "+", "str", "(", "type", "(", "g2", ")", ")", ")", "dof", "=", "diff", ".", "size", "if", "dof", "==", "0", ":", "return", "ans", "(", "0.0", ",", "0", ",", "0", ")", "if", "nocorr", ":", "# ignore correlations", "chi2", "=", "numpy", ".", "sum", "(", "mean", "(", "diff", ")", "**", "2", "/", "var", "(", "diff", ")", ")", "dof", "=", "len", "(", "diff", ")", "else", ":", "diffmod", ",", "i_wgts", "=", "svd", "(", "diff", ",", "svdcut", "=", "svdcut", ",", "wgts", "=", "-", "1", ")", "diffmean", "=", "mean", "(", "diffmod", ")", "i", ",", "wgts", "=", "i_wgts", "[", "0", "]", "chi2", "=", "0.0", "if", "len", "(", "i", ")", ">", 
"0", ":", "chi2", "+=", "numpy", ".", "sum", "(", "(", "diffmean", "[", "i", "]", "*", "wgts", ")", "**", "2", ")", "for", "i", ",", "wgts", "in", "i_wgts", "[", "1", ":", "]", ":", "chi2", "+=", "numpy", ".", "sum", "(", "wgts", ".", "dot", "(", "diffmean", "[", "i", "]", ")", "**", "2", ")", "dof", "=", "sum", "(", "len", "(", "wgts", ")", "for", "i", ",", "wgts", "in", "i_wgts", ")", "Q", "=", "gammaQ", "(", "dof", "/", "2.", ",", "chi2", "/", "2.", ")", "return", "ans", "(", "chi2", ",", "dof", "=", "dof", ",", "Q", "=", "Q", ")" ]
Compute chi**2 of ``g1-g2``. ``chi**2`` is a measure of whether the multi-dimensional Gaussian distributions ``g1`` and ``g2`` (dictionaries or arrays) agree with each other --- that is, do their means agree within errors for corresponding elements. The probability is high if ``chi2(g1,g2)/chi2.dof`` is of order 1 or smaller. Usually ``g1`` and ``g2`` are dictionaries with the same keys, where ``g1[k]`` and ``g2[k]`` are |GVar|\s or arrays of |GVar|\s having the same shape. Alternatively ``g1`` and ``g2`` can be |GVar|\s, or arrays of |GVar|\s having the same shape. One of ``g1`` or ``g2`` can contain numbers instead of |GVar|\s, in which case ``chi**2`` is a measure of the likelihood that the numbers came from the distribution specified by the other argument. One or the other of ``g1`` or ``g2`` can be missing keys, or missing elements from arrays. Only the parts of ``g1`` and ``g2`` that overlap are used. Also setting ``g2=None`` is equivalent to replacing its elements by zeros. ``chi**2`` is computed from the inverse of the covariance matrix of ``g1-g2``. The matrix inversion can be sensitive to roundoff errors. In such cases, SVD cuts can be applied by setting parameters ``svdcut``; see the documentation for :func:`gvar.svd`, which is used to apply the cut. The return value is the ``chi**2``. Extra attributes attached to this value give additional information: - **dof** --- Number of degrees of freedom (that is, the number of variables compared). - **Q** --- The probability that the ``chi**2`` could have been larger, by chance, even if ``g1`` and ``g2`` agree. Values smaller than 0.1 or so suggest that they do not agree. Also called the *p-value*.
[ "Compute", "chi", "**", "2", "of", "g1", "-", "g2", "." ]
python
train
38.160714
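A minimal usage sketch for the `chi2` record above, assuming only that the `gvar` package is installed; the numbers are illustrative:

import gvar as gv

g1 = gv.gvar(['1.00(10)', '2.00(10)'])   # two GVars, 1.00 +- 0.10 and 2.00 +- 0.10
g2 = [1.05, 1.95]                        # plain numbers are allowed for one argument
c2 = gv.chi2(g1, g2)
print(c2, c2.dof, c2.Q)                  # chi**2 plus the dof and Q (p-value) attributes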
SpriteLink/NIPAP
pynipap/pynipap.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/pynipap/pynipap.py#L548-L581
def smart_search(cls, query_string, search_options=None, extra_query = None): """ Perform a smart VRF search. Maps to the function :py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend. Please see the documentation for the backend function for information regarding input arguments and return values. """ if search_options is None: search_options = {} xmlrpc = XMLRPCConnection() try: smart_result = xmlrpc.connection.smart_search_vrf( { 'query_string': query_string, 'search_options': search_options, 'auth': AuthOptions().options, 'extra_query': extra_query }) except xmlrpclib.Fault as xml_fault: raise _fault_to_exception(xml_fault) result = dict() result['interpretation'] = smart_result['interpretation'] result['search_options'] = smart_result['search_options'] result['error'] = smart_result['error'] if 'error_message' in smart_result: result['error_message'] = smart_result['error_message'] result['result'] = list() for v in smart_result['result']: result['result'].append(VRF.from_dict(v)) return result
[ "def", "smart_search", "(", "cls", ",", "query_string", ",", "search_options", "=", "None", ",", "extra_query", "=", "None", ")", ":", "if", "search_options", "is", "None", ":", "search_options", "=", "{", "}", "xmlrpc", "=", "XMLRPCConnection", "(", ")", "try", ":", "smart_result", "=", "xmlrpc", ".", "connection", ".", "smart_search_vrf", "(", "{", "'query_string'", ":", "query_string", ",", "'search_options'", ":", "search_options", ",", "'auth'", ":", "AuthOptions", "(", ")", ".", "options", ",", "'extra_query'", ":", "extra_query", "}", ")", "except", "xmlrpclib", ".", "Fault", "as", "xml_fault", ":", "raise", "_fault_to_exception", "(", "xml_fault", ")", "result", "=", "dict", "(", ")", "result", "[", "'interpretation'", "]", "=", "smart_result", "[", "'interpretation'", "]", "result", "[", "'search_options'", "]", "=", "smart_result", "[", "'search_options'", "]", "result", "[", "'error'", "]", "=", "smart_result", "[", "'error'", "]", "if", "'error_message'", "in", "smart_result", ":", "result", "[", "'error_message'", "]", "=", "smart_result", "[", "'error_message'", "]", "result", "[", "'result'", "]", "=", "list", "(", ")", "for", "v", "in", "smart_result", "[", "'result'", "]", ":", "result", "[", "'result'", "]", ".", "append", "(", "VRF", ".", "from_dict", "(", "v", ")", ")", "return", "result" ]
Perform a smart VRF search. Maps to the function :py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend. Please see the documentation for the backend function for information regarding input arguments and return values.
[ "Perform", "a", "smart", "VRF", "search", "." ]
python
train
39.029412
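A hedged sketch of calling `VRF.smart_search`; the XML-RPC URI and credentials below are placeholders, and a running NIPAP backend is assumed:

import pynipap
from pynipap import VRF, AuthOptions

pynipap.xmlrpc_uri = 'http://user:pass@nipap.example.com:1337/XMLRPC'  # hypothetical endpoint
AuthOptions({'authoritative_source': 'mytool'})

result = VRF.smart_search('customer')
print(result['interpretation'])          # how the query string was parsed
for vrf in result['result']:             # VRF objects built via VRF.from_dict
    print(vrf.rt, vrf.name)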
allenai/allennlp
allennlp/training/tensorboard_writer.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/tensorboard_writer.py#L133-L139
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None: """ Send histograms of parameters to tensorboard. """ for name, param in model.named_parameters(): if name in histogram_parameters: self.add_train_histogram("parameter_histogram/" + name, param)
[ "def", "log_histograms", "(", "self", ",", "model", ":", "Model", ",", "histogram_parameters", ":", "Set", "[", "str", "]", ")", "->", "None", ":", "for", "name", ",", "param", "in", "model", ".", "named_parameters", "(", ")", ":", "if", "name", "in", "histogram_parameters", ":", "self", ".", "add_train_histogram", "(", "\"parameter_histogram/\"", "+", "name", ",", "param", ")" ]
Send histograms of parameters to tensorboard.
[ "Send", "histograms", "of", "parameters", "to", "tensorboard", "." ]
python
train
46.857143
ubernostrum/django-flashpolicies
flashpolicies/policies.py
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L200-L213
def _add_header_domains_xml(self, document): """ Generates the XML elements for allowed header domains. """ for domain, attrs in self.header_domains.items(): header_element = document.createElement( 'allow-http-request-headers-from' ) header_element.setAttribute('domain', domain) header_element.setAttribute('headers', ','.join(attrs['headers'])) if not attrs['secure']: header_element.setAttribute('secure', 'false') document.documentElement.appendChild(header_element)
[ "def", "_add_header_domains_xml", "(", "self", ",", "document", ")", ":", "for", "domain", ",", "attrs", "in", "self", ".", "header_domains", ".", "items", "(", ")", ":", "header_element", "=", "document", ".", "createElement", "(", "'allow-http-request-headers-from'", ")", "header_element", ".", "setAttribute", "(", "'domain'", ",", "domain", ")", "header_element", ".", "setAttribute", "(", "'headers'", ",", "','", ".", "join", "(", "attrs", "[", "'headers'", "]", ")", ")", "if", "not", "attrs", "[", "'secure'", "]", ":", "header_element", ".", "setAttribute", "(", "'secure'", ",", "'false'", ")", "document", ".", "documentElement", ".", "appendChild", "(", "header_element", ")" ]
Generates the XML elements for allowed header domains.
[ "Generates", "the", "XML", "elements", "for", "allowed", "header", "domains", "." ]
python
train
42.5
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L564-L574
def filter_useless_pass(source): """Yield code with useless "pass" lines removed.""" try: marked_lines = frozenset(useless_pass_line_numbers(source)) except (SyntaxError, tokenize.TokenError): marked_lines = frozenset() sio = io.StringIO(source) for line_number, line in enumerate(sio.readlines(), start=1): if line_number not in marked_lines: yield line
[ "def", "filter_useless_pass", "(", "source", ")", ":", "try", ":", "marked_lines", "=", "frozenset", "(", "useless_pass_line_numbers", "(", "source", ")", ")", "except", "(", "SyntaxError", ",", "tokenize", ".", "TokenError", ")", ":", "marked_lines", "=", "frozenset", "(", ")", "sio", "=", "io", ".", "StringIO", "(", "source", ")", "for", "line_number", ",", "line", "in", "enumerate", "(", "sio", ".", "readlines", "(", ")", ",", "start", "=", "1", ")", ":", "if", "line_number", "not", "in", "marked_lines", ":", "yield", "line" ]
Yield code with useless "pass" lines removed.
[ "Yield", "code", "with", "useless", "pass", "lines", "removed", "." ]
python
test
36.454545
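Since `filter_useless_pass` is a plain generator over source lines, it can be exercised directly; a small sketch:

import autoflake

source = 'def f():\n    pass\n    return 1\n'
print(''.join(autoflake.filter_useless_pass(source)))
# the redundant "pass" line is dropped; all other lines are yielded unchanged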
etcher-be/emiz
emiz/avwx/metar.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/metar.py#L23-L28
def parse(station: str, txt: str) -> (MetarData, Units): # type: ignore """ Returns MetarData and Units dataclasses with parsed data and their associated units """ core.valid_station(station) return parse_na(txt) if core.uses_na_format(station[:2]) else parse_in(txt)
[ "def", "parse", "(", "station", ":", "str", ",", "txt", ":", "str", ")", "->", "(", "MetarData", ",", "Units", ")", ":", "# type: ignore", "core", ".", "valid_station", "(", "station", ")", "return", "parse_na", "(", "txt", ")", "if", "core", ".", "uses_na_format", "(", "station", "[", ":", "2", "]", ")", "else", "parse_in", "(", "txt", ")" ]
Returns MetarData and Units dataclasses with parsed data and their associated units
[ "Returns", "MetarData", "and", "Units", "dataclasses", "with", "parsed", "data", "and", "their", "associated", "units" ]
python
train
47.166667
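A hedged sketch of `parse`; the METAR string is illustrative and the attribute names are assumed to follow the avwx dataclasses this module wraps:

from emiz.avwx import metar

report = 'KJFK 241351Z 33016KT 10SM FEW250 08/M01 A3002'
data, units = metar.parse('KJFK', report)   # NA format chosen from the 'KJ' prefix
print(data.raw)                             # the original report string
print(units.wind_speed)                     # e.g. 'kt' for North-American reports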
mk-fg/txboxdotnet
txboxdotnet/api_v2.py
https://github.com/mk-fg/txboxdotnet/blob/4a3e48fbe1388c5e2a17e808aaaf6b2460e61f48/txboxdotnet/api_v2.py#L213-L219
def listdir(self, folder_id='0', offset=None, limit=None, fields=None): 'Get Box object, representing list of objects in a folder.' if fields is not None\ and not isinstance(fields, types.StringTypes): fields = ','.join(fields) return self( join('folders', folder_id, 'items'), dict(offset=offset, limit=limit, fields=fields) )
[ "def", "listdir", "(", "self", ",", "folder_id", "=", "'0'", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "fields", "=", "None", ")", ":", "if", "fields", "is", "not", "None", "and", "not", "isinstance", "(", "fields", ",", "types", ".", "StringTypes", ")", ":", "fields", "=", "','", ".", "join", "(", "fields", ")", "return", "self", "(", "join", "(", "'folders'", ",", "folder_id", ",", "'items'", ")", ",", "dict", "(", "offset", "=", "offset", ",", "limit", "=", "limit", ",", "fields", "=", "fields", ")", ")" ]
Get Box object, representing list of objects in a folder.
[ "Get", "Box", "object", "representing", "list", "of", "objects", "in", "a", "folder", "." ]
python
train
48
PGower/PyCanvas
pycanvas/apis/submissions.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L387-L416
def get_single_submission_courses(self, user_id, course_id, assignment_id, include=None): """ Get a single submission. Get a single submission, based on user id. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - include """Associations to include with the group.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"]) params["include"] = include self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "get_single_submission_courses", "(", "self", ",", "user_id", ",", "course_id", ",", "assignment_id", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# REQUIRED - PATH - assignment_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"assignment_id\"", "]", "=", "assignment_id", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", "=", "user_id", "# OPTIONAL - include\r", "\"\"\"Associations to include with the group.\"\"\"", "if", "include", "is", "not", "None", ":", "self", ".", "_validate_enum", "(", "include", ",", "[", "\"submission_history\"", ",", "\"submission_comments\"", ",", "\"rubric_assessment\"", ",", "\"visibility\"", ",", "\"course\"", ",", "\"user\"", "]", ")", "params", "[", "\"include\"", "]", "=", "include", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
Get a single submission. Get a single submission, based on user id.
[ "Get", "a", "single", "submission", ".", "Get", "a", "single", "submission", "based", "on", "user", "id", "." ]
python
train
40.7
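A sketch of calling the endpoint above; the base URL, token, and constructor arguments follow the repository's BaseCanvasAPI pattern and are assumptions, not verified API:

from pycanvas.apis.submissions import SubmissionsAPI

api = SubmissionsAPI('https://canvas.example.com', 'API_TOKEN')  # hypothetical instance and token
sub = api.get_single_submission_courses(
    user_id=42, course_id=101, assignment_id=7,   # hypothetical identifiers
    include=['submission_comments'])              # must come from the validated enum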
proycon/clam
clam/common/data.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L1524-L1545
def json(self): """Produce a JSON representation for the web interface""" d = { 'id': self.id, 'format': self.formatclass.__name__,'label': self.label, 'mimetype': self.formatclass.mimetype, 'schema': self.formatclass.schema } if self.unique: d['unique'] = True if self.filename: d['filename'] = self.filename if self.extension: d['extension'] = self.extension if self.acceptarchive: d['acceptarchive'] = self.acceptarchive #d['parameters'] = {} #The actual parameters are included as XML, and transformed by clam.js using XSLT (parameter.xsl) to generate the forms parametersxml = '' for parameter in self.parameters: parametersxml += parameter.xml() d['parametersxml'] = '<?xml version="1.0" encoding="utf-8" ?><parameters>' + parametersxml + '</parameters>' d['converters'] = [ {'id':x.id, 'label':x.label} for x in self.converters ] d['inputsources'] = [ {'id':x.id, 'label':x.label} for x in self.inputsources ] return json.dumps(d)
[ "def", "json", "(", "self", ")", ":", "d", "=", "{", "'id'", ":", "self", ".", "id", ",", "'format'", ":", "self", ".", "formatclass", ".", "__name__", ",", "'label'", ":", "self", ".", "label", ",", "'mimetype'", ":", "self", ".", "formatclass", ".", "mimetype", ",", "'schema'", ":", "self", ".", "formatclass", ".", "schema", "}", "if", "self", ".", "unique", ":", "d", "[", "'unique'", "]", "=", "True", "if", "self", ".", "filename", ":", "d", "[", "'filename'", "]", "=", "self", ".", "filename", "if", "self", ".", "extension", ":", "d", "[", "'extension'", "]", "=", "self", ".", "extension", "if", "self", ".", "acceptarchive", ":", "d", "[", "'acceptarchive'", "]", "=", "self", ".", "acceptarchive", "#d['parameters'] = {}", "#The actual parameters are included as XML, and transformed by clam.js using XSLT (parameter.xsl) to generate the forms", "parametersxml", "=", "''", "for", "parameter", "in", "self", ".", "parameters", ":", "parametersxml", "+=", "parameter", ".", "xml", "(", ")", "d", "[", "'parametersxml'", "]", "=", "'<?xml version=\"1.0\" encoding=\"utf-8\" ?><parameters>'", "+", "parametersxml", "+", "'</parameters>'", "d", "[", "'converters'", "]", "=", "[", "{", "'id'", ":", "x", ".", "id", ",", "'label'", ":", "x", ".", "label", "}", "for", "x", "in", "self", ".", "converters", "]", "d", "[", "'inputsources'", "]", "=", "[", "{", "'id'", ":", "x", ".", "id", ",", "'label'", ":", "x", ".", "label", "}", "for", "x", "in", "self", ".", "inputsources", "]", "return", "json", ".", "dumps", "(", "d", ")" ]
Produce a JSON representation for the web interface
[ "Produce", "a", "JSON", "representation", "for", "the", "web", "interface" ]
python
train
49.590909
licenses/lice
lice/core.py
https://github.com/licenses/lice/blob/71635c2544d5edf9e93af4141467763916a86624/lice/core.py#L93-L100
def clean_path(p): """ Clean a path by expanding user and environment variables and ensuring absolute path. """ p = os.path.expanduser(p) p = os.path.expandvars(p) p = os.path.abspath(p) return p
[ "def", "clean_path", "(", "p", ")", ":", "p", "=", "os", ".", "path", ".", "expanduser", "(", "p", ")", "p", "=", "os", ".", "path", ".", "expandvars", "(", "p", ")", "p", "=", "os", ".", "path", ".", "abspath", "(", "p", ")", "return", "p" ]
Clean a path by expanding user and environment variables and ensuring absolute path.
[ "Clean", "a", "path", "by", "expanding", "user", "and", "environment", "variables", "and", "ensuring", "absolute", "path", "." ]
python
train
27.5
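A quick demonstration of `clean_path`; the exact result depends on the environment:

import os
from lice.core import clean_path

os.environ['CFG'] = 'configs'
print(clean_path('~/$CFG/../app.ini'))
# expanduser + expandvars + abspath, e.g. '/home/alice/app.ini'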
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/window.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/window.py#L670-L705
def move(self, x = None, y = None, width = None, height = None, bRepaint = True): """ Moves and/or resizes the window. @note: This is request is performed syncronously. @type x: int @param x: (Optional) New horizontal coordinate. @type y: int @param y: (Optional) New vertical coordinate. @type width: int @param width: (Optional) Desired window width. @type height: int @param height: (Optional) Desired window height. @type bRepaint: bool @param bRepaint: (Optional) C{True} if the window should be redrawn afterwards. @raise WindowsError: An error occured while processing this request. """ if None in (x, y, width, height): rect = self.get_screen_rect() if x is None: x = rect.left if y is None: y = rect.top if width is None: width = rect.right - rect.left if height is None: height = rect.bottom - rect.top win32.MoveWindow(self.get_handle(), x, y, width, height, bRepaint)
[ "def", "move", "(", "self", ",", "x", "=", "None", ",", "y", "=", "None", ",", "width", "=", "None", ",", "height", "=", "None", ",", "bRepaint", "=", "True", ")", ":", "if", "None", "in", "(", "x", ",", "y", ",", "width", ",", "height", ")", ":", "rect", "=", "self", ".", "get_screen_rect", "(", ")", "if", "x", "is", "None", ":", "x", "=", "rect", ".", "left", "if", "y", "is", "None", ":", "y", "=", "rect", ".", "top", "if", "width", "is", "None", ":", "width", "=", "rect", ".", "right", "-", "rect", ".", "left", "if", "height", "is", "None", ":", "height", "=", "rect", ".", "bottom", "-", "rect", ".", "top", "win32", ".", "MoveWindow", "(", "self", ".", "get_handle", "(", ")", ",", "x", ",", "y", ",", "width", ",", "height", ",", "bRepaint", ")" ]
Moves and/or resizes the window. @note: This request is performed synchronously. @type x: int @param x: (Optional) New horizontal coordinate. @type y: int @param y: (Optional) New vertical coordinate. @type width: int @param width: (Optional) Desired window width. @type height: int @param height: (Optional) Desired window height. @type bRepaint: bool @param bRepaint: (Optional) C{True} if the window should be redrawn afterwards. @raise WindowsError: An error occurred while processing this request.
[ "Moves", "and", "/", "or", "resizes", "the", "window", "." ]
python
train
33
Neurosim-lab/netpyne
netpyne/support/filter.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/filter.py#L45-L86
def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=True): """ Butterworth-Bandpass Filter. Filter data from ``freqmin`` to ``freqmax`` using ``corners`` corners. The filter uses :func:`scipy.signal.iirfilter` (for design) and :func:`scipy.signal.sosfilt` (for applying the filter). :type data: numpy.ndarray :param data: Data to filter. :param freqmin: Pass band low corner frequency. :param freqmax: Pass band high corner frequency. :param df: Sampling rate in Hz. :param corners: Filter corners / order. :param zerophase: If True, apply filter once forwards and once backwards. This results in twice the filter order but zero phase shift in the resulting filtered trace. :return: Filtered data. """ fe = 0.5 * df low = freqmin / fe high = freqmax / fe # raise for some bad scenarios if high - 1.0 > -1e-6: msg = ("Selected high corner frequency ({}) of bandpass is at or " "above Nyquist ({}). Applying a high-pass instead.").format( freqmax, fe) warnings.warn(msg) return highpass(data, freq=freqmin, df=df, corners=corners, zerophase=zerophase) if low > 1: msg = "Selected low corner frequency is above Nyquist." raise ValueError(msg) z, p, k = iirfilter(corners, [low, high], btype='band', ftype='butter', output='zpk') sos = zpk2sos(z, p, k) if zerophase: firstpass = sosfilt(sos, data) return sosfilt(sos, firstpass[::-1])[::-1] else: return sosfilt(sos, data)
[ "def", "bandpass", "(", "data", ",", "freqmin", ",", "freqmax", ",", "df", ",", "corners", "=", "4", ",", "zerophase", "=", "True", ")", ":", "fe", "=", "0.5", "*", "df", "low", "=", "freqmin", "/", "fe", "high", "=", "freqmax", "/", "fe", "# raise for some bad scenarios", "if", "high", "-", "1.0", ">", "-", "1e-6", ":", "msg", "=", "(", "\"Selected high corner frequency ({}) of bandpass is at or \"", "\"above Nyquist ({}). Applying a high-pass instead.\"", ")", ".", "format", "(", "freqmax", ",", "fe", ")", "warnings", ".", "warn", "(", "msg", ")", "return", "highpass", "(", "data", ",", "freq", "=", "freqmin", ",", "df", "=", "df", ",", "corners", "=", "corners", ",", "zerophase", "=", "zerophase", ")", "if", "low", ">", "1", ":", "msg", "=", "\"Selected low corner frequency is above Nyquist.\"", "raise", "ValueError", "(", "msg", ")", "z", ",", "p", ",", "k", "=", "iirfilter", "(", "corners", ",", "[", "low", ",", "high", "]", ",", "btype", "=", "'band'", ",", "ftype", "=", "'butter'", ",", "output", "=", "'zpk'", ")", "sos", "=", "zpk2sos", "(", "z", ",", "p", ",", "k", ")", "if", "zerophase", ":", "firstpass", "=", "sosfilt", "(", "sos", ",", "data", ")", "return", "sosfilt", "(", "sos", ",", "firstpass", "[", ":", ":", "-", "1", "]", ")", "[", ":", ":", "-", "1", "]", "else", ":", "return", "sosfilt", "(", "sos", ",", "data", ")" ]
Butterworth-Bandpass Filter. Filter data from ``freqmin`` to ``freqmax`` using ``corners`` corners. The filter uses :func:`scipy.signal.iirfilter` (for design) and :func:`scipy.signal.sosfilt` (for applying the filter). :type data: numpy.ndarray :param data: Data to filter. :param freqmin: Pass band low corner frequency. :param freqmax: Pass band high corner frequency. :param df: Sampling rate in Hz. :param corners: Filter corners / order. :param zerophase: If True, apply filter once forwards and once backwards. This results in twice the filter order but zero phase shift in the resulting filtered trace. :return: Filtered data.
[ "Butterworth", "-", "Bandpass", "Filter", "." ]
python
train
37.952381
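A self-contained sketch of the band-pass helper on a synthetic two-tone signal; the module path matches the record above:

import numpy as np
from netpyne.support.filter import bandpass

fs = 1000.0                                   # sampling rate in Hz
t = np.arange(0, 1, 1 / fs)
data = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 200 * t)
filtered = bandpass(data, freqmin=1.0, freqmax=50.0, df=fs, corners=4, zerophase=True)
# the 200 Hz component is attenuated; zerophase=True doubles the order but avoids phase shift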
LogicalDash/LiSE
LiSE/LiSE/engine.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/LiSE/LiSE/engine.py#L1460-L1474
def alias(self, v, stat='dummy'): """Return a representation of a value suitable for use in historical queries. It will behave much as if you assigned the value to some entity and then used its ``historical`` method to get a reference to the set of its past values, which happens to contain only the value you've provided here, ``v``. :arg v: the value to represent :arg stat: what name to pretend its stat has; usually irrelevant """ from .util import EntityStatAccessor r = DummyEntity(self) r[stat] = v return EntityStatAccessor(r, stat, engine=self)
[ "def", "alias", "(", "self", ",", "v", ",", "stat", "=", "'dummy'", ")", ":", "from", ".", "util", "import", "EntityStatAccessor", "r", "=", "DummyEntity", "(", "self", ")", "r", "[", "stat", "]", "=", "v", "return", "EntityStatAccessor", "(", "r", ",", "stat", ",", "engine", "=", "self", ")" ]
Return a representation of a value suitable for use in historical queries. It will behave much as if you assigned the value to some entity and then used its ``historical`` method to get a reference to the set of its past values, which happens to contain only the value you've provided here, ``v``. :arg v: the value to represent :arg stat: what name to pretend its stat has; usually irrelevant
[ "Return", "a", "representation", "of", "a", "value", "suitable", "for", "use", "in", "historical", "queries", "." ]
python
train
42
ewels/MultiQC
multiqc/modules/flash/flash.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L217-L231
def freqpoly_plot(data): """make freqpoly plot of merged read lengths""" rel_data = OrderedDict() for key, val in data.items(): tot = sum(val.values(), 0) rel_data[key] = {k: v / tot for k, v in val.items()} fplotconfig = { 'data_labels': [ {'name': 'Absolute', 'ylab': 'Frequency', 'xlab': 'Merged Read Length'}, {'name': 'Relative', 'ylab': 'Relative Frequency', 'xlab': 'Merged Read Length'} ], 'id': 'flash_freqpoly_plot', 'title': 'FLASh: Frequency of merged read lengths', 'colors': dict(zip(data.keys(), MultiqcModule.get_colors(len(data)))) } return linegraph.plot([data, rel_data], fplotconfig)
[ "def", "freqpoly_plot", "(", "data", ")", ":", "rel_data", "=", "OrderedDict", "(", ")", "for", "key", ",", "val", "in", "data", ".", "items", "(", ")", ":", "tot", "=", "sum", "(", "val", ".", "values", "(", ")", ",", "0", ")", "rel_data", "[", "key", "]", "=", "{", "k", ":", "v", "/", "tot", "for", "k", ",", "v", "in", "val", ".", "items", "(", ")", "}", "fplotconfig", "=", "{", "'data_labels'", ":", "[", "{", "'name'", ":", "'Absolute'", ",", "'ylab'", ":", "'Frequency'", ",", "'xlab'", ":", "'Merged Read Length'", "}", ",", "{", "'name'", ":", "'Relative'", ",", "'ylab'", ":", "'Relative Frequency'", ",", "'xlab'", ":", "'Merged Read Length'", "}", "]", ",", "'id'", ":", "'flash_freqpoly_plot'", ",", "'title'", ":", "'FLASh: Frequency of merged read lengths'", ",", "'colors'", ":", "dict", "(", "zip", "(", "data", ".", "keys", "(", ")", ",", "MultiqcModule", ".", "get_colors", "(", "len", "(", "data", ")", ")", ")", ")", "}", "return", "linegraph", ".", "plot", "(", "[", "data", ",", "rel_data", "]", ",", "fplotconfig", ")" ]
make freqpoly plot of merged read lengths
[ "make", "freqpoly", "plot", "of", "merged", "read", "lengths" ]
python
train
49.933333
quodlibet/mutagen
mutagen/id3/_tags.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/id3/_tags.py#L491-L499
def _restore(self, value): """Restores the state copied with _copy()""" items, subs = value self.clear() for key, value in items: self[key] = value if key in subs: value.sub_frames._restore(subs[key])
[ "def", "_restore", "(", "self", ",", "value", ")", ":", "items", ",", "subs", "=", "value", "self", ".", "clear", "(", ")", "for", "key", ",", "value", "in", "items", ":", "self", "[", "key", "]", "=", "value", "if", "key", "in", "subs", ":", "value", ".", "sub_frames", ".", "_restore", "(", "subs", "[", "key", "]", ")" ]
Restores the state copied with _copy()
[ "Restores", "the", "state", "copied", "with", "_copy", "()" ]
python
train
29.444444
davenquinn/Attitude
attitude/stereonet.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/stereonet.py#L7-L11
def quaternion(vector, angle): """ Unit quaternion for a vector and an angle """ return N.cos(angle/2)+vector*N.sin(angle/2)
[ "def", "quaternion", "(", "vector", ",", "angle", ")", ":", "return", "N", ".", "cos", "(", "angle", "/", "2", ")", "+", "vector", "*", "N", ".", "sin", "(", "angle", "/", "2", ")" ]
Unit quaternion for a vector and an angle
[ "Unit", "quaternion", "for", "a", "vector", "and", "an", "angle" ]
python
train
27.2
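The one-liner above is the axis-angle to unit-quaternion map. Assuming `vector` behaves as a unit pure quaternion (so that scalar addition builds a full quaternion rather than broadcasting over a bare array), the returned value is

$$ q = \cos\tfrac{\theta}{2} + \hat{v}\,\sin\tfrac{\theta}{2}, \qquad \lVert q \rVert = 1, $$

and a vector $u$ is then rotated by conjugation, $u' = q\,u\,q^{-1}$.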
angr/angr
angr/calling_conventions.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/calling_conventions.py#L522-L629
def setup_callsite(self, state, ret_addr, args, stack_base=None, alloc_base=None, grow_like_stack=True): """ This function performs the actions of the caller getting ready to jump into a function. :param state: The SimState to operate on :param ret_addr: The address to return to when the called function finishes :param args: The list of arguments that that the called function will see :param stack_base: An optional pointer to use as the top of the stack, circa the function entry point :param alloc_base: An optional pointer to use as the place to put excess argument data :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the same type and size, while tuples (representing structs) can be elements of any type and size. If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value that can't fit in a register will be automatically put in a PointerWrapper. If stack_base is not provided, the current stack pointer will be used, and it will be updated. If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True. grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential allocations happen at increasing addresses. """ # STEP 0: clerical work if isinstance(self, SimCCSoot): SimEngineSoot.setup_callsite(state, args, ret_addr) return allocator = AllocHelper(self.arch.bits, self.arch.memory_endness == 'Iend_LE') # # STEP 1: convert all values into serialized form # this entails creating the vals list of simple values to store and also populating the allocator's # understanding of what aux data needs to be stored # This is also where we compute arg locations (arg_locs) # if self.func_ty is not None: vals = [self._standardize_value(arg, ty, state, allocator.dump) for arg, ty in zip(args, self.func_ty.args)] else: vals = [self._standardize_value(arg, None, state, allocator.dump) for arg in args] arg_session = self.arg_session arg_locs = [None]*len(args) for i, (arg, val) in enumerate(zip(args, vals)): if self.is_fp_value(arg) or \ (self.func_ty is not None and isinstance(self.func_ty.args[i], SimTypeFloat)): arg_locs[i] = arg_session.next_arg(is_fp=True, size=val.length // state.arch.byte_width) continue if val.length > state.arch.bits or (self.func_ty is None and isinstance(arg, (bytes, str, list, tuple))): vals[i] = allocator.dump(val, state) elif val.length < state.arch.bits: if self.arch.memory_endness == 'Iend_LE': vals[i] = val.concat(claripy.BVV(0, state.arch.bits - val.length)) else: vals[i] = claripy.BVV(0, state.arch.bits - val.length).concat(val) arg_locs[i] = arg_session.next_arg(is_fp=False, size=vals[i].length // state.arch.byte_width) # # STEP 2: decide on memory storage locations # implement the contract for stack_base/alloc_base/grow_like_stack # after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location, # and the stack pointer should be updated # if stack_base is None: if alloc_base is None: alloc_size = allocator.size() 
state.regs.sp -= alloc_size alloc_base = state.regs.sp grow_like_stack = False state.regs.sp -= self.stack_space(arg_locs) # handle alignment alignment = (state.regs.sp + self.STACKARG_SP_DIFF) % self.STACK_ALIGNMENT state.regs.sp -= alignment else: state.regs.sp = stack_base if alloc_base is None: alloc_base = stack_base + self.stack_space(arg_locs) grow_like_stack = False if grow_like_stack: alloc_base -= allocator.size() if type(alloc_base) is int: alloc_base = claripy.BVV(alloc_base, state.arch.bits) for i, val in enumerate(vals): vals[i] = allocator.translate(val, alloc_base) # # STEP 3: store everything! # allocator.apply(state, alloc_base) for loc, val in zip(arg_locs, vals): if val.length > loc.size * 8: raise ValueError("Can't fit value {} into location {}".format(repr(val), repr(loc))) loc.set_value(state, val, endness='Iend_BE', stack_base=stack_base) self.return_addr.set_value(state, ret_addr, stack_base=stack_base)
[ "def", "setup_callsite", "(", "self", ",", "state", ",", "ret_addr", ",", "args", ",", "stack_base", "=", "None", ",", "alloc_base", "=", "None", ",", "grow_like_stack", "=", "True", ")", ":", "# STEP 0: clerical work", "if", "isinstance", "(", "self", ",", "SimCCSoot", ")", ":", "SimEngineSoot", ".", "setup_callsite", "(", "state", ",", "args", ",", "ret_addr", ")", "return", "allocator", "=", "AllocHelper", "(", "self", ".", "arch", ".", "bits", ",", "self", ".", "arch", ".", "memory_endness", "==", "'Iend_LE'", ")", "#", "# STEP 1: convert all values into serialized form", "# this entails creating the vals list of simple values to store and also populating the allocator's", "# understanding of what aux data needs to be stored", "# This is also where we compute arg locations (arg_locs)", "#", "if", "self", ".", "func_ty", "is", "not", "None", ":", "vals", "=", "[", "self", ".", "_standardize_value", "(", "arg", ",", "ty", ",", "state", ",", "allocator", ".", "dump", ")", "for", "arg", ",", "ty", "in", "zip", "(", "args", ",", "self", ".", "func_ty", ".", "args", ")", "]", "else", ":", "vals", "=", "[", "self", ".", "_standardize_value", "(", "arg", ",", "None", ",", "state", ",", "allocator", ".", "dump", ")", "for", "arg", "in", "args", "]", "arg_session", "=", "self", ".", "arg_session", "arg_locs", "=", "[", "None", "]", "*", "len", "(", "args", ")", "for", "i", ",", "(", "arg", ",", "val", ")", "in", "enumerate", "(", "zip", "(", "args", ",", "vals", ")", ")", ":", "if", "self", ".", "is_fp_value", "(", "arg", ")", "or", "(", "self", ".", "func_ty", "is", "not", "None", "and", "isinstance", "(", "self", ".", "func_ty", ".", "args", "[", "i", "]", ",", "SimTypeFloat", ")", ")", ":", "arg_locs", "[", "i", "]", "=", "arg_session", ".", "next_arg", "(", "is_fp", "=", "True", ",", "size", "=", "val", ".", "length", "//", "state", ".", "arch", ".", "byte_width", ")", "continue", "if", "val", ".", "length", ">", "state", ".", "arch", ".", "bits", "or", "(", "self", ".", "func_ty", "is", "None", "and", "isinstance", "(", "arg", ",", "(", "bytes", ",", "str", ",", "list", ",", "tuple", ")", ")", ")", ":", "vals", "[", "i", "]", "=", "allocator", ".", "dump", "(", "val", ",", "state", ")", "elif", "val", ".", "length", "<", "state", ".", "arch", ".", "bits", ":", "if", "self", ".", "arch", ".", "memory_endness", "==", "'Iend_LE'", ":", "vals", "[", "i", "]", "=", "val", ".", "concat", "(", "claripy", ".", "BVV", "(", "0", ",", "state", ".", "arch", ".", "bits", "-", "val", ".", "length", ")", ")", "else", ":", "vals", "[", "i", "]", "=", "claripy", ".", "BVV", "(", "0", ",", "state", ".", "arch", ".", "bits", "-", "val", ".", "length", ")", ".", "concat", "(", "val", ")", "arg_locs", "[", "i", "]", "=", "arg_session", ".", "next_arg", "(", "is_fp", "=", "False", ",", "size", "=", "vals", "[", "i", "]", ".", "length", "//", "state", ".", "arch", ".", "byte_width", ")", "#", "# STEP 2: decide on memory storage locations", "# implement the contract for stack_base/alloc_base/grow_like_stack", "# after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location,", "# and the stack pointer should be updated", "#", "if", "stack_base", "is", "None", ":", "if", "alloc_base", "is", "None", ":", "alloc_size", "=", "allocator", ".", "size", "(", ")", "state", ".", "regs", ".", "sp", "-=", "alloc_size", "alloc_base", "=", "state", ".", "regs", ".", "sp", "grow_like_stack", "=", "False", "state", ".", "regs", ".", "sp", "-=", "self", ".", "stack_space", "(", 
"arg_locs", ")", "# handle alignment", "alignment", "=", "(", "state", ".", "regs", ".", "sp", "+", "self", ".", "STACKARG_SP_DIFF", ")", "%", "self", ".", "STACK_ALIGNMENT", "state", ".", "regs", ".", "sp", "-=", "alignment", "else", ":", "state", ".", "regs", ".", "sp", "=", "stack_base", "if", "alloc_base", "is", "None", ":", "alloc_base", "=", "stack_base", "+", "self", ".", "stack_space", "(", "arg_locs", ")", "grow_like_stack", "=", "False", "if", "grow_like_stack", ":", "alloc_base", "-=", "allocator", ".", "size", "(", ")", "if", "type", "(", "alloc_base", ")", "is", "int", ":", "alloc_base", "=", "claripy", ".", "BVV", "(", "alloc_base", ",", "state", ".", "arch", ".", "bits", ")", "for", "i", ",", "val", "in", "enumerate", "(", "vals", ")", ":", "vals", "[", "i", "]", "=", "allocator", ".", "translate", "(", "val", ",", "alloc_base", ")", "#", "# STEP 3: store everything!", "#", "allocator", ".", "apply", "(", "state", ",", "alloc_base", ")", "for", "loc", ",", "val", "in", "zip", "(", "arg_locs", ",", "vals", ")", ":", "if", "val", ".", "length", ">", "loc", ".", "size", "*", "8", ":", "raise", "ValueError", "(", "\"Can't fit value {} into location {}\"", ".", "format", "(", "repr", "(", "val", ")", ",", "repr", "(", "loc", ")", ")", ")", "loc", ".", "set_value", "(", "state", ",", "val", ",", "endness", "=", "'Iend_BE'", ",", "stack_base", "=", "stack_base", ")", "self", ".", "return_addr", ".", "set_value", "(", "state", ",", "ret_addr", ",", "stack_base", "=", "stack_base", ")" ]
This function performs the actions of the caller getting ready to jump into a function. :param state: The SimState to operate on :param ret_addr: The address to return to when the called function finishes :param args: The list of arguments that that the called function will see :param stack_base: An optional pointer to use as the top of the stack, circa the function entry point :param alloc_base: An optional pointer to use as the place to put excess argument data :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the same type and size, while tuples (representing structs) can be elements of any type and size. If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value that can't fit in a register will be automatically put in a PointerWrapper. If stack_base is not provided, the current stack pointer will be used, and it will be updated. If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True. grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential allocations happen at increasing addresses.
[ "This", "function", "performs", "the", "actions", "of", "the", "caller", "getting", "ready", "to", "jump", "into", "a", "function", "." ]
python
train
49.444444
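A hedged sketch of preparing a call site with a default calling convention; the binary path and addresses are placeholders, and the API matches angr as of this code's vintage:

import angr

proj = angr.Project('/bin/true', auto_load_libs=False)
cc = proj.factory.cc()                            # default SimCC for the loaded arch
state = proj.factory.blank_state(addr=0x401000)   # hypothetical callee address
cc.setup_callsite(state, ret_addr=0x400000, args=[1, 2, b'hello'])
# the bytes argument is auto-wrapped in a pointer, per the docstring above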
hydpy-dev/hydpy
hydpy/auxs/statstools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/statstools.py#L140-L170
def nse(sim=None, obs=None, node=None, skip_nan=False): """Calculate the efficiency criteria after Nash & Sutcliffe. If the simulated values predict the observed values as well as the average observed value (regarding the the mean square error), the NSE value is zero: >>> from hydpy import nse >>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]) 0.0 >>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0]) 0.0 For worse and better simulated values the NSE is negative or positive, respectively: >>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]) -3.0 >>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]) 0.5 The highest possible value is one: >>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]) 1.0 See the documentation on function |prepare_arrays| for some additional instructions for use of function |nse|. """ sim, obs = prepare_arrays(sim, obs, node, skip_nan) return 1.-numpy.sum((sim-obs)**2)/numpy.sum((obs-numpy.mean(obs))**2)
[ "def", "nse", "(", "sim", "=", "None", ",", "obs", "=", "None", ",", "node", "=", "None", ",", "skip_nan", "=", "False", ")", ":", "sim", ",", "obs", "=", "prepare_arrays", "(", "sim", ",", "obs", ",", "node", ",", "skip_nan", ")", "return", "1.", "-", "numpy", ".", "sum", "(", "(", "sim", "-", "obs", ")", "**", "2", ")", "/", "numpy", ".", "sum", "(", "(", "obs", "-", "numpy", ".", "mean", "(", "obs", ")", ")", "**", "2", ")" ]
Calculate the efficiency criteria after Nash & Sutcliffe. If the simulated values predict the observed values as well as the average observed value (regarding the mean square error), the NSE value is zero: >>> from hydpy import nse >>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]) 0.0 >>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0]) 0.0 For worse and better simulated values the NSE is negative or positive, respectively: >>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]) -3.0 >>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]) 0.5 The highest possible value is one: >>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]) 1.0 See the documentation on function |prepare_arrays| for some additional instructions for use of function |nse|.
[ "Calculate", "the", "efficiency", "criteria", "after", "Nash", "&", "Sutcliffe", "." ]
python
train
31.967742
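The final expression of the record's code is the usual Nash-Sutcliffe definition; in LaTeX:

$$ \mathrm{NSE} = 1 - \frac{\sum_i (s_i - o_i)^2}{\sum_i (o_i - \bar{o})^2} $$

where $s_i$ are the simulated values, $o_i$ the observations, and $\bar{o}$ their mean.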
globocom/GloboNetworkAPI-client-python
networkapiclient/Ip.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ip.py#L616-L663
def save_ipv6(self, ip6, id_equip, descricao, id_net): """ Save an IP6 and associate with equipment :param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx. :param id_equip: Equipment identifier. Integer value and greater than zero. :param descricao: IPv6 description. :param id_net: Network identifier. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'ipv6': {'id': < id >, 'block1': <block1>, 'block2': <block2>, 'block3': <block3>, 'block4': <block4>, 'block5': <block5>, 'block6': <block6>, 'block7': <block7>, 'block8': <block8>, 'descricao': < description >, 'equipamento': [ { all name equipamentos related } ], }} """ if not is_valid_int_param(id_net): raise InvalidParameterError( u'Network identifier is invalid or was not informed.') if not is_valid_int_param(id_equip): raise InvalidParameterError( u'Equipment identifier is invalid or was not informed.') if ip6 is None or ip6 == "": raise InvalidParameterError( u'IPv6 is invalid or was not informed.') ip_map = dict() ip_map['id_net'] = id_net ip_map['descricao'] = descricao ip_map['ip6'] = ip6 ip_map['id_equip'] = id_equip url = "ipv6/save/" code, xml = self.submit({'ip_map': ip_map}, 'POST', url) return self.response(code, xml)
[ "def", "save_ipv6", "(", "self", ",", "ip6", ",", "id_equip", ",", "descricao", ",", "id_net", ")", ":", "if", "not", "is_valid_int_param", "(", "id_net", ")", ":", "raise", "InvalidParameterError", "(", "u'Network identifier is invalid or was not informed.'", ")", "if", "not", "is_valid_int_param", "(", "id_equip", ")", ":", "raise", "InvalidParameterError", "(", "u'Equipment identifier is invalid or was not informed.'", ")", "if", "ip6", "is", "None", "or", "ip6", "==", "\"\"", ":", "raise", "InvalidParameterError", "(", "u'IPv6 is invalid or was not informed.'", ")", "ip_map", "=", "dict", "(", ")", "ip_map", "[", "'id_net'", "]", "=", "id_net", "ip_map", "[", "'descricao'", "]", "=", "descricao", "ip_map", "[", "'ip6'", "]", "=", "ip6", "ip_map", "[", "'id_equip'", "]", "=", "id_equip", "url", "=", "\"ipv6/save/\"", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'ip_map'", ":", "ip_map", "}", ",", "'POST'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Save an IP6 and associate with equipment :param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx. :param id_equip: Equipment identifier. Integer value and greater than zero. :param descricao: IPv6 description. :param id_net: Network identifier. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'ipv6': {'id': < id >, 'block1': <block1>, 'block2': <block2>, 'block3': <block3>, 'block4': <block4>, 'block5': <block5>, 'block6': <block6>, 'block7': <block7>, 'block8': <block8>, 'descricao': < description >, 'equipamento': [ { all name equipamentos related } ], }}
[ "Save", "an", "IP6", "and", "associate", "with", "equipment" ]
python
train
33.5625
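A hedged sketch of `save_ipv6`; the endpoint, credentials, and identifiers are placeholders, and the constructor follows the GenericClient pattern used throughout this client:

from networkapiclient.Ip import Ip

client = Ip('https://networkapi.example.com/', 'user', 'password')  # hypothetical
resp = client.save_ipv6('fdbe:bebe:0:0:0:0:0:10', id_equip=12,
                        descricao='test address', id_net=5)         # hypothetical ids
print(resp['ipv6']['id'])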
LudovicRousseau/PyKCS11
PyKCS11/__init__.py
https://github.com/LudovicRousseau/PyKCS11/blob/76ccd8741af2ea193aaf1ca29dfedfa412c134fe/PyKCS11/__init__.py#L975-L985
def digestSession(self, mecha=MechanismSHA1): """ C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal :param mecha: the digesting mechanism to be used (use `MechanismSHA1` for `CKM_SHA_1`) :type mecha: :class:`Mechanism` :return: A :class:`DigestSession` object :rtype: DigestSession """ return DigestSession(self.lib, self.session, mecha)
[ "def", "digestSession", "(", "self", ",", "mecha", "=", "MechanismSHA1", ")", ":", "return", "DigestSession", "(", "self", ".", "lib", ",", "self", ".", "session", ",", "mecha", ")" ]
C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal :param mecha: the digesting mechanism to be used (use `MechanismSHA1` for `CKM_SHA_1`) :type mecha: :class:`Mechanism` :return: A :class:`DigestSession` object :rtype: DigestSession
[ "C_DigestInit", "/", "C_DigestUpdate", "/", "C_DigestKey", "/", "C_DigestFinal" ]
python
test
36.909091
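A hedged sketch of an incremental digest via `digestSession`; the PKCS#11 module path is a placeholder (SoftHSM shown) and a present token is assumed:

from PyKCS11 import PyKCS11Lib, MechanismSHA1

pkcs11 = PyKCS11Lib()
pkcs11.load('/usr/lib/softhsm/libsofthsm2.so')   # hypothetical module path
session = pkcs11.openSession(pkcs11.getSlotList()[0])
d = session.digestSession(MechanismSHA1)
d.update(b'hello ')
d.update(b'world')
print(bytes(d.final()).hex())                    # SHA-1 of b'hello world'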
fboender/ansible-cmdb
lib/mako/runtime.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/runtime.py#L103-L109
def _push_writer(self): """push a capturing buffer onto this Context and return the new writer function.""" buf = util.FastEncodingBuffer() self._buffer_stack.append(buf) return buf.write
[ "def", "_push_writer", "(", "self", ")", ":", "buf", "=", "util", ".", "FastEncodingBuffer", "(", ")", "self", ".", "_buffer_stack", ".", "append", "(", "buf", ")", "return", "buf", ".", "write" ]
push a capturing buffer onto this Context and return the new writer function.
[ "push", "a", "capturing", "buffer", "onto", "this", "Context", "and", "return", "the", "new", "writer", "function", "." ]
python
train
31.714286
Fortran-FOSS-Programmers/ford
ford/sourceform.py
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L998-L1034
def prune(self): """ Remove anything which shouldn't be displayed. """ def to_include(obj): inc = obj.permission in self.display if self.settings['hide_undoc'].lower() == 'true' and not obj.doc: inc = False return inc if self.obj == 'proc' and self.meta['proc_internals'] == 'false': self.functions = [] self.subroutines = [] self.types = [] self.interfaces = [] self.absinterfaces = [] self.variables = [] else: self.functions = [obj for obj in self.functions if to_include(obj)] self.subroutines = [obj for obj in self.subroutines if to_include(obj)] self.types = [obj for obj in self.types if to_include(obj)] self.interfaces = [obj for obj in self.interfaces if to_include(obj)] self.absinterfaces = [obj for obj in self.absinterfaces if to_include(obj)] self.variables = [obj for obj in self.variables if to_include(obj)] if hasattr(self,'modprocedures'): self.modprocedures = [obj for obj in self.modprocedures if to_include(obj)] if hasattr(self,'modsubroutines'): self.modsubroutines = [obj for obj in self.modsubroutines if to_include(obj)] if hasattr(self,'modfunctions'): self.modfunctions = [obj for obj in self.modfunctions if to_include(obj)] # Recurse for obj in self.absinterfaces: obj.visible = True for obj in self.iterator('functions', 'subroutines', 'types', 'interfaces', 'modprocedures', 'modfunctions', 'modsubroutines'): obj.visible = True for obj in self.iterator('functions', 'subroutines', 'types', 'modprocedures', 'modfunctions', 'modsubroutines'): obj.prune()
[ "def", "prune", "(", "self", ")", ":", "def", "to_include", "(", "obj", ")", ":", "inc", "=", "obj", ".", "permission", "in", "self", ".", "display", "if", "self", ".", "settings", "[", "'hide_undoc'", "]", ".", "lower", "(", ")", "==", "'true'", "and", "not", "obj", ".", "doc", ":", "inc", "=", "False", "return", "inc", "if", "self", ".", "obj", "==", "'proc'", "and", "self", ".", "meta", "[", "'proc_internals'", "]", "==", "'false'", ":", "self", ".", "functions", "=", "[", "]", "self", ".", "subroutines", "=", "[", "]", "self", ".", "types", "=", "[", "]", "self", ".", "interfaces", "=", "[", "]", "self", ".", "absinterfaces", "=", "[", "]", "self", ".", "variables", "=", "[", "]", "else", ":", "self", ".", "functions", "=", "[", "obj", "for", "obj", "in", "self", ".", "functions", "if", "to_include", "(", "obj", ")", "]", "self", ".", "subroutines", "=", "[", "obj", "for", "obj", "in", "self", ".", "subroutines", "if", "to_include", "(", "obj", ")", "]", "self", ".", "types", "=", "[", "obj", "for", "obj", "in", "self", ".", "types", "if", "to_include", "(", "obj", ")", "]", "self", ".", "interfaces", "=", "[", "obj", "for", "obj", "in", "self", ".", "interfaces", "if", "to_include", "(", "obj", ")", "]", "self", ".", "absinterfaces", "=", "[", "obj", "for", "obj", "in", "self", ".", "absinterfaces", "if", "to_include", "(", "obj", ")", "]", "self", ".", "variables", "=", "[", "obj", "for", "obj", "in", "self", ".", "variables", "if", "to_include", "(", "obj", ")", "]", "if", "hasattr", "(", "self", ",", "'modprocedures'", ")", ":", "self", ".", "modprocedures", "=", "[", "obj", "for", "obj", "in", "self", ".", "modprocedures", "if", "to_include", "(", "obj", ")", "]", "if", "hasattr", "(", "self", ",", "'modsubroutines'", ")", ":", "self", ".", "modsubroutines", "=", "[", "obj", "for", "obj", "in", "self", ".", "modsubroutines", "if", "to_include", "(", "obj", ")", "]", "if", "hasattr", "(", "self", ",", "'modfunctions'", ")", ":", "self", ".", "modfunctions", "=", "[", "obj", "for", "obj", "in", "self", ".", "modfunctions", "if", "to_include", "(", "obj", ")", "]", "# Recurse", "for", "obj", "in", "self", ".", "absinterfaces", ":", "obj", ".", "visible", "=", "True", "for", "obj", "in", "self", ".", "iterator", "(", "'functions'", ",", "'subroutines'", ",", "'types'", ",", "'interfaces'", ",", "'modprocedures'", ",", "'modfunctions'", ",", "'modsubroutines'", ")", ":", "obj", ".", "visible", "=", "True", "for", "obj", "in", "self", ".", "iterator", "(", "'functions'", ",", "'subroutines'", ",", "'types'", ",", "'modprocedures'", ",", "'modfunctions'", ",", "'modsubroutines'", ")", ":", "obj", ".", "prune", "(", ")" ]
Remove anything which shouldn't be displayed.
[ "Remove", "anything", "which", "shouldn", "t", "be", "displayed", "." ]
python
train
50
ecmwf/cfgrib
cfgrib/dataset.py
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/dataset.py#L502-L508
def open_file(path, grib_errors='warn', **kwargs): """Open a GRIB file as a ``cfgrib.Dataset``.""" if 'mode' in kwargs: warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning) kwargs.pop('mode') stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors) return Dataset(*build_dataset_components(stream, **kwargs))
[ "def", "open_file", "(", "path", ",", "grib_errors", "=", "'warn'", ",", "*", "*", "kwargs", ")", ":", "if", "'mode'", "in", "kwargs", ":", "warnings", ".", "warn", "(", "\"the `mode` keyword argument is ignored and deprecated\"", ",", "FutureWarning", ")", "kwargs", ".", "pop", "(", "'mode'", ")", "stream", "=", "messages", ".", "FileStream", "(", "path", ",", "message_class", "=", "cfmessage", ".", "CfMessage", ",", "errors", "=", "grib_errors", ")", "return", "Dataset", "(", "*", "build_dataset_components", "(", "stream", ",", "*", "*", "kwargs", ")", ")" ]
Open a GRIB file as a ``cfgrib.Dataset``.
[ "Open", "a", "GRIB", "file", "as", "a", "cfgrib", ".", "Dataset", "." ]
python
train
57.142857
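A minimal sketch of `open_file`; the GRIB path is a placeholder and the ecCodes library must be installed:

from cfgrib import dataset

ds = dataset.open_file('example.grib')   # hypothetical file
print(ds.dimensions)
print(sorted(ds.variables))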
NatLibFi/Skosify
skosify/skosify.py
https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L184-L206
def initialize_concept_scheme(rdf, cs, label, language, set_modified): """Initialize a concept scheme: Optionally add a label if the concept scheme doesn't have a label, and optionally add a dct:modified timestamp.""" # check whether the concept scheme is unlabeled, and label it if possible labels = list(rdf.objects(cs, RDFS.label)) + \ list(rdf.objects(cs, SKOS.prefLabel)) if len(labels) == 0: if not label: logging.warning( "Concept scheme has no label(s). " "Use --label option to set the concept scheme label.") else: logging.info( "Unlabeled concept scheme detected. Setting label to '%s'" % label) rdf.add((cs, RDFS.label, Literal(label, language))) if set_modified: curdate = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z' rdf.remove((cs, DCTERMS.modified, None)) rdf.add((cs, DCTERMS.modified, Literal(curdate, datatype=XSD.dateTime)))
[ "def", "initialize_concept_scheme", "(", "rdf", ",", "cs", ",", "label", ",", "language", ",", "set_modified", ")", ":", "# check whether the concept scheme is unlabeled, and label it if possible", "labels", "=", "list", "(", "rdf", ".", "objects", "(", "cs", ",", "RDFS", ".", "label", ")", ")", "+", "list", "(", "rdf", ".", "objects", "(", "cs", ",", "SKOS", ".", "prefLabel", ")", ")", "if", "len", "(", "labels", ")", "==", "0", ":", "if", "not", "label", ":", "logging", ".", "warning", "(", "\"Concept scheme has no label(s). \"", "\"Use --label option to set the concept scheme label.\"", ")", "else", ":", "logging", ".", "info", "(", "\"Unlabeled concept scheme detected. Setting label to '%s'\"", "%", "label", ")", "rdf", ".", "add", "(", "(", "cs", ",", "RDFS", ".", "label", ",", "Literal", "(", "label", ",", "language", ")", ")", ")", "if", "set_modified", ":", "curdate", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "microsecond", "=", "0", ")", ".", "isoformat", "(", ")", "+", "'Z'", "rdf", ".", "remove", "(", "(", "cs", ",", "DCTERMS", ".", "modified", ",", "None", ")", ")", "rdf", ".", "add", "(", "(", "cs", ",", "DCTERMS", ".", "modified", ",", "Literal", "(", "curdate", ",", "datatype", "=", "XSD", ".", "dateTime", ")", ")", ")" ]
Initialize a concept scheme: Optionally add a label if the concept scheme doesn't have a label, and optionally add a dct:modified timestamp.
[ "Initialize", "a", "concept", "scheme", ":", "Optionally", "add", "a", "label", "if", "the", "concept", "scheme", "doesn", "t", "have", "a", "label", "and", "optionally", "add", "a", "dct", ":", "modified", "timestamp", "." ]
python
train
44.478261
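A hedged sketch of driving initialize_concept_scheme (the scheme URI is made up; requires rdflib, and assumes the function is imported from skosify.skosify together with the RDFS/DCTERMS namespaces it uses):

import rdflib
rdf = rdflib.Graph()
cs = rdflib.URIRef('http://example.org/vocab')  # hypothetical concept scheme URI
initialize_concept_scheme(rdf, cs, label='Example Vocabulary', language='en', set_modified=True)
# rdf now carries an rdfs:label for cs plus a dct:modified xsd:dateTime stamp.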
DataDog/integrations-core
vsphere/datadog_checks/vsphere/vsphere.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/vsphere/datadog_checks/vsphere/vsphere.py#L573-L608
def _cache_morlist_raw(self, instance): """ Fill the Mor objects queue that will be asynchronously processed later. Resolve the vCenter `rootFolder` and initiate hosts and virtual machines discovery. """ i_key = self._instance_key(instance) self.log.debug("Caching the morlist for vcenter instance {}".format(i_key)) # If the queue is not completely empty, don't do anything for resource_type in RESOURCE_TYPE_METRICS: if self.mor_objects_queue.contains(i_key) and self.mor_objects_queue.size(i_key, resource_type): last = self.cache_config.get_last(CacheConfig.Morlist, i_key) self.log.debug( "Skipping morlist collection: the objects queue for the " "resource type '{}' is still being processed " "(latest refresh was {}s ago)".format(ensure_unicode(resource_type), time.time() - last) ) return tags = ["vcenter_server:{}".format(ensure_unicode(instance.get('name')))] regexes = { 'host_include': instance.get('host_include_only_regex'), 'vm_include': instance.get('vm_include_only_regex'), } include_only_marked = is_affirmative(instance.get('include_only_marked', False)) # Discover hosts and virtual machines server_instance = self._get_server_instance(instance) use_guest_hostname = is_affirmative(instance.get("use_guest_hostname", False)) all_objs = self._get_all_objs( server_instance, regexes, include_only_marked, tags, use_guest_hostname=use_guest_hostname ) self.mor_objects_queue.fill(i_key, dict(all_objs)) self.cache_config.set_last(CacheConfig.Morlist, i_key, time.time())
[ "def", "_cache_morlist_raw", "(", "self", ",", "instance", ")", ":", "i_key", "=", "self", ".", "_instance_key", "(", "instance", ")", "self", ".", "log", ".", "debug", "(", "\"Caching the morlist for vcenter instance {}\"", ".", "format", "(", "i_key", ")", ")", "# If the queue is not completely empty, don't do anything", "for", "resource_type", "in", "RESOURCE_TYPE_METRICS", ":", "if", "self", ".", "mor_objects_queue", ".", "contains", "(", "i_key", ")", "and", "self", ".", "mor_objects_queue", ".", "size", "(", "i_key", ",", "resource_type", ")", ":", "last", "=", "self", ".", "cache_config", ".", "get_last", "(", "CacheConfig", ".", "Morlist", ",", "i_key", ")", "self", ".", "log", ".", "debug", "(", "\"Skipping morlist collection: the objects queue for the \"", "\"resource type '{}' is still being processed \"", "\"(latest refresh was {}s ago)\"", ".", "format", "(", "ensure_unicode", "(", "resource_type", ")", ",", "time", ".", "time", "(", ")", "-", "last", ")", ")", "return", "tags", "=", "[", "\"vcenter_server:{}\"", ".", "format", "(", "ensure_unicode", "(", "instance", ".", "get", "(", "'name'", ")", ")", ")", "]", "regexes", "=", "{", "'host_include'", ":", "instance", ".", "get", "(", "'host_include_only_regex'", ")", ",", "'vm_include'", ":", "instance", ".", "get", "(", "'vm_include_only_regex'", ")", ",", "}", "include_only_marked", "=", "is_affirmative", "(", "instance", ".", "get", "(", "'include_only_marked'", ",", "False", ")", ")", "# Discover hosts and virtual machines", "server_instance", "=", "self", ".", "_get_server_instance", "(", "instance", ")", "use_guest_hostname", "=", "is_affirmative", "(", "instance", ".", "get", "(", "\"use_guest_hostname\"", ",", "False", ")", ")", "all_objs", "=", "self", ".", "_get_all_objs", "(", "server_instance", ",", "regexes", ",", "include_only_marked", ",", "tags", ",", "use_guest_hostname", "=", "use_guest_hostname", ")", "self", ".", "mor_objects_queue", ".", "fill", "(", "i_key", ",", "dict", "(", "all_objs", ")", ")", "self", ".", "cache_config", ".", "set_last", "(", "CacheConfig", ".", "Morlist", ",", "i_key", ",", "time", ".", "time", "(", ")", ")" ]
Fill the Mor objects queue that will be asynchronously processed later. Resolve the vCenter `rootFolder` and initiate hosts and virtual machines discovery.
[ "Fill", "the", "Mor", "objects", "queue", "that", "will", "be", "asynchronously", "processed", "later", ".", "Resolve", "the", "vCenter", "rootFolder", "and", "initiate", "hosts", "and", "virtual", "machines", "discovery", "." ]
python
train
49.722222
20c/grainy
grainy/core.py
https://github.com/20c/grainy/blob/cd956fd4144044993abc967974a127aab07a8ef6/grainy/core.py#L12-L40
def int_flags(flags, mapper=const.PERM_STRING_MAP): """ Converts string permission flags into integer permission flags as specified in const.PERM_STRING_MAP Arguments: - flags <str>: one or more flags For example: "crud" or "ru" or "r" - mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping int permission flag to string permission flag. If not specified will default to const.PERM_STRING_MAP. Returns: - int """ r = 0 if not flags: return r if isinstance(flags, six.integer_types): return flags if not isinstance(flags, six.string_types): raise TypeError("`flags` needs to be a string or integer type") for f in flags: for f_i, f_s in mapper: if f_s == f: r = r | f_i return r
[ "def", "int_flags", "(", "flags", ",", "mapper", "=", "const", ".", "PERM_STRING_MAP", ")", ":", "r", "=", "0", "if", "not", "flags", ":", "return", "r", "if", "isinstance", "(", "flags", ",", "six", ".", "integer_types", ")", ":", "return", "flags", "if", "not", "isinstance", "(", "flags", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "\"`flags` needs to be a string or integer type\"", ")", "for", "f", "in", "flags", ":", "for", "f_i", ",", "f_s", "in", "mapper", ":", "if", "f_s", "==", "f", ":", "r", "=", "r", "|", "f_i", "return", "r" ]
Converts string permission flags into integer permission flags as specified in const.PERM_STRING_MAP Arguments: - flags <str>: one or more flags For example: "crud" or "ru" or "r" - mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping int permission flag to string permission flag. If not specified will default to const.PERM_STRING_MAP. Returns: - int
[ "Converts", "string", "permission", "flags", "into", "integer", "permission", "flags", "as", "specified", "in", "const", ".", "PERM_STRING_MAP", "Arguments", ":", "-", "flags", "<str", ">", ":", "one", "or", "more", "flags", "For", "example", ":", "crud", "or", "ru", "or", "r", "-", "mapper", "<list", "=", "const", ".", "PERM_STRING_MAP", ">", ":", "a", "list", "containing", "tuples", "mapping", "int", "permission", "flag", "to", "string", "permission", "flag", ".", "If", "not", "specified", "will", "default", "to", "const", ".", "PERM_STRING_MAP", ".", "Returns", ":", "-", "int" ]
python
train
28.896552
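A usage sketch for int_flags (assumes the default const.PERM_STRING_MAP, which pairs integer permission bits with the letters used in strings such as "crud"):

from grainy.core import int_flags
full = int_flags('crud')        # bitwise OR of all four permission bits
ru = int_flags('ru')            # combine just the r and u bits
assert int_flags(full) == full  # integer input passes through unchanged
assert int_flags('') == 0       # falsy input yields 0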
MartinThoma/mpu
mpu/math.py
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/math.py#L23-L61
def generate_primes(): """ Generate an infinite sequence of prime numbers. The algorithm was originally written by David Eppstein, UC Irvine. See: http://code.activestate.com/recipes/117119/ Examples -------- >>> g = generate_primes() >>> next(g) 2 >>> next(g) 3 >>> next(g) 5 """ divisors = {} # map number to at least one divisor candidate = 2 # next potential prime while True: if candidate in divisors: # candidate is composite. divisors[candidate] is the list of primes # that divide it. Since we've reached candidate, we no longer need # it in the map, but we'll mark the next multiples of its witnesses # to prepare for larger numbers for p in divisors[candidate]: divisors.setdefault(p + candidate, []).append(p) del divisors[candidate] else: # candidate is a new prime yield candidate # mark its first multiple that isn't # already marked in previous iterations divisors[candidate * candidate] = [candidate] candidate += 1
[ "def", "generate_primes", "(", ")", ":", "divisors", "=", "{", "}", "# map number to at least one divisor", "candidate", "=", "2", "# next potential prime", "while", "True", ":", "if", "candidate", "in", "divisors", ":", "# candidate is composite. divisors[candidate] is the list of primes", "# that divide it. Since we've reached candidate, we no longer need", "# it in the map, but we'll mark the next multiples of its witnesses", "# to prepare for larger numbers", "for", "p", "in", "divisors", "[", "candidate", "]", ":", "divisors", ".", "setdefault", "(", "p", "+", "candidate", ",", "[", "]", ")", ".", "append", "(", "p", ")", "del", "divisors", "[", "candidate", "]", "else", ":", "# candidate is a new prime", "yield", "candidate", "# mark its first multiple that isn't", "# already marked in previous iterations", "divisors", "[", "candidate", "*", "candidate", "]", "=", "[", "candidate", "]", "candidate", "+=", "1" ]
Generate an infinite sequence of prime numbers. The algorithm was originally written by David Eppstein, UC Irvine. See: http://code.activestate.com/recipes/117119/ Examples -------- >>> g = generate_primes() >>> next(g) 2 >>> next(g) 3 >>> next(g) 5
[ "Generate", "an", "infinite", "sequence", "of", "prime", "numbers", "." ]
python
train
29.25641
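Beyond the next() calls shown in the doctest, the generator composes with itertools to take primes in batches:

import itertools
print(list(itertools.islice(generate_primes(), 8)))  # [2, 3, 5, 7, 11, 13, 17, 19]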
apple/turicreate
src/unity/python/turicreate/data_structures/sframe.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L3295-L3347
def remove_column(self, column_name, inplace=False): """ Returns an SFrame with a column removed. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- column_name : string The name of the column to remove. inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The SFrame with given column removed. Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']}) >>> # This is equivalent to `del sf['val']` >>> res = sf.remove_column('val') >>> res +----+ | id | +----+ | 1 | | 2 | | 3 | +----+ [3 rows x 1 columns] """ column_name = str(column_name) if column_name not in self.column_names(): raise KeyError('Cannot find column %s' % column_name) colid = self.column_names().index(column_name) if inplace: ret = self else: ret = self.copy() with cython_context(): ret.__proxy__.remove_column(colid) ret._cache = None return ret
[ "def", "remove_column", "(", "self", ",", "column_name", ",", "inplace", "=", "False", ")", ":", "column_name", "=", "str", "(", "column_name", ")", "if", "column_name", "not", "in", "self", ".", "column_names", "(", ")", ":", "raise", "KeyError", "(", "'Cannot find column %s'", "%", "column_name", ")", "colid", "=", "self", ".", "column_names", "(", ")", ".", "index", "(", "column_name", ")", "if", "inplace", ":", "ret", "=", "self", "else", ":", "ret", "=", "self", ".", "copy", "(", ")", "with", "cython_context", "(", ")", ":", "ret", ".", "__proxy__", ".", "remove_column", "(", "colid", ")", "ret", ".", "_cache", "=", "None", "return", "ret" ]
Returns an SFrame with a column removed. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- column_name : string The name of the column to remove. inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The SFrame with given column removed. Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']}) >>> # This is equivalent to `del sf['val']` >>> res = sf.remove_column('val') >>> res +----+ | id | +----+ | 1 | | 2 | | 3 | +----+ [3 rows x 1 columns]
[ "Returns", "an", "SFrame", "with", "a", "column", "removed", "." ]
python
train
26.377358
satellogic/telluric
telluric/vectors.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/vectors.py#L98-L126
def generate_tile_coordinates(roi, num_tiles): # type: (GeoVector, Tuple[int, int]) -> Iterator[GeoVector] """Yields N x M rectangular tiles for a region of interest. Parameters ---------- roi : GeoVector Region of interest num_tiles : tuple Tuple (horizontal_tiles, vertical_tiles) Yields ------ ~telluric.vectors.GeoVector """ bounds = roi.get_shape(roi.crs).bounds x_range = np.linspace(bounds[0], bounds[2], int(num_tiles[0]) + 1) y_range = np.linspace(bounds[1], bounds[3], int(num_tiles[1]) + 1) for y_start, y_end in zip(y_range[:-1], y_range[1:]): for x_start, x_end in zip(x_range[:-1], x_range[1:]): new_roi = GeoVector( Polygon.from_bounds(x_start, y_start, x_end, y_end), roi.crs ) yield new_roi
[ "def", "generate_tile_coordinates", "(", "roi", ",", "num_tiles", ")", ":", "# type: (GeoVector, Tuple[int, int]) -> Iterator[GeoVector]", "bounds", "=", "roi", ".", "get_shape", "(", "roi", ".", "crs", ")", ".", "bounds", "x_range", "=", "np", ".", "linspace", "(", "bounds", "[", "0", "]", ",", "bounds", "[", "2", "]", ",", "int", "(", "num_tiles", "[", "0", "]", ")", "+", "1", ")", "y_range", "=", "np", ".", "linspace", "(", "bounds", "[", "1", "]", ",", "bounds", "[", "3", "]", ",", "int", "(", "num_tiles", "[", "1", "]", ")", "+", "1", ")", "for", "y_start", ",", "y_end", "in", "zip", "(", "y_range", "[", ":", "-", "1", "]", ",", "y_range", "[", "1", ":", "]", ")", ":", "for", "x_start", ",", "x_end", "in", "zip", "(", "x_range", "[", ":", "-", "1", "]", ",", "x_range", "[", "1", ":", "]", ")", ":", "new_roi", "=", "GeoVector", "(", "Polygon", ".", "from_bounds", "(", "x_start", ",", "y_start", ",", "x_end", ",", "y_end", ")", ",", "roi", ".", "crs", ")", "yield", "new_roi" ]
Yields N x M rectangular tiles for a region of interest. Parameters ---------- roi : GeoVector Region of interest num_tiles : tuple Tuple (horizontal_tiles, vertical_tiles) Yields ------ ~telluric.vectors.GeoVector
[ "Yields", "N", "x", "M", "rectangular", "tiles", "for", "a", "region", "of", "interest", "." ]
python
train
28.724138
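A hedged usage sketch (the bounding box is a toy value, and a GeoVector.from_bounds constructor with the default CRS is assumed to exist in telluric):

from telluric import GeoVector
roi = GeoVector.from_bounds(xmin=0, ymin=0, xmax=1, ymax=1)  # toy region of interest
tiles = list(generate_tile_coordinates(roi, (2, 2)))
print(len(tiles))  # 4 rectangular GeoVector tiles covering the ROI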
SeabornGames/Logger
seaborn_logger/logger.py
https://github.com/SeabornGames/Logger/blob/fb8b1700557aaea8d3216bd4c4df33c302bece7f/seaborn_logger/logger.py#L45-L72
def findCaller(self, stack_info=False): """ Find the stack frame of the caller so that we can note the source file name, line number and function name. """ f = logging.currentframe() # On some versions of IronPython, currentframe() returns None if # IronPython isn't run with -X:Frames. if f is not None: f = f.f_back rv = "(unknown file)", 0, "(unknown function)" while hasattr(f, "f_code"): co = f.f_code filename = os.path.normcase(co.co_filename) if filename == logging._srcfile or filename == self._srcfile: f = f.f_back continue rv = (co.co_filename, f.f_lineno, co.co_name) if stack_info: sio = io.StringIO() sio.write('Stack (most recent call last):\n') traceback.print_stack(f, file=sio) sinfo = sio.getvalue() if sinfo[-1] == '\n': sinfo = sinfo[:-1] sio.close() break return rv
[ "def", "findCaller", "(", "self", ",", "stack_info", "=", "False", ")", ":", "f", "=", "logging", ".", "currentframe", "(", ")", "# On some versions of IronPython, currentframe() returns None if", "# IronPython isn't run with -X:Frames.", "if", "f", "is", "not", "None", ":", "f", "=", "f", ".", "f_back", "rv", "=", "\"(unknown file)\"", ",", "0", ",", "\"(unknown function)\"", "while", "hasattr", "(", "f", ",", "\"f_code\"", ")", ":", "co", "=", "f", ".", "f_code", "filename", "=", "os", ".", "path", ".", "normcase", "(", "co", ".", "co_filename", ")", "if", "filename", "==", "logging", ".", "_srcfile", "or", "filename", "==", "self", ".", "_srcfile", ":", "f", "=", "f", ".", "f_back", "continue", "rv", "=", "(", "co", ".", "co_filename", ",", "f", ".", "f_lineno", ",", "co", ".", "co_name", ")", "if", "stack_info", ":", "sio", "=", "io", ".", "StringIO", "(", ")", "sio", ".", "write", "(", "'Stack (most recent call last):\\n'", ")", "traceback", ".", "print_stack", "(", "f", ",", "file", "=", "sio", ")", "sinfo", "=", "sio", ".", "getvalue", "(", ")", "if", "sinfo", "[", "-", "1", "]", "==", "'\\n'", ":", "sinfo", "=", "sinfo", "[", ":", "-", "1", "]", "sio", ".", "close", "(", ")", "break", "return", "rv" ]
Find the stack frame of the caller so that we can note the source file name, line number and function name.
[ "Find", "the", "stack", "frame", "of", "the", "caller", "so", "that", "we", "can", "note", "the", "source", "file", "name", "line", "number", "and", "function", "name", "." ]
python
test
38.607143
knipknap/exscript
Exscript/account.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/account.py#L123-L138
def release(self, signal=True): """ Unlocks the account. Method has no effect if the constructor argument `needs_lock` wsa set to False. :type signal: bool :param signal: Whether to emit the released_event signal. """ if not self.needs_lock: return with self.synclock: self.lock.release() if signal: self.released_event(self) self.synclock.notify_all()
[ "def", "release", "(", "self", ",", "signal", "=", "True", ")", ":", "if", "not", "self", ".", "needs_lock", ":", "return", "with", "self", ".", "synclock", ":", "self", ".", "lock", ".", "release", "(", ")", "if", "signal", ":", "self", ".", "released_event", "(", "self", ")", "self", ".", "synclock", ".", "notify_all", "(", ")" ]
Unlocks the account. Method has no effect if the constructor argument `needs_lock` was set to False. :type signal: bool :param signal: Whether to emit the released_event signal.
[ "Unlocks", "the", "account", ".", "Method", "has", "no", "effect", "if", "the", "constructor", "argument", "needs_lock", "was", "set", "to", "False", "." ]
python
train
29.625
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/common.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/common.py#L347-L368
def dt_str_to_posix(dt_str): """format str to posix. datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ, e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator between date and time when they are on the same line. Z indicates UTC (zero meridian). A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html This is used to parse LastModified node from GCS's GET bucket XML response. Args: dt_str: A datetime str. Returns: A float of secs from unix epoch. By posix definition, epoch is midnight 1970/1/1 UTC. """ parsable, _ = dt_str.split('.') dt = datetime.datetime.strptime(parsable, _DT_FORMAT) return calendar.timegm(dt.utctimetuple())
[ "def", "dt_str_to_posix", "(", "dt_str", ")", ":", "parsable", ",", "_", "=", "dt_str", ".", "split", "(", "'.'", ")", "dt", "=", "datetime", ".", "datetime", ".", "strptime", "(", "parsable", ",", "_DT_FORMAT", ")", "return", "calendar", ".", "timegm", "(", "dt", ".", "utctimetuple", "(", ")", ")" ]
format str to posix. datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ, e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator between date and time when they are on the same line. Z indicates UTC (zero meridian). A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html This is used to parse LastModified node from GCS's GET bucket XML response. Args: dt_str: A datetime str. Returns: A float of secs from unix epoch. By posix definition, epoch is midnight 1970/1/1 UTC.
[ "format", "str", "to", "posix", "." ]
python
train
30.454545
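A usage sketch reusing the timestamp from the docstring:

secs = dt_str_to_posix('2013-04-12T00:22:27.978Z')
print(secs)  # 1365726147, i.e. seconds since the Unix epoch (the .978 fraction is discarded)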
Neurita/boyle
scripts/compare_id_sets.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/scripts/compare_id_sets.py#L280-L308
def _print_foreign_repetition_table(self, idset1, idset2): """ Print a table of values repeated in idset2 together with the idset1 reference values that match each repetition. :param idset1: :param idset2: """ assert(isinstance(idset1, idset_with_reference)) assert(isinstance(idset2, idset)) reps = idset2.get_repetitions() if len(reps) < 1: return refs = np.array(idset1.reflst) table = [['{0} {1} values of repetitions in {2}'.format(idset1.name, idset1.refname, idset2.name), '']] for rep in reps: if np.any(idset1 == rep): matches = refs[np.where(idset1 == rep)] myrep = rep for m in matches: table.append([myrep, m]) myrep = '' print(tabulate(table, headers='firstrow')) print('\n')
[ "def", "_print_foreign_repetition_table", "(", "self", ",", "idset1", ",", "idset2", ")", ":", "assert", "(", "isinstance", "(", "idset1", ",", "idset_with_reference", ")", ")", "assert", "(", "isinstance", "(", "idset2", ",", "idset", ")", ")", "reps", "=", "idset2", ".", "get_repetitions", "(", ")", "if", "len", "(", "reps", ")", "<", "1", ":", "return", "refs", "=", "np", ".", "array", "(", "idset1", ".", "reflst", ")", "table", "=", "[", "[", "'{0} {1} values of repetitions in {2}'", ".", "format", "(", "idset1", ".", "name", ",", "idset1", ".", "refname", ",", "idset2", ".", "name", ")", ",", "''", "]", "]", "for", "rep", "in", "reps", ":", "if", "np", ".", "any", "(", "idset1", "==", "rep", ")", ":", "matches", "=", "refs", "[", "np", ".", "where", "(", "idset1", "==", "rep", ")", "]", "myrep", "=", "rep", "for", "m", "in", "matches", ":", "table", ".", "append", "(", "[", "myrep", ",", "m", "]", ")", "myrep", "=", "''", "print", "(", "tabulate", "(", "table", ",", "headers", "=", "'firstrow'", ")", ")", "print", "(", "'\\n'", ")" ]
Print a table of values repeated in idset2 together with the idset1 reference values that match each repetition. :param idset1: :param idset2:
[ "Print", "a", "table", "of", "values", "repeated", "in", "idset2", "together", "with", "the", "idset1", "reference", "values", "that", "match", "each", "repetition", "." ]
python
valid
31.517241
cloudant/python-cloudant
src/cloudant/database.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1048-L1096
def get_query_indexes(self, raw_result=False): """ Retrieves query indexes from the remote database. :param bool raw_result: If set to True then the raw JSON content for the request is returned. Default is to return a list containing :class:`~cloudant.index.Index`, :class:`~cloudant.index.TextIndex`, and :class:`~cloudant.index.SpecialIndex` wrapped objects. :returns: The query indexes in the database """ url = '/'.join((self.database_url, '_index')) resp = self.r_session.get(url) resp.raise_for_status() if raw_result: return response_to_json_dict(resp) indexes = [] for data in response_to_json_dict(resp).get('indexes', []): if data.get('type') == JSON_INDEX_TYPE: indexes.append(Index( self, data.get('ddoc'), data.get('name'), partitioned=data.get('partitioned', False), **data.get('def', {}) )) elif data.get('type') == TEXT_INDEX_TYPE: indexes.append(TextIndex( self, data.get('ddoc'), data.get('name'), partitioned=data.get('partitioned', False), **data.get('def', {}) )) elif data.get('type') == SPECIAL_INDEX_TYPE: indexes.append(SpecialIndex( self, data.get('ddoc'), data.get('name'), partitioned=data.get('partitioned', False), **data.get('def', {}) )) else: raise CloudantDatabaseException(101, data.get('type')) return indexes
[ "def", "get_query_indexes", "(", "self", ",", "raw_result", "=", "False", ")", ":", "url", "=", "'/'", ".", "join", "(", "(", "self", ".", "database_url", ",", "'_index'", ")", ")", "resp", "=", "self", ".", "r_session", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")", "if", "raw_result", ":", "return", "response_to_json_dict", "(", "resp", ")", "indexes", "=", "[", "]", "for", "data", "in", "response_to_json_dict", "(", "resp", ")", ".", "get", "(", "'indexes'", ",", "[", "]", ")", ":", "if", "data", ".", "get", "(", "'type'", ")", "==", "JSON_INDEX_TYPE", ":", "indexes", ".", "append", "(", "Index", "(", "self", ",", "data", ".", "get", "(", "'ddoc'", ")", ",", "data", ".", "get", "(", "'name'", ")", ",", "partitioned", "=", "data", ".", "get", "(", "'partitioned'", ",", "False", ")", ",", "*", "*", "data", ".", "get", "(", "'def'", ",", "{", "}", ")", ")", ")", "elif", "data", ".", "get", "(", "'type'", ")", "==", "TEXT_INDEX_TYPE", ":", "indexes", ".", "append", "(", "TextIndex", "(", "self", ",", "data", ".", "get", "(", "'ddoc'", ")", ",", "data", ".", "get", "(", "'name'", ")", ",", "partitioned", "=", "data", ".", "get", "(", "'partitioned'", ",", "False", ")", ",", "*", "*", "data", ".", "get", "(", "'def'", ",", "{", "}", ")", ")", ")", "elif", "data", ".", "get", "(", "'type'", ")", "==", "SPECIAL_INDEX_TYPE", ":", "indexes", ".", "append", "(", "SpecialIndex", "(", "self", ",", "data", ".", "get", "(", "'ddoc'", ")", ",", "data", ".", "get", "(", "'name'", ")", ",", "partitioned", "=", "data", ".", "get", "(", "'partitioned'", ",", "False", ")", ",", "*", "*", "data", ".", "get", "(", "'def'", ",", "{", "}", ")", ")", ")", "else", ":", "raise", "CloudantDatabaseException", "(", "101", ",", "data", ".", "get", "(", "'type'", ")", ")", "return", "indexes" ]
Retrieves query indexes from the remote database. :param bool raw_result: If set to True then the raw JSON content for the request is returned. Default is to return a list containing :class:`~cloudant.index.Index`, :class:`~cloudant.index.TextIndex`, and :class:`~cloudant.index.SpecialIndex` wrapped objects. :returns: The query indexes in the database
[ "Retrieves", "query", "indexes", "from", "the", "remote", "database", "." ]
python
train
37.122449
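A hedged usage sketch (assumes an existing cloudant Database handle named db):

indexes = db.get_query_indexes()             # wrapped Index/TextIndex/SpecialIndex objects
raw = db.get_query_indexes(raw_result=True)  # or the unparsed JSON payload
for entry in raw.get('indexes', []):
    print(entry['name'], entry['type'])      # e.g. _all_docs special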
RediSearch/redisearch-py
redisearch/client.py
https://github.com/RediSearch/redisearch-py/blob/f65d1dd078713cbe9b83584e86655a254d0531ab/redisearch/client.py#L259-L267
def delete_document(self, doc_id, conn=None): """ Delete a document from index Returns 1 if the document was deleted, 0 if not """ if conn is None: conn = self.redis return conn.execute_command(self.DEL_CMD, self.index_name, doc_id)
[ "def", "delete_document", "(", "self", ",", "doc_id", ",", "conn", "=", "None", ")", ":", "if", "conn", "is", "None", ":", "conn", "=", "self", ".", "redis", "return", "conn", ".", "execute_command", "(", "self", ".", "DEL_CMD", ",", "self", ".", "index_name", ",", "doc_id", ")" ]
Delete a document from index Returns 1 if the document was deleted, 0 if not
[ "Delete", "a", "document", "from", "index", "Returns", "1", "if", "the", "document", "was", "deleted", "0", "if", "not" ]
python
valid
31.666667
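A usage sketch (the index name and document id are hypothetical):

from redisearch import Client
client = Client('myIndex')
deleted = client.delete_document('doc1')  # 1 if the document was removed, 0 otherwise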
jeroyang/txttk
txttk/retools.py
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L75-L104
def is_solid(regex): """ Check the given regular expression is solid. >>> is_solid(r'a') True >>> is_solid(r'[ab]') True >>> is_solid(r'(a|b|c)') True >>> is_solid(r'(a|b|c)?') True >>> is_solid(r'(a|b)(c)') False >>> is_solid(r'(a|b)(c)?') False """ shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex) skeleton = shape.replace('#', '') if len(shape) <= 1: return True if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape): return True if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape): return True if re.match(r'^\(\)#*?\)\)', skeleton): return True else: return False
[ "def", "is_solid", "(", "regex", ")", ":", "shape", "=", "re", ".", "sub", "(", "r'(\\\\.|[^\\[\\]\\(\\)\\|\\?\\+\\*])'", ",", "'#'", ",", "regex", ")", "skeleton", "=", "shape", ".", "replace", "(", "'#'", ",", "''", ")", "if", "len", "(", "shape", ")", "<=", "1", ":", "return", "True", "if", "re", ".", "match", "(", "r'^\\[[^\\]]*\\][\\*\\+\\?]?$'", ",", "shape", ")", ":", "return", "True", "if", "re", ".", "match", "(", "r'^\\([^\\(]*\\)[\\*\\+\\?]?$'", ",", "shape", ")", ":", "return", "True", "if", "re", ".", "match", "(", "r'^\\(\\)#*?\\)\\)'", ",", "skeleton", ")", ":", "return", "True", "else", ":", "return", "False" ]
Check the given regular expression is solid. >>> is_solid(r'a') True >>> is_solid(r'[ab]') True >>> is_solid(r'(a|b|c)') True >>> is_solid(r'(a|b|c)?') True >>> is_solid(r'(a|b)(c)') False >>> is_solid(r'(a|b)(c)?') False
[ "Check", "the", "given", "regular", "expression", "is", "solid", "." ]
python
train
22.033333
fitnr/convertdate
convertdate/indian_civil.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/indian_civil.py#L77-L117
def from_jd(jd): '''Calculate Indian Civil date from Julian day Offset in years from Saka era to Gregorian epoch''' start = 80 # Day offset between Saka and Gregorian jd = trunc(jd) + 0.5 greg = gregorian.from_jd(jd) # Gregorian date for Julian day leap = isleap(greg[0]) # Is this a leap year? # Tentative year in Saka era year = greg[0] - SAKA_EPOCH # JD at start of Gregorian year greg0 = gregorian.to_jd(greg[0], 1, 1) yday = jd - greg0 # Day number (0 based) in Gregorian year if leap: Caitra = 31 # Days in Caitra this year else: Caitra = 30 if yday < start: # Day is at the end of the preceding Saka year year -= 1 yday += Caitra + (31 * 5) + (30 * 3) + 10 + start yday -= start if yday < Caitra: month = 1 day = yday + 1 else: mday = yday - Caitra if (mday < (31 * 5)): month = trunc(mday / 31) + 2 day = (mday % 31) + 1 else: mday -= 31 * 5 month = trunc(mday / 30) + 7 day = (mday % 30) + 1 return (year, month, int(day))
[ "def", "from_jd", "(", "jd", ")", ":", "start", "=", "80", "# Day offset between Saka and Gregorian", "jd", "=", "trunc", "(", "jd", ")", "+", "0.5", "greg", "=", "gregorian", ".", "from_jd", "(", "jd", ")", "# Gregorian date for Julian day", "leap", "=", "isleap", "(", "greg", "[", "0", "]", ")", "# Is this a leap year?", "# Tentative year in Saka era", "year", "=", "greg", "[", "0", "]", "-", "SAKA_EPOCH", "# JD at start of Gregorian year", "greg0", "=", "gregorian", ".", "to_jd", "(", "greg", "[", "0", "]", ",", "1", ",", "1", ")", "yday", "=", "jd", "-", "greg0", "# Day number (0 based) in Gregorian year", "if", "leap", ":", "Caitra", "=", "31", "# Days in Caitra this year", "else", ":", "Caitra", "=", "30", "if", "yday", "<", "start", ":", "# Day is at the end of the preceding Saka year", "year", "-=", "1", "yday", "+=", "Caitra", "+", "(", "31", "*", "5", ")", "+", "(", "30", "*", "3", ")", "+", "10", "+", "start", "yday", "-=", "start", "if", "yday", "<", "Caitra", ":", "month", "=", "1", "day", "=", "yday", "+", "1", "else", ":", "mday", "=", "yday", "-", "Caitra", "if", "(", "mday", "<", "(", "31", "*", "5", ")", ")", ":", "month", "=", "trunc", "(", "mday", "/", "31", ")", "+", "2", "day", "=", "(", "mday", "%", "31", ")", "+", "1", "else", ":", "mday", "-=", "31", "*", "5", "month", "=", "trunc", "(", "mday", "/", "30", ")", "+", "7", "day", "=", "(", "mday", "%", "30", ")", "+", "1", "return", "(", "year", ",", "month", ",", "int", "(", "day", ")", ")" ]
Calculate Indian Civil date from Julian day Offset in years from Saka era to Gregorian epoch
[ "Calculate", "Indian", "Civil", "date", "from", "Julian", "day", "Offset", "in", "years", "from", "Saka", "era", "to", "Gregorian", "epoch" ]
python
train
27.243902
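A round-trip sketch against the Gregorian converter this function uses internally:

from convertdate import gregorian, indian_civil
jd = gregorian.to_jd(2013, 4, 12)
print(indian_civil.from_jd(jd))  # the Saka-era (year, month, day) for that Julian day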
saltstack/salt
salt/modules/rh_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_service.py#L461-L475
def reload_(name): ''' Reload the named service CLI Example: .. code-block:: bash salt '*' service.reload <service name> ''' if _service_is_upstart(name): cmd = 'reload {0}'.format(name) else: cmd = '/sbin/service {0} reload'.format(name) return not __salt__['cmd.retcode'](cmd, python_shell=False)
[ "def", "reload_", "(", "name", ")", ":", "if", "_service_is_upstart", "(", "name", ")", ":", "cmd", "=", "'reload {0}'", ".", "format", "(", "name", ")", "else", ":", "cmd", "=", "'/sbin/service {0} reload'", ".", "format", "(", "name", ")", "return", "not", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")" ]
Reload the named service CLI Example: .. code-block:: bash salt '*' service.reload <service name>
[ "Reload", "the", "named", "service" ]
python
train
22.866667
mdiener/grace
grace/py27/pyjsdoc.py
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/pyjsdoc.py#L1475-L1481
def htmlize_paragraphs(text): """ Convert paragraphs delimited by blank lines into HTML text enclosed in <p> tags. """ paragraphs = re.split('(\r?\n)\s*(\r?\n)', text) return '\n'.join('<p>%s</p>' % paragraph for paragraph in paragraphs)
[ "def", "htmlize_paragraphs", "(", "text", ")", ":", "paragraphs", "=", "re", ".", "split", "(", "'(\\r?\\n)\\s*(\\r?\\n)'", ",", "text", ")", "return", "'\\n'", ".", "join", "(", "'<p>%s</p>'", "%", "paragraph", "for", "paragraph", "in", "paragraphs", ")" ]
Convert paragraphs delimited by blank lines into HTML text enclosed in <p> tags.
[ "Convert", "paragraphs", "delimited", "by", "blank", "lines", "into", "HTML", "text", "enclosed", "in", "<p", ">", "tags", "." ]
python
train
36.428571
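One caveat worth noting: the capturing groups in the split pattern make re.split return the newline separators as well, so each blank-line delimiter is also wrapped in <p> tags. A sketch with a non-capturing delimiter, if only the paragraphs are wanted (this variant is an assumption, not part of the library):

import re

def htmlize_paragraphs_nc(text):
    # Non-capturing delimiter, so only the paragraph bodies survive the split.
    paragraphs = re.split(r'\r?\n\s*\r?\n', text)
    return '\n'.join('<p>%s</p>' % p for p in paragraphs)

print(htmlize_paragraphs_nc('First.\n\nSecond.'))  # <p>First.</p>\n<p>Second.</p>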
estnltk/estnltk
estnltk/taggers/event_tagger.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/taggers/event_tagger.py#L129-L156
def tag(self, text): """Retrieves list of keywords in text. Parameters ---------- text: Text The text to search for events. Returns ------- list of events sorted by start, end """ if self.search_method == 'ahocorasick': events = self._find_keywords_ahocorasick(text.text) elif self.search_method == 'naive': events = self._find_keywords_naive(text.text) events = self._resolve_conflicts(events) if self.mapping: for item in events: item['type'] = self.map[ text.text[item['start']:item['end']] ] if self.return_layer: return events else: text[self.layer_name] = events
[ "def", "tag", "(", "self", ",", "text", ")", ":", "if", "self", ".", "search_method", "==", "'ahocorasick'", ":", "events", "=", "self", ".", "_find_keywords_ahocorasick", "(", "text", ".", "text", ")", "elif", "self", ".", "search_method", "==", "'naive'", ":", "events", "=", "self", ".", "_find_keywords_naive", "(", "text", ".", "text", ")", "events", "=", "self", ".", "_resolve_conflicts", "(", "events", ")", "if", "self", ".", "mapping", ":", "for", "item", "in", "events", ":", "item", "[", "'type'", "]", "=", "self", ".", "map", "[", "text", ".", "text", "[", "item", "[", "'start'", "]", ":", "item", "[", "'end'", "]", "]", "]", "if", "self", ".", "return_layer", ":", "return", "events", "else", ":", "text", "[", "self", ".", "layer_name", "]", "=", "events" ]
Retrieves list of keywords in text. Parameters ---------- text: Text The text to search for events. Returns ------- list of events sorted by start, end
[ "Retrieves", "list", "of", "keywords", "in", "text", "." ]
python
train
27.857143
raiden-network/raiden
raiden/network/transport/matrix/utils.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/transport/matrix/utils.py#L196-L230
def _presence_listener(self, event: Dict[str, Any]): """ Update cached user presence state from Matrix presence events. Due to the possibility of nodes using accounts on multiple homeservers a composite address state is synthesised from the cached individual user presence states. """ if self._stop_event.ready(): return user_id = event['sender'] if event['type'] != 'm.presence' or user_id == self._user_id: return user = self._get_user(user_id) user.displayname = event['content'].get('displayname') or user.displayname address = self._validate_userid_signature(user) if not address: # Malformed address - skip return # not a user we've whitelisted, skip if not self.is_address_known(address): return self.add_userid_for_address(address, user_id) new_state = UserPresence(event['content']['presence']) if new_state == self._userid_to_presence.get(user_id): # Cached presence state matches, no action required return self._userid_to_presence[user_id] = new_state self.refresh_address_presence(address) if self._user_presence_changed_callback: self._user_presence_changed_callback(user, new_state)
[ "def", "_presence_listener", "(", "self", ",", "event", ":", "Dict", "[", "str", ",", "Any", "]", ")", ":", "if", "self", ".", "_stop_event", ".", "ready", "(", ")", ":", "return", "user_id", "=", "event", "[", "'sender'", "]", "if", "event", "[", "'type'", "]", "!=", "'m.presence'", "or", "user_id", "==", "self", ".", "_user_id", ":", "return", "user", "=", "self", ".", "_get_user", "(", "user_id", ")", "user", ".", "displayname", "=", "event", "[", "'content'", "]", ".", "get", "(", "'displayname'", ")", "or", "user", ".", "displayname", "address", "=", "self", ".", "_validate_userid_signature", "(", "user", ")", "if", "not", "address", ":", "# Malformed address - skip", "return", "# not a user we've whitelisted, skip", "if", "not", "self", ".", "is_address_known", "(", "address", ")", ":", "return", "self", ".", "add_userid_for_address", "(", "address", ",", "user_id", ")", "new_state", "=", "UserPresence", "(", "event", "[", "'content'", "]", "[", "'presence'", "]", ")", "if", "new_state", "==", "self", ".", "_userid_to_presence", ".", "get", "(", "user_id", ")", ":", "# Cached presence state matches, no action required", "return", "self", ".", "_userid_to_presence", "[", "user_id", "]", "=", "new_state", "self", ".", "refresh_address_presence", "(", "address", ")", "if", "self", ".", "_user_presence_changed_callback", ":", "self", ".", "_user_presence_changed_callback", "(", "user", ",", "new_state", ")" ]
Update cached user presence state from Matrix presence events. Due to the possibility of nodes using accounts on multiple homeservers a composite address state is synthesised from the cached individual user presence states.
[ "Update", "cached", "user", "presence", "state", "from", "Matrix", "presence", "events", "." ]
python
train
37.857143
oceanprotocol/squid-py
squid_py/agreements/register_service_agreement.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/register_service_agreement.py#L130-L171
def process_agreement_events_publisher(publisher_account, agreement_id, did, service_agreement, price, consumer_address, condition_ids): """ Process the agreement events during the registration of the service agreement for the publisher side :param publisher_account: Account instance of the publisher :param agreement_id: id of the agreement, hex str :param did: DID, str :param service_agreement: ServiceAgreement instance :param price: Asset price, int :param consumer_address: ethereum account address of consumer, hex str :param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32 :return: """ conditions_dict = service_agreement.condition_by_name events_manager = EventsManager.get_instance(Keeper.get_instance()) events_manager.watch_lock_reward_event( agreement_id, access_secret_store_condition.fulfillAccessSecretStoreCondition, None, (agreement_id, did, service_agreement, consumer_address, publisher_account), conditions_dict['lockReward'].timeout ) events_manager.watch_access_event( agreement_id, escrow_reward_condition.fulfillEscrowRewardCondition, None, (agreement_id, service_agreement, price, consumer_address, publisher_account, condition_ids), conditions_dict['accessSecretStore'].timeout ) events_manager.watch_reward_event( agreement_id, verify_reward_condition.verifyRewardTokens, None, (agreement_id, did, service_agreement, price, consumer_address, publisher_account), conditions_dict['escrowReward'].timeout )
[ "def", "process_agreement_events_publisher", "(", "publisher_account", ",", "agreement_id", ",", "did", ",", "service_agreement", ",", "price", ",", "consumer_address", ",", "condition_ids", ")", ":", "conditions_dict", "=", "service_agreement", ".", "condition_by_name", "events_manager", "=", "EventsManager", ".", "get_instance", "(", "Keeper", ".", "get_instance", "(", ")", ")", "events_manager", ".", "watch_lock_reward_event", "(", "agreement_id", ",", "access_secret_store_condition", ".", "fulfillAccessSecretStoreCondition", ",", "None", ",", "(", "agreement_id", ",", "did", ",", "service_agreement", ",", "consumer_address", ",", "publisher_account", ")", ",", "conditions_dict", "[", "'lockReward'", "]", ".", "timeout", ")", "events_manager", ".", "watch_access_event", "(", "agreement_id", ",", "escrow_reward_condition", ".", "fulfillEscrowRewardCondition", ",", "None", ",", "(", "agreement_id", ",", "service_agreement", ",", "price", ",", "consumer_address", ",", "publisher_account", ",", "condition_ids", ")", ",", "conditions_dict", "[", "'accessSecretStore'", "]", ".", "timeout", ")", "events_manager", ".", "watch_reward_event", "(", "agreement_id", ",", "verify_reward_condition", ".", "verifyRewardTokens", ",", "None", ",", "(", "agreement_id", ",", "did", ",", "service_agreement", ",", "price", ",", "consumer_address", ",", "publisher_account", ")", ",", "conditions_dict", "[", "'escrowReward'", "]", ".", "timeout", ")" ]
Process the agreement events during the registration of the service agreement for the publisher side :param publisher_account: Account instance of the publisher :param agreement_id: id of the agreement, hex str :param did: DID, str :param service_agreement: ServiceAgreement instance :param price: Asset price, int :param consumer_address: ethereum account address of consumer, hex str :param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32 :return:
[ "Process", "the", "agreement", "events", "during", "the", "registration", "of", "the", "service", "agreement", "for", "the", "publisher", "side" ]
python
train
40.02381
diging/tethne
tethne/classes/feature.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/feature.py#L301-L322
def top(self, topn=10): """ Get a list of the top ``topn`` features in this :class:`.Feature`\. Examples -------- .. code-block:: python >>> myFeature = Feature([('the', 2), ('pine', 1), ('trapezoid', 5)]) >>> myFeature.top(1) [('trapezoid', 5)] Parameters ---------- topn : int Returns ------- list """ return [self[i] for i in argsort(list(zip(*self))[1])[::-1][:topn]]
[ "def", "top", "(", "self", ",", "topn", "=", "10", ")", ":", "return", "[", "self", "[", "i", "]", "for", "i", "in", "argsort", "(", "list", "(", "zip", "(", "*", "self", ")", ")", "[", "1", "]", ")", "[", ":", ":", "-", "1", "]", "[", ":", "topn", "]", "]" ]
Get a list of the top ``topn`` features in this :class:`.Feature`\. Examples -------- .. code-block:: python >>> myFeature = Feature([('the', 2), ('pine', 1), ('trapezoid', 5)]) >>> myFeature.top(1) [('trapezoid', 5)] Parameters ---------- topn : int Returns ------- list
[ "Get", "a", "list", "of", "the", "top", "topn", "features", "in", "this", ":", "class", ":", ".", "Feature", "\\", "." ]
python
train
21.954545
gawel/panoramisk
panoramisk/message.py
https://github.com/gawel/panoramisk/blob/2ccb5d18be28a8e8f444dc0cd3a3bfb59aa19a8e/panoramisk/message.py#L61-L77
def success(self): """return True if a response status is Success or Follows: .. code-block:: python >>> resp = Message({'Response': 'Success'}) >>> print(resp.success) True >>> resp['Response'] = 'Failed' >>> resp.success False """ if 'event' in self: return True if self.response in self.success_responses: return True return False
[ "def", "success", "(", "self", ")", ":", "if", "'event'", "in", "self", ":", "return", "True", "if", "self", ".", "response", "in", "self", ".", "success_responses", ":", "return", "True", "return", "False" ]
return True if a response status is Success or Follows: .. code-block:: python >>> resp = Message({'Response': 'Success'}) >>> print(resp.success) True >>> resp['Response'] = 'Failed' >>> resp.success False
[ "return", "True", "if", "a", "response", "status", "is", "Success", "or", "Follows", ":" ]
python
test
27.235294
woolfson-group/isambard
isambard/add_ons/knobs_into_holes.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/add_ons/knobs_into_holes.py#L502-L589
def make_pymol(pdb_file, cutoff=7.0, min_kihs=2, outfile=None): """ Pymol script for viewing classic coiled-coil Socket output. Notes ----- For examples of these views, browse the CC+ database here: http://coiledcoils.chm.bris.ac.uk/ccplus/search/. Parameters ---------- pdb_file: str Path to a pdb_file. cutoff: float Socket cutoff in Angstroms. min_kihs: int Minimum number of KnobIntoHole interactions between pairs of helices needed to define a coiled coil. outfile: None or str Path to an output file to save the pml script. Returns ------- script_string: str Pymol commands for classic coiled-coil view. """ a = convert_pdb_to_ampal(pdb=pdb_file, path=True) kg = KnobGroup.from_helices(a, cutoff=cutoff) g = kg.filter_graph(kg.graph, cutoff=cutoff, min_kihs=min_kihs) ccs = sorted_connected_components(g) # Opens pymol script, initial set up of screen script_lines = ['load {0}'.format(pdb_file)] script_lines.append("hide all") script_lines.append("bg_color white") script_lines.append("set antialias, 1") script_lines.append("set cartoon_dumbbell_length, 0.35") script_lines.append("set_color lightgrey, [240,240,240]") script_lines.append("set depth_cue, 0") script_lines.append("color lightgrey, all") script_lines.append("cartoon dumbbell") script_lines.append("show cartoon") for cc_number, cc in enumerate(ccs): helices = [x for x in g.nodes() if x.number in cc.nodes()] #helices = cc.nodes() cc_region = kg.get_coiledcoil_region(cc_number=cc_number, cutoff=cutoff, min_kihs=min_kihs) tag_residues_with_heptad_register(cc_region) assigned_regions = kg.get_assigned_regions(include_alt_states=False, complementary_only=False, helices=helices) helix_starts = [int(h[0].id) for h in helices] helix_ends = [int(h[-1].id) for h in helices] chains = [h.ampal_parent.id for h in helices] assigned_starts = [assigned_regions[h.number][0] for h in helices] assigned_ends = [assigned_regions[h.number][1] for h in helices] assigned_selections = ['{0}/{1}-{2}/'.format(chain, assigned_start, assigned_end) for chain, assigned_start, assigned_end in zip(chains, assigned_starts, assigned_ends)] script_lines.append("select cc{0}, {1}".format(cc_number, ' '.join(assigned_selections))) script_lines.append("cartoon automatic, cc{0}".format(cc_number)) for h_number, h in enumerate(helices): chain = chains[h_number] helix_start = helix_starts[h_number] helix_end = helix_ends[h_number] assigned_start = assigned_starts[h_number] assigned_end = assigned_ends[h_number] selection = '{0}/{1}-{2}/'.format(chain, helix_start, helix_end) script_lines.append("select cc{0}eh{1}, {2}".format(cc_number, h_number, selection)) selection = '{0}/{1}-{2}/'.format(chain, assigned_start, assigned_end) script_lines.append("select cc{0}ah{1}, {2}".format(cc_number, h_number, selection)) kihs = [x for x in kg if x.knob_helix == h] for x in kihs: knob_selection_name = 'cc{0}ah{1}k{2}'.format(cc_number, h_number, x.knob_residue.id) hole_selection_name = knob_selection_name + 'hole' knob_selection = '{0}/{1}/'.format(chain, x.knob_residue.id) script_lines.append('select {0}, {1}'.format(knob_selection_name, knob_selection)) hole_selection = ' '.join(['{0}/{1}/'.format(x.hole_chain, y.id) for y in x.hole_residues]) script_lines.append('select {0}, {1}'.format(hole_selection_name, hole_selection)) script_lines.append('show sticks, {0}'.format(knob_selection_name)) script_lines.append('show sticks, {0}'.format(hole_selection_name)) for r in h.get_monomers(): if 'register' in r.tags: color = _heptad_colours[r.tags['register']] script_lines.append('color {0}, {1}/{2}/'.format(color, chain, r.id)) script_lines.append('deselect') script_lines.append('orient') script_lines.append('rotate z, 90') script_lines.append('zoom complete=1') script_string = '\n'.join(script_lines) if outfile is not None: if isinstance(outfile, str) and outfile[-3:] == 'pml': with open(outfile, 'w') as foo: foo.write(script_string) return script_string
[ "def", "make_pymol", "(", "pdb_file", ",", "cutoff", "=", "7.0", ",", "min_kihs", "=", "2", ",", "outfile", "=", "None", ")", ":", "a", "=", "convert_pdb_to_ampal", "(", "pdb", "=", "pdb_file", ",", "path", "=", "True", ")", "kg", "=", "KnobGroup", ".", "from_helices", "(", "a", ",", "cutoff", "=", "cutoff", ")", "g", "=", "kg", ".", "filter_graph", "(", "kg", ".", "graph", ",", "cutoff", "=", "cutoff", ",", "min_kihs", "=", "min_kihs", ")", "ccs", "=", "sorted_connected_components", "(", "g", ")", "# Opens pymol script, initial set up of screen", "script_lines", "=", "[", "'load {0}'", ".", "format", "(", "pdb_file", ")", "]", "script_lines", ".", "append", "(", "\"hide all\"", ")", "script_lines", ".", "append", "(", "\"bg_color white\"", ")", "script_lines", ".", "append", "(", "\"set antialias, 1\"", ")", "script_lines", ".", "append", "(", "\"set cartoon_dumbbell_length, 0.35\"", ")", "script_lines", ".", "append", "(", "\"set_color lightgrey, [240,240,240]\"", ")", "script_lines", ".", "append", "(", "\"set depth_cue, 0\"", ")", "script_lines", ".", "append", "(", "\"color lightgrey, all\"", ")", "script_lines", ".", "append", "(", "\"cartoon dumbbell\"", ")", "script_lines", ".", "append", "(", "\"show cartoon\"", ")", "for", "cc_number", ",", "cc", "in", "enumerate", "(", "ccs", ")", ":", "helices", "=", "[", "x", "for", "x", "in", "g", ".", "nodes", "(", ")", "if", "x", ".", "number", "in", "cc", ".", "nodes", "(", ")", "]", "#helices = cc.nodes()", "cc_region", "=", "kg", ".", "get_coiledcoil_region", "(", "cc_number", "=", "cc_number", ",", "cutoff", "=", "cutoff", ",", "min_kihs", "=", "min_kihs", ")", "tag_residues_with_heptad_register", "(", "cc_region", ")", "assigned_regions", "=", "kg", ".", "get_assigned_regions", "(", "include_alt_states", "=", "False", ",", "complementary_only", "=", "False", ",", "helices", "=", "helices", ")", "helix_starts", "=", "[", "int", "(", "h", "[", "0", "]", ".", "id", ")", "for", "h", "in", "helices", "]", "helix_ends", "=", "[", "int", "(", "h", "[", "-", "1", "]", ".", "id", ")", "for", "h", "in", "helices", "]", "chains", "=", "[", "h", ".", "ampal_parent", ".", "id", "for", "h", "in", "helices", "]", "assigned_starts", "=", "[", "assigned_regions", "[", "h", ".", "number", "]", "[", "0", "]", "for", "h", "in", "helices", "]", "assigned_ends", "=", "[", "assigned_regions", "[", "h", ".", "number", "]", "[", "1", "]", "for", "h", "in", "helices", "]", "assigned_selections", "=", "[", "'{0}/{1}-{2}/'", ".", "format", "(", "chain", ",", "assigned_start", ",", "assigned_end", ")", "for", "chain", ",", "assigned_start", ",", "assigned_end", "in", "zip", "(", "chains", ",", "assigned_starts", ",", "assigned_ends", ")", "]", "script_lines", ".", "append", "(", "\"select cc{0}, {1}\"", ".", "format", "(", "cc_number", ",", "' '", ".", "join", "(", "assigned_selections", ")", ")", ")", "script_lines", ".", "append", "(", "\"cartoon automatic, cc{0}\"", ".", "format", "(", "cc_number", ")", ")", "for", "h_number", ",", "h", "in", "enumerate", "(", "helices", ")", ":", "chain", "=", "chains", "[", "h_number", "]", "helix_start", "=", "helix_starts", "[", "h_number", "]", "helix_end", "=", "helix_ends", "[", "h_number", "]", "assigned_start", "=", "assigned_starts", "[", "h_number", "]", "assigned_end", "=", "assigned_ends", "[", "h_number", "]", "selection", "=", "'{0}/{1}-{2}/'", ".", "format", "(", "chain", ",", "helix_start", ",", "helix_end", ")", "script_lines", ".", "append", "(", "\"select cc{0}eh{1}, {2}\"", ".", "format", "(", 
"cc_number", ",", "h_number", ",", "selection", ")", ")", "selection", "=", "'{0}/{1}-{2}/'", ".", "format", "(", "chain", ",", "assigned_start", ",", "assigned_end", ")", "script_lines", ".", "append", "(", "\"select cc{0}ah{1}, {2}\"", ".", "format", "(", "cc_number", ",", "h_number", ",", "selection", ")", ")", "kihs", "=", "[", "x", "for", "x", "in", "kg", "if", "x", ".", "knob_helix", "==", "h", "]", "for", "x", "in", "kihs", ":", "knob_selection_name", "=", "'cc{0}ah{1}k{2}'", ".", "format", "(", "cc_number", ",", "h_number", ",", "x", ".", "knob_residue", ".", "id", ")", "hole_selection_name", "=", "knob_selection_name", "+", "'hole'", "knob_selection", "=", "'{0}/{1}/'", ".", "format", "(", "chain", ",", "x", ".", "knob_residue", ".", "id", ")", "script_lines", ".", "append", "(", "'select {0}, {1}'", ".", "format", "(", "knob_selection_name", ",", "knob_selection", ")", ")", "hole_selection", "=", "' '", ".", "join", "(", "[", "'{0}/{1}/'", ".", "format", "(", "x", ".", "hole_chain", ",", "y", ".", "id", ")", "for", "y", "in", "x", ".", "hole_residues", "]", ")", "script_lines", ".", "append", "(", "'select {0}, {1}'", ".", "format", "(", "hole_selection_name", ",", "hole_selection", ")", ")", "script_lines", ".", "append", "(", "'show sticks, {0}'", ".", "format", "(", "knob_selection_name", ")", ")", "script_lines", ".", "append", "(", "'show sticks, {0}'", ".", "format", "(", "hole_selection_name", ")", ")", "for", "r", "in", "h", ".", "get_monomers", "(", ")", ":", "if", "'register'", "in", "r", ".", "tags", ":", "color", "=", "_heptad_colours", "[", "r", ".", "tags", "[", "'register'", "]", "]", "script_lines", ".", "append", "(", "'color {0}, {1}/{2}/'", ".", "format", "(", "color", ",", "chain", ",", "r", ".", "id", ")", ")", "script_lines", ".", "append", "(", "'deselect'", ")", "script_lines", ".", "append", "(", "'orient'", ")", "script_lines", ".", "append", "(", "'rotate z, 90'", ")", "script_lines", ".", "append", "(", "'zoom complete=1'", ")", "script_string", "=", "'\\n'", ".", "join", "(", "script_lines", ")", "if", "outfile", "is", "not", "None", ":", "if", "isinstance", "(", "outfile", ",", "str", ")", "and", "outfile", "[", "-", "3", ":", "]", "==", "'pml'", ":", "with", "open", "(", "outfile", ",", "'w'", ")", "as", "foo", ":", "foo", ".", "write", "(", "script_string", ")", "return", "script_string" ]
Pymol script for viewing classic coiled-coil Socket output. Notes ----- For examples of these views, browse the CC+ database here: http://coiledcoils.chm.bris.ac.uk/ccplus/search/. Parameters ---------- pdb_file: str Path to a pdb_file. cutoff: float Socket cutoff in Angstroms. min_kihs: int Minimum number of KnobIntoHole interactions between pairs of helices needed to define a coiled coil. outfile: None or str Path to an output file to save the pml script. Returns ------- script_string: str Pymol commands for classic coiled-coil view.
[ "Pymol", "script", "for", "viewing", "classic", "coiled", "-", "coil", "Socket", "output", "." ]
python
train
51.545455
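A hedged usage sketch (the PDB path is hypothetical; requires ISAMBARD, plus PyMOL to view the result):

script = make_pymol('4dzm.pdb', cutoff=7.0, min_kihs=2, outfile='4dzm_socket.pml')
# The commands are returned as a string and, because the name ends in 'pml',
# also written to disk; load the file in PyMOL for the register-coloured view.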
saltstack/salt
salt/modules/win_iis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_iis.py#L1220-L1294
def get_container_setting(name, container, settings): ''' Get the value of the setting for the IIS container. .. versionadded:: 2016.11.0 Args: name (str): The name of the IIS container. container (str): The type of IIS container. The container types are: AppPools, Sites, SslBindings settings (dict): A dictionary of the setting names and their values. Returns: dict: A dictionary of the provided settings and their values. CLI Example: .. code-block:: bash salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools' settings="['processModel.identityType']" ''' ret = dict() ps_cmd = list() ps_cmd_validate = list() container_path = r"IIS:\{0}\{1}".format(container, name) if not settings: log.warning('No settings provided') return ret ps_cmd.append(r'$Settings = @{};') for setting in settings: # Build the commands to verify that the property names are valid. ps_cmd_validate.extend(['Get-ItemProperty', '-Path', "'{0}'".format(container_path), '-Name', "'{0}'".format(setting), '-ErrorAction', 'Stop', '|', 'Out-Null;']) # Some ItemProperties are Strings and others are ConfigurationAttributes. # Since the former doesn't have a Value property, we need to account # for this. ps_cmd.append("$Property = Get-ItemProperty -Path '{0}'".format(container_path)) ps_cmd.append("-Name '{0}' -ErrorAction Stop;".format(setting)) ps_cmd.append(r'if (([String]::IsNullOrEmpty($Property) -eq $False) -and') ps_cmd.append(r"($Property.GetType()).Name -eq 'ConfigurationAttribute') {") ps_cmd.append(r'$Property = $Property | Select-Object') ps_cmd.append(r'-ExpandProperty Value };') ps_cmd.append("$Settings['{0}'] = [String] $Property;".format(setting)) ps_cmd.append(r'$Property = $Null;') # Validate the setting names that were passed in. cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True) if cmd_ret['retcode'] != 0: message = 'One or more invalid property names were specified for the provided container.' raise SaltInvocationError(message) ps_cmd.append('$Settings') cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True) try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) if isinstance(items, list): ret.update(items[0]) else: ret.update(items) except ValueError: raise CommandExecutionError('Unable to parse return data as Json.') return ret
[ "def", "get_container_setting", "(", "name", ",", "container", ",", "settings", ")", ":", "ret", "=", "dict", "(", ")", "ps_cmd", "=", "list", "(", ")", "ps_cmd_validate", "=", "list", "(", ")", "container_path", "=", "r\"IIS:\\{0}\\{1}\"", ".", "format", "(", "container", ",", "name", ")", "if", "not", "settings", ":", "log", ".", "warning", "(", "'No settings provided'", ")", "return", "ret", "ps_cmd", ".", "append", "(", "r'$Settings = @{};'", ")", "for", "setting", "in", "settings", ":", "# Build the commands to verify that the property names are valid.", "ps_cmd_validate", ".", "extend", "(", "[", "'Get-ItemProperty'", ",", "'-Path'", ",", "\"'{0}'\"", ".", "format", "(", "container_path", ")", ",", "'-Name'", ",", "\"'{0}'\"", ".", "format", "(", "setting", ")", ",", "'-ErrorAction'", ",", "'Stop'", ",", "'|'", ",", "'Out-Null;'", "]", ")", "# Some ItemProperties are Strings and others are ConfigurationAttributes.", "# Since the former doesn't have a Value property, we need to account", "# for this.", "ps_cmd", ".", "append", "(", "\"$Property = Get-ItemProperty -Path '{0}'\"", ".", "format", "(", "container_path", ")", ")", "ps_cmd", ".", "append", "(", "\"-Name '{0}' -ErrorAction Stop;\"", ".", "format", "(", "setting", ")", ")", "ps_cmd", ".", "append", "(", "r'if (([String]::IsNullOrEmpty($Property) -eq $False) -and'", ")", "ps_cmd", ".", "append", "(", "r\"($Property.GetType()).Name -eq 'ConfigurationAttribute') {\"", ")", "ps_cmd", ".", "append", "(", "r'$Property = $Property | Select-Object'", ")", "ps_cmd", ".", "append", "(", "r'-ExpandProperty Value };'", ")", "ps_cmd", ".", "append", "(", "\"$Settings['{0}'] = [String] $Property;\"", ".", "format", "(", "setting", ")", ")", "ps_cmd", ".", "append", "(", "r'$Property = $Null;'", ")", "# Validate the setting names that were passed in.", "cmd_ret", "=", "_srvmgr", "(", "cmd", "=", "ps_cmd_validate", ",", "return_json", "=", "True", ")", "if", "cmd_ret", "[", "'retcode'", "]", "!=", "0", ":", "message", "=", "'One or more invalid property names were specified for the provided container.'", "raise", "SaltInvocationError", "(", "message", ")", "ps_cmd", ".", "append", "(", "'$Settings'", ")", "cmd_ret", "=", "_srvmgr", "(", "cmd", "=", "ps_cmd", ",", "return_json", "=", "True", ")", "try", ":", "items", "=", "salt", ".", "utils", ".", "json", ".", "loads", "(", "cmd_ret", "[", "'stdout'", "]", ",", "strict", "=", "False", ")", "if", "isinstance", "(", "items", ",", "list", ")", ":", "ret", ".", "update", "(", "items", "[", "0", "]", ")", "else", ":", "ret", ".", "update", "(", "items", ")", "except", "ValueError", ":", "raise", "CommandExecutionError", "(", "'Unable to parse return data as Json.'", ")", "return", "ret" ]
Get the value of the setting for the IIS container. .. versionadded:: 2016.11.0 Args: name (str): The name of the IIS container. container (str): The type of IIS container. The container types are: AppPools, Sites, SslBindings settings (dict): A dictionary of the setting names and their values. Returns: dict: A dictionary of the provided settings and their values. CLI Example: .. code-block:: bash salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools' settings="['processModel.identityType']"
[ "Get", "the", "value", "of", "the", "setting", "for", "the", "IIS", "container", "." ]
python
train
35.96
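A sketch of the equivalent direct call to the module function documented above, mirroring the CLI example in its docstring; the pool name and setting are illustrative.

# Returns a dict mapping each requested setting to its string value, e.g.
# {'processModel.identityType': 'ApplicationPoolIdentity'}.
settings = get_container_setting(
    name='MyTestPool',
    container='AppPools',
    settings=['processModel.identityType'],
)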
mapbox/mapbox-cli-py
mapboxcli/scripts/static.py
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/static.py#L18-L47
def staticmap(ctx, mapid, output, features, lat, lon, zoom, size): """ Generate static map images from existing Mapbox map ids. Optionally overlay with geojson features. $ mapbox staticmap --features features.geojson mapbox.satellite out.png $ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png An access token is required, see `mapbox --help`. """ access_token = (ctx.obj and ctx.obj.get('access_token')) or None if features: features = list( cligj.normalize_feature_inputs(None, 'features', [features])) service = mapbox.Static(access_token=access_token) try: res = service.image( mapid, lon=lon, lat=lat, z=zoom, width=size[0], height=size[1], features=features, sort_keys=True) except mapbox.errors.ValidationError as exc: raise click.BadParameter(str(exc)) if res.status_code == 200: output.write(res.content) else: raise MapboxCLIException(res.text.strip())
[ "def", "staticmap", "(", "ctx", ",", "mapid", ",", "output", ",", "features", ",", "lat", ",", "lon", ",", "zoom", ",", "size", ")", ":", "access_token", "=", "(", "ctx", ".", "obj", "and", "ctx", ".", "obj", ".", "get", "(", "'access_token'", ")", ")", "or", "None", "if", "features", ":", "features", "=", "list", "(", "cligj", ".", "normalize_feature_inputs", "(", "None", ",", "'features'", ",", "[", "features", "]", ")", ")", "service", "=", "mapbox", ".", "Static", "(", "access_token", "=", "access_token", ")", "try", ":", "res", "=", "service", ".", "image", "(", "mapid", ",", "lon", "=", "lon", ",", "lat", "=", "lat", ",", "z", "=", "zoom", ",", "width", "=", "size", "[", "0", "]", ",", "height", "=", "size", "[", "1", "]", ",", "features", "=", "features", ",", "sort_keys", "=", "True", ")", "except", "mapbox", ".", "errors", ".", "ValidationError", "as", "exc", ":", "raise", "click", ".", "BadParameter", "(", "str", "(", "exc", ")", ")", "if", "res", ".", "status_code", "==", "200", ":", "output", ".", "write", "(", "res", ".", "content", ")", "else", ":", "raise", "MapboxCLIException", "(", "res", ".", "text", ".", "strip", "(", ")", ")" ]
Generate static map images from existing Mapbox map ids. Optionally overlay with geojson features. $ mapbox staticmap --features features.geojson mapbox.satellite out.png $ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png An access token is required, see `mapbox --help`.
[ "Generate", "static", "map", "images", "from", "existing", "Mapbox", "map", "ids", ".", "Optionally", "overlay", "with", "geojson", "features", "." ]
python
train
34.133333
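A sketch of directly calling the `mapbox.Static` service that this click command wraps, using the same `image()` arguments the command passes through; the access token and output path are placeholders.

import mapbox

service = mapbox.Static(access_token='pk.placeholder')
res = service.image('mapbox.satellite', lon=-61.7, lat=12.1, z=12,
                    width=600, height=600)
if res.status_code == 200:
    with open('out.png', 'wb') as fh:  # save the rendered PNG
        fh.write(res.content)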
mabuchilab/QNET
src/qnet/algebra/core/abstract_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/abstract_algebra.py#L712-L790
def _apply_rules(expr, rules): """Recursively re-instantiate the expression, while applying all of the given `rules` to all encountered (sub-) expressions Args: expr: Any Expression or scalar object rules (list, ~collections.OrderedDict): A list of rules or a dictionary mapping names to rules, where each rule is a tuple ``(pattern, replacement)`` where `pattern` is an instance of :class:`.Pattern` and `replacement` is a callable. The pattern will be matched against any expression that is encountered during the re-instantiation. If the `pattern` matches, then the (sub-)expression is replaced by the result of calling `replacement` while passing any wildcards from `pattern` as keyword arguments. If `replacement` raises :exc:`.CannotSimplify`, it will be ignored Note: Instead of or in addition to passing `rules`, `simplify` can often be combined with e.g. `extra_rules` / `extra_binary_rules` context managers. If a simplification can be handled through these context managers, this is usually more efficient than an equivalent rule. However, both really are complementary: the rules defined in the context managers are applied *before* instantiation (hence these patterns are instantiated through `pattern_head`). In contrast, the patterns defined in `rules` are applied against instantiated expressions. """ if LOG: logger = logging.getLogger('QNET.create') stack = [] path = [] if isinstance(expr, Expression): stack.append(ProtoExpr.from_expr(expr)) path.append(0) if LOG: logger.debug( "Starting at level 1: placing expr on stack: %s", expr) while True: i = path[-1] try: arg = stack[-1][i] if LOG: logger.debug( "At level %d: considering arg %d: %s", len(stack), i+1, arg) except IndexError: # done at this level path.pop() expr = stack.pop().instantiate() expr = _apply_rules_no_recurse(expr, rules) if len(stack) == 0: if LOG: logger.debug( "Complete level 1: returning simplified expr: %s", expr) return expr else: stack[-1][path[-1]] = expr path[-1] += 1 if LOG: logger.debug( "Complete level %d. At level %d, setting arg %d " "to simplified expr: %s", len(stack)+1, len(stack), path[-1], expr) else: if isinstance(arg, Expression): stack.append(ProtoExpr.from_expr(arg)) path.append(0) if LOG: logger.debug(" placing arg on stack") else: # scalar stack[-1][i] = _apply_rules_no_recurse(arg, rules) if LOG: logger.debug( " arg is leaf, replacing with simplified expr: " "%s", stack[-1][i]) path[-1] += 1 else: return _apply_rules_no_recurse(expr, rules)
[ "def", "_apply_rules", "(", "expr", ",", "rules", ")", ":", "if", "LOG", ":", "logger", "=", "logging", ".", "getLogger", "(", "'QNET.create'", ")", "stack", "=", "[", "]", "path", "=", "[", "]", "if", "isinstance", "(", "expr", ",", "Expression", ")", ":", "stack", ".", "append", "(", "ProtoExpr", ".", "from_expr", "(", "expr", ")", ")", "path", ".", "append", "(", "0", ")", "if", "LOG", ":", "logger", ".", "debug", "(", "\"Starting at level 1: placing expr on stack: %s\"", ",", "expr", ")", "while", "True", ":", "i", "=", "path", "[", "-", "1", "]", "try", ":", "arg", "=", "stack", "[", "-", "1", "]", "[", "i", "]", "if", "LOG", ":", "logger", ".", "debug", "(", "\"At level %d: considering arg %d: %s\"", ",", "len", "(", "stack", ")", ",", "i", "+", "1", ",", "arg", ")", "except", "IndexError", ":", "# done at this level", "path", ".", "pop", "(", ")", "expr", "=", "stack", ".", "pop", "(", ")", ".", "instantiate", "(", ")", "expr", "=", "_apply_rules_no_recurse", "(", "expr", ",", "rules", ")", "if", "len", "(", "stack", ")", "==", "0", ":", "if", "LOG", ":", "logger", ".", "debug", "(", "\"Complete level 1: returning simplified expr: %s\"", ",", "expr", ")", "return", "expr", "else", ":", "stack", "[", "-", "1", "]", "[", "path", "[", "-", "1", "]", "]", "=", "expr", "path", "[", "-", "1", "]", "+=", "1", "if", "LOG", ":", "logger", ".", "debug", "(", "\"Complete level %d. At level %d, setting arg %d \"", "\"to simplified expr: %s\"", ",", "len", "(", "stack", ")", "+", "1", ",", "len", "(", "stack", ")", ",", "path", "[", "-", "1", "]", ",", "expr", ")", "else", ":", "if", "isinstance", "(", "arg", ",", "Expression", ")", ":", "stack", ".", "append", "(", "ProtoExpr", ".", "from_expr", "(", "arg", ")", ")", "path", ".", "append", "(", "0", ")", "if", "LOG", ":", "logger", ".", "debug", "(", "\" placing arg on stack\"", ")", "else", ":", "# scalar", "stack", "[", "-", "1", "]", "[", "i", "]", "=", "_apply_rules_no_recurse", "(", "arg", ",", "rules", ")", "if", "LOG", ":", "logger", ".", "debug", "(", "\" arg is leaf, replacing with simplified expr: \"", "\"%s\"", ",", "stack", "[", "-", "1", "]", "[", "i", "]", ")", "path", "[", "-", "1", "]", "+=", "1", "else", ":", "return", "_apply_rules_no_recurse", "(", "expr", ",", "rules", ")" ]
Recursively re-instantiate the expression, while applying all of the given `rules` to all encountered (sub-) expressions Args: expr: Any Expression or scalar object rules (list, ~collections.OrderedDict): A list of rules or a dictionary mapping names to rules, where each rule is a tuple ``(pattern, replacement)`` where `pattern` is an instance of :class:`.Pattern` and `replacement` is a callable. The pattern will be matched against any expression that is encountered during the re-instantiation. If the `pattern` matches, then the (sub-)expression is replaced by the result of calling `replacement` while passing any wildcards from `pattern` as keyword arguments. If `replacement` raises :exc:`.CannotSimplify`, it will be ignored Note: Instead of or in addition to passing `rules`, `simplify` can often be combined with e.g. `extra_rules` / `extra_binary_rules` context managers. If a simplification can be handled through these context managers, this is usually more efficient than an equivalent rule. However, both really are complementary: the rules defined in the context managers are applied *before* instantiation (hence these patterns are instantiated through `pattern_head`). In contrast, the patterns defined in `rules` are applied against instantiated expressions.
[ "Recursively", "re", "-", "instantiate", "the", "expression", "while", "applying", "all", "of", "the", "given", "rules", "to", "all", "encountered", "(", "sub", "-", ")", "expressions" ]
python
train
44.189873
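A self-contained analogue of the iterative traversal above, operating on plain nested tuples instead of QNET expressions, to show how the `stack`/`path` bookkeeping performs a bottom-up rewrite without recursion; the rule format here (predicate, replacement) is a simplification of the real (Pattern, callable) pairs.

def apply_rules_demo(expr, rules):
    """Bottom-up rewrite of nested tuples using an explicit stack."""
    def simplify(e):
        for matches, replace in rules:
            if matches(e):
                return replace(e)
        return e

    if not isinstance(expr, tuple):
        return simplify(expr)
    stack, path = [list(expr)], [0]
    while True:
        i = path[-1]
        if i >= len(stack[-1]):               # done at this level
            path.pop()
            done = simplify(tuple(stack.pop()))
            if not stack:
                return done
            stack[-1][path[-1]] = done        # write result back into parent
            path[-1] += 1
        elif isinstance(stack[-1][i], tuple):
            stack.append(list(stack[-1][i]))  # descend into sub-expression
            path.append(0)
        else:                                 # scalar leaf
            stack[-1][i] = simplify(stack[-1][i])
            path[-1] += 1

rules = [(lambda e: e == 0, lambda e: 'zero')]
print(apply_rules_demo((1, (0, 2), 0), rules))  # (1, ('zero', 2), 'zero')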
OSSOS/MOP
src/jjk/preproc/MOPfits_old.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/MOPfits_old.py#L631-L646
def find_proc_date(header): """Search the HISTORY fields of a header looking for the FLIPS processing date. """ import string, re for h in header.ascardlist(): if h.key=="HISTORY": g=h.value if string.find(g,'FLIPS 1.0 -:') != -1: result=re.search('imred: FLIPS 1.0 - \S{3} (.*) - ([\s\d]\d:\d\d:\d\d)\s*$',g) if result: date=result.group(1) time=result.group(2) datetime=date+" "+time return datetime return None
[ "def", "find_proc_date", "(", "header", ")", ":", "import", "string", ",", "re", "for", "h", "in", "header", ".", "ascardlist", "(", ")", ":", "if", "h", ".", "key", "==", "\"HISTORY\"", ":", "g", "=", "h", ".", "value", "if", "(", "string", ".", "find", "(", "g", ",", "'FLIPS 1.0 -:'", ")", ")", ":", "result", "=", "re", ".", "search", "(", "'imred: FLIPS 1.0 - \\S{3} (.*) - ([\\s\\d]\\d:\\d\\d:\\d\\d)\\s*$'", ",", "g", ")", "if", "result", ":", "date", "=", "result", ".", "group", "(", "1", ")", "time", "=", "result", ".", "group", "(", "2", ")", "datetime", "=", "date", "+", "\" \"", "+", "time", "return", "datetime", "return", "None" ]
Search the HISTORY fields of a header looking for the FLIPS processing date.
[ "Search", "the", "HISTORY", "fields", "of", "a", "header", "looking", "for", "the", "FLIPS", "processing", "date", "." ]
python
train
35.25
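The HISTORY-matching regex from the function above, exercised standalone on a plausible FLIPS card value (the sample string is invented); this also shows why the date and time land in separate groups.

import re

g = 'imred: FLIPS 1.0 - Fri Apr 14 2000 - 22:51:39'  # invented sample value
m = re.search(r'imred: FLIPS 1.0 - \S{3} (.*) - ([\s\d]\d:\d\d:\d\d)\s*$', g)
if m:
    print(m.group(1) + ' ' + m.group(2))  # 'Apr 14 2000 22:51:39'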
chaoss/grimoirelab-perceval
perceval/backends/core/phabricator.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/phabricator.py#L544-L555
def phids(self, *phids): """Retrieve data about PHIDs. :param phids: list of PHIDs """ params = { self.PHIDS: phids } response = self._call(self.PHAB_PHIDS, params) return response
[ "def", "phids", "(", "self", ",", "*", "phids", ")", ":", "params", "=", "{", "self", ".", "PHIDS", ":", "phids", "}", "response", "=", "self", ".", "_call", "(", "self", ".", "PHAB_PHIDS", ",", "params", ")", "return", "response" ]
Retrieve data about PHIDs. :param phids: list of PHIDs
[ "Retrieve", "data", "about", "PHIDs", "." ]
python
test
20.083333
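A call sketch; the client construction and PHID strings are placeholders, since only the `phids` method itself appears in this record.

# Hypothetical client setup; the varargs become the PHIDS request parameter.
client = ConduitClient('https://phab.example.com', 'api-token')  # assumed constructor
raw = client.phids('PHID-TASK-aaaa', 'PHID-USER-bbbb')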
jakebasile/reap
reap/api/admin.py
https://github.com/jakebasile/reap/blob/c90c033c5388f5380155001957b26b1a930311f0/reap/api/admin.py#L84-L97
def create_project(self, name, client_id, budget = None, budget_by = 'none', notes = None, billable = True): '''Creates a Project with the given information.''' project = {'project':{ 'name': name, 'client_id': client_id, 'budget_by': budget_by, 'budget': budget, 'notes': notes, 'billable': billable, }} response = self.post_request('projects/', project, follow = True) if response: return Project(self, response['project'])
[ "def", "create_project", "(", "self", ",", "name", ",", "client_id", ",", "budget", "=", "None", ",", "budget_by", "=", "'none'", ",", "notes", "=", "None", ",", "billable", "=", "True", ")", ":", "project", "=", "{", "'project'", ":", "{", "'name'", ":", "name", ",", "'client_id'", ":", "client_id", ",", "'budget_by'", ":", "budget_by", ",", "'budget'", ":", "budget", ",", "'notes'", ":", "notes", ",", "'billable'", ":", "billable", ",", "}", "}", "response", "=", "self", ".", "post_request", "(", "'projects/'", ",", "project", ",", "follow", "=", "True", ")", "if", "response", ":", "return", "Project", "(", "self", ",", "response", "[", "'project'", "]", ")" ]
Creates a Project with the given information.
[ "Creates", "a", "Project", "with", "the", "given", "information", "." ]
python
train
38.642857
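A usage sketch, assuming a client object exposing this method has already been constructed (the constructor shown is hypothetical); ids and budget values are illustrative.

ts = Timesheet('https://company.harvestapp.com/', 'user@example.com', 'secret')  # hypothetical
project = ts.create_project('Website Redesign', client_id=1234,
                            budget=40.0, budget_by='total_hours',
                            notes='Q3 engagement', billable=True)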
poldracklab/niworkflows
niworkflows/viz/plots.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/viz/plots.py#L550-L673
def compcor_variance_plot(metadata_files, metadata_sources=None, output_file=None, varexp_thresh=(0.5, 0.7, 0.9), fig=None): """ Parameters ---------- metadata_files: list List of paths to files containing component metadata. If more than one decomposition has been performed (e.g., anatomical and temporal CompCor decompositions), then all metadata files can be provided in the list. However, each metadata file should have a corresponding entry in `metadata_sources`. metadata_sources: list or None List of source names (e.g., ['aCompCor']) for decompositions. This list should be of the same length as `metadata_files`. output_file: str or None Path where the output figure should be saved. If this is not defined, then the plotting axes will be returned instead of the saved figure path. varexp_thresh: tuple Set of variance thresholds to include in the plot (default 0.5, 0.7, 0.9). fig: figure or None Existing figure on which to plot. Returns ------- ax: axes Plotting axes. Returned only if the `output_file` parameter is None. output_file: str The file where the figure is saved. """ metadata = {} if metadata_sources is None: if len(metadata_files) == 1: metadata_sources = ['CompCor'] else: metadata_sources = ['Decomposition {:d}'.format(i) for i in range(len(metadata_files))] for file, source in zip(metadata_files, metadata_sources): metadata[source] = pd.read_table(str(file)) metadata[source]['source'] = source metadata = pd.concat(list(metadata.values())) bbox_txt = { 'boxstyle': 'round', 'fc': 'white', 'ec': 'none', 'color': 'none', 'linewidth': 0, 'alpha': 0.8 } decompositions = [] data_sources = list(metadata.groupby(['source', 'mask']).groups.keys()) for source, mask in data_sources: if not np.isnan( metadata.loc[ (metadata['source'] == source) & (metadata['mask'] == mask) ]['singular_value'].values[0]): decompositions.append((source, mask)) if fig is not None: ax = [fig.add_subplot(1, len(decompositions), i+1) for i in range(len(decompositions))] elif len(decompositions) > 1: fig, ax = plt.subplots(1, len(decompositions), figsize=(5*len(decompositions), 5)) else: ax = [plt.axes()] for m, (source, mask) in enumerate(decompositions): components = metadata[(metadata['mask'] == mask) & (metadata['source'] == source)] if len([m for s, m in decompositions if s == source]) > 1: title_mask = ' ({} mask)'.format(mask) else: title_mask = '' fig_title = '{}{}'.format(source, title_mask) ax[m].plot(np.arange(components.shape[0]+1), [0] + list( 100*components['cumulative_variance_explained']), color='purple', linewidth=2.5) ax[m].grid(False) ax[m].set_xlabel('number of components in model') ax[m].set_ylabel('cumulative variance explained (%)') ax[m].set_title(fig_title) varexp = {} for i, thr in enumerate(varexp_thresh): varexp[thr] = np.searchsorted( components['cumulative_variance_explained'], thr) + 1 ax[m].axhline(y=100*thr, color='lightgrey', linewidth=0.25) ax[m].axvline(x=varexp[thr], color='C{}'.format(i), linewidth=2, linestyle=':') ax[m].text(0, 100*thr, '{:.0f}'.format(100*thr), fontsize='x-small', bbox=bbox_txt) ax[m].text(varexp[thr][0], 25, '{} components explain\n{:.0f}% of variance'.format( varexp[thr][0], 100*thr), rotation=90, horizontalalignment='center', fontsize='xx-small', bbox=bbox_txt) ax[m].set_yticks([]) ax[m].set_yticklabels([]) for tick in ax[m].xaxis.get_major_ticks(): tick.label.set_fontsize('x-small') tick.label.set_rotation('vertical') for side in ['top', 'right', 'left']: ax[m].spines[side].set_color('none') ax[m].spines[side].set_visible(False) if output_file is not None: figure = plt.gcf() figure.savefig(output_file, bbox_inches='tight') plt.close(figure) figure = None return output_file return ax
[ "def", "compcor_variance_plot", "(", "metadata_files", ",", "metadata_sources", "=", "None", ",", "output_file", "=", "None", ",", "varexp_thresh", "=", "(", "0.5", ",", "0.7", ",", "0.9", ")", ",", "fig", "=", "None", ")", ":", "metadata", "=", "{", "}", "if", "metadata_sources", "is", "None", ":", "if", "len", "(", "metadata_files", ")", "==", "1", ":", "metadata_sources", "=", "[", "'CompCor'", "]", "else", ":", "metadata_sources", "=", "[", "'Decomposition {:d}'", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "len", "(", "metadata_files", ")", ")", "]", "for", "file", ",", "source", "in", "zip", "(", "metadata_files", ",", "metadata_sources", ")", ":", "metadata", "[", "source", "]", "=", "pd", ".", "read_table", "(", "str", "(", "file", ")", ")", "metadata", "[", "source", "]", "[", "'source'", "]", "=", "source", "metadata", "=", "pd", ".", "concat", "(", "list", "(", "metadata", ".", "values", "(", ")", ")", ")", "bbox_txt", "=", "{", "'boxstyle'", ":", "'round'", ",", "'fc'", ":", "'white'", ",", "'ec'", ":", "'none'", ",", "'color'", ":", "'none'", ",", "'linewidth'", ":", "0", ",", "'alpha'", ":", "0.8", "}", "decompositions", "=", "[", "]", "data_sources", "=", "list", "(", "metadata", ".", "groupby", "(", "[", "'source'", ",", "'mask'", "]", ")", ".", "groups", ".", "keys", "(", ")", ")", "for", "source", ",", "mask", "in", "data_sources", ":", "if", "not", "np", ".", "isnan", "(", "metadata", ".", "loc", "[", "(", "metadata", "[", "'source'", "]", "==", "source", ")", "&", "(", "metadata", "[", "'mask'", "]", "==", "mask", ")", "]", "[", "'singular_value'", "]", ".", "values", "[", "0", "]", ")", ":", "decompositions", ".", "append", "(", "(", "source", ",", "mask", ")", ")", "if", "fig", "is", "not", "None", ":", "ax", "=", "[", "fig", ".", "add_subplot", "(", "1", ",", "len", "(", "decompositions", ")", ",", "i", "+", "1", ")", "for", "i", "in", "range", "(", "len", "(", "decompositions", ")", ")", "]", "elif", "len", "(", "decompositions", ")", ">", "1", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "len", "(", "decompositions", ")", ",", "figsize", "=", "(", "5", "*", "len", "(", "decompositions", ")", ",", "5", ")", ")", "else", ":", "ax", "=", "[", "plt", ".", "axes", "(", ")", "]", "for", "m", ",", "(", "source", ",", "mask", ")", "in", "enumerate", "(", "decompositions", ")", ":", "components", "=", "metadata", "[", "(", "metadata", "[", "'mask'", "]", "==", "mask", ")", "&", "(", "metadata", "[", "'source'", "]", "==", "source", ")", "]", "if", "len", "(", "[", "m", "for", "s", ",", "m", "in", "decompositions", "if", "s", "==", "source", "]", ")", ">", "1", ":", "title_mask", "=", "' ({} mask)'", ".", "format", "(", "mask", ")", "else", ":", "title_mask", "=", "''", "fig_title", "=", "'{}{}'", ".", "format", "(", "source", ",", "title_mask", ")", "ax", "[", "m", "]", ".", "plot", "(", "np", ".", "arange", "(", "components", ".", "shape", "[", "0", "]", "+", "1", ")", ",", "[", "0", "]", "+", "list", "(", "100", "*", "components", "[", "'cumulative_variance_explained'", "]", ")", ",", "color", "=", "'purple'", ",", "linewidth", "=", "2.5", ")", "ax", "[", "m", "]", ".", "grid", "(", "False", ")", "ax", "[", "m", "]", ".", "set_xlabel", "(", "'number of components in model'", ")", "ax", "[", "m", "]", ".", "set_ylabel", "(", "'cumulative variance explained (%)'", ")", "ax", "[", "m", "]", ".", "set_title", "(", "fig_title", ")", "varexp", "=", "{", "}", "for", "i", ",", "thr", "in", "enumerate", "(", "varexp_thresh", ")", ":", 
"varexp", "[", "thr", "]", "=", "np", ".", "searchsorted", "(", "components", "[", "'cumulative_variance_explained'", "]", ",", "thr", ")", "+", "1", "ax", "[", "m", "]", ".", "axhline", "(", "y", "=", "100", "*", "thr", ",", "color", "=", "'lightgrey'", ",", "linewidth", "=", "0.25", ")", "ax", "[", "m", "]", ".", "axvline", "(", "x", "=", "varexp", "[", "thr", "]", ",", "color", "=", "'C{}'", ".", "format", "(", "i", ")", ",", "linewidth", "=", "2", ",", "linestyle", "=", "':'", ")", "ax", "[", "m", "]", ".", "text", "(", "0", ",", "100", "*", "thr", ",", "'{:.0f}'", ".", "format", "(", "100", "*", "thr", ")", ",", "fontsize", "=", "'x-small'", ",", "bbox", "=", "bbox_txt", ")", "ax", "[", "m", "]", ".", "text", "(", "varexp", "[", "thr", "]", "[", "0", "]", ",", "25", ",", "'{} components explain\\n{:.0f}% of variance'", ".", "format", "(", "varexp", "[", "thr", "]", "[", "0", "]", ",", "100", "*", "thr", ")", ",", "rotation", "=", "90", ",", "horizontalalignment", "=", "'center'", ",", "fontsize", "=", "'xx-small'", ",", "bbox", "=", "bbox_txt", ")", "ax", "[", "m", "]", ".", "set_yticks", "(", "[", "]", ")", "ax", "[", "m", "]", ".", "set_yticklabels", "(", "[", "]", ")", "for", "tick", "in", "ax", "[", "m", "]", ".", "xaxis", ".", "get_major_ticks", "(", ")", ":", "tick", ".", "label", ".", "set_fontsize", "(", "'x-small'", ")", "tick", ".", "label", ".", "set_rotation", "(", "'vertical'", ")", "for", "side", "in", "[", "'top'", ",", "'right'", ",", "'left'", "]", ":", "ax", "[", "m", "]", ".", "spines", "[", "side", "]", ".", "set_color", "(", "'none'", ")", "ax", "[", "m", "]", ".", "spines", "[", "side", "]", ".", "set_visible", "(", "False", ")", "if", "output_file", "is", "not", "None", ":", "figure", "=", "plt", ".", "gcf", "(", ")", "figure", ".", "savefig", "(", "output_file", ",", "bbox_inches", "=", "'tight'", ")", "plt", ".", "close", "(", "figure", ")", "figure", "=", "None", "return", "output_file", "return", "ax" ]
Parameters ---------- metadata_files: list List of paths to files containing component metadata. If more than one decomposition has been performed (e.g., anatomical and temporal CompCor decompositions), then all metadata files can be provided in the list. However, each metadata file should have a corresponding entry in `metadata_sources`. metadata_sources: list or None List of source names (e.g., ['aCompCor']) for decompositions. This list should be of the same length as `metadata_files`. output_file: str or None Path where the output figure should be saved. If this is not defined, then the plotting axes will be returned instead of the saved figure path. varexp_thresh: tuple Set of variance thresholds to include in the plot (default 0.5, 0.7, 0.9). fig: figure or None Existing figure on which to plot. Returns ------- ax: axes Plotting axes. Returned only if the `output_file` parameter is None. output_file: str The file where the figure is saved.
[ "Parameters", "----------", "metadata_files", ":", "list", "List", "of", "paths", "to", "files", "containing", "component", "metadata", ".", "If", "more", "than", "one", "decomposition", "has", "been", "performed", "(", "e", ".", "g", ".", "anatomical", "and", "temporal", "CompCor", "decompositions", ")", "then", "all", "metadata", "files", "can", "be", "provided", "in", "the", "list", ".", "However", "each", "metadata", "file", "should", "have", "a", "corresponding", "entry", "in", "metadata_sources", ".", "metadata_sources", ":", "list", "or", "None", "List", "of", "source", "names", "(", "e", ".", "g", ".", "[", "aCompCor", "]", ")", "for", "decompositions", ".", "This", "list", "should", "be", "of", "the", "same", "length", "as", "metadata_files", ".", "output_file", ":", "str", "or", "None", "Path", "where", "the", "output", "figure", "should", "be", "saved", ".", "If", "this", "is", "not", "defined", "then", "the", "plotting", "axes", "will", "be", "returned", "instead", "of", "the", "saved", "figure", "path", ".", "varexp_thresh", ":", "tuple", "Set", "of", "variance", "thresholds", "to", "include", "in", "the", "plot", "(", "default", "0", ".", "5", "0", ".", "7", "0", ".", "9", ")", ".", "fig", ":", "figure", "or", "None", "Existing", "figure", "on", "which", "to", "plot", "." ]
python
train
38.41129
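A minimal call sketch for the plot above; the metadata file names are placeholders for the CompCor component TSVs the function expects.

ax = compcor_variance_plot(
    ['sub-01_acompcor_metadata.tsv', 'sub-01_tcompcor_metadata.tsv'],
    metadata_sources=['aCompCor', 'tCompCor'],
    output_file=None,   # None -> return the plotting axes instead of saving
)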
pywbem/pywbem
examples/wbemcli_server.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/examples/wbemcli_server.py#L12-L27
def print_profile_info(org_vm, profile_instance): """ Print information on a profile defined by profile_instance. Parameters: org_vm: The value mapping for CIMRegisteredProfile and RegisteredOrganization so that the value and not the value mapping is displayed. profile_instance: instance of a profile to be printed """ org = org_vm.tovalues(profile_instance['RegisteredOrganization']) name = profile_instance['RegisteredName'] vers = profile_instance['RegisteredVersion'] print(" %s %s Profile %s" % (org, name, vers))
[ "def", "print_profile_info", "(", "org_vm", ",", "profile_instance", ")", ":", "org", "=", "org_vm", ".", "tovalues", "(", "profile_instance", "[", "'RegisteredOrganization'", "]", ")", "name", "=", "profile_instance", "[", "'RegisteredName'", "]", "vers", "=", "profile_instance", "[", "'RegisteredVersion'", "]", "print", "(", "\" %s %s Profile %s\"", "%", "(", "org", ",", "name", ",", "vers", ")", ")" ]
Print information on a profile defined by profile_instance. Parameters: org_vm: The value mapping for CIMRegisteredProfile and RegisteredOrganization so that the value and not the value mapping is displayed. profile_instance: instance of a profile to be printed
[ "Print", "information", "on", "a", "profile", "defined", "by", "profile_instance", "." ]
python
train
35.4375
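A sketch of wiring up the value mapping this helper expects; it assumes pywbem's `ValueMapping.for_property` and a live connection, and the URL/namespace are placeholders.

from pywbem import WBEMConnection, ValueMapping

conn = WBEMConnection('https://cimom.example.com')  # placeholder endpoint
org_vm = ValueMapping.for_property(conn, 'root/interop',
                                   'CIM_RegisteredProfile',
                                   'RegisteredOrganization')
for inst in conn.EnumerateInstances('CIM_RegisteredProfile',
                                    namespace='root/interop'):
    print_profile_info(org_vm, inst)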
wdbm/abstraction
abstraction.py
https://github.com/wdbm/abstraction/blob/58c81e73954cc6b4cd2f79b2216467528a96376b/abstraction.py#L314-L373
def load_exchange_word_vectors( filename = "database.db", maximum_number_of_events = None ): """ Load exchange data and return dataset. """ log.info("load word vectors of database {filename}".format( filename = filename )) # Ensure that the database exists. if not os.path.isfile(filename): log.info("database {filename} nonexistent".format( filename = filename )) program.terminate() raise Exception # Access the database. database = access_database(filename = filename) # Access or create the exchanges table. table_exchanges = database["exchanges"] # Access exchanges. table_name = "exchanges" # Create a datavision dataset. data = datavision.Dataset() # progress progress = shijian.Progress() progress.engage_quick_calculation_mode() number_of_entries = len(database[table_name]) index = 0 for index_entry, entry in enumerate(database[table_name].all()): if maximum_number_of_events is not None and\ index >= int(maximum_number_of_events): log.info( "loaded maximum requested number of events " + "({maximum_number_of_events})\r".format( maximum_number_of_events = maximum_number_of_events ) ) break #unique_identifier = str(entry["id"]) utteranceWordVector = str(entry["utteranceWordVector"]) responseWordVector = str(entry["responseWordVector"]) if utteranceWordVector != "None" and responseWordVector != "None": index += 1 utteranceWordVector = eval("np." + utteranceWordVector.replace("float32", "np.float32")) responseWordVector = eval("np." + responseWordVector.replace("float32", "np.float32")) data.variable(index = index, name = "utteranceWordVector", value = utteranceWordVector) data.variable(index = index, name = "responseWordVector", value = responseWordVector ) #utteranceWordVector = list(eval("np." + utteranceWordVector.replace("float32", "np.float32"))) #responseWordVector = list(eval("np." + responseWordVector.replace("float32", "np.float32"))) #for index_component, component in enumerate(utteranceWordVector): # data.variable(index = index, name = "uwv" + str(index_component), value = component) #for index_component, component in enumerate(responseWordVector): # data.variable(index = index, name = "rwv" + str(index_component), value = component) print progress.add_datum(fraction = index_entry / number_of_entries), return data
[ "def", "load_exchange_word_vectors", "(", "filename", "=", "\"database.db\"", ",", "maximum_number_of_events", "=", "None", ")", ":", "log", ".", "info", "(", "\"load word vectors of database {filename}\"", ".", "format", "(", "filename", "=", "filename", ")", ")", "# Ensure that the database exists.", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "log", ".", "info", "(", "\"database {filename} nonexistent\"", ".", "format", "(", "filename", "=", "filename", ")", ")", "program", ".", "terminate", "(", ")", "raise", "Exception", "# Access the database.", "database", "=", "access_database", "(", "filename", "=", "filename", ")", "# Access or create the exchanges table.", "table_exchanges", "=", "database", "[", "\"exchanges\"", "]", "# Access exchanges.", "table_name", "=", "\"exchanges\"", "# Create a datavision dataset.", "data", "=", "datavision", ".", "Dataset", "(", ")", "# progress", "progress", "=", "shijian", ".", "Progress", "(", ")", "progress", ".", "engage_quick_calculation_mode", "(", ")", "number_of_entries", "=", "len", "(", "database", "[", "table_name", "]", ")", "index", "=", "0", "for", "index_entry", ",", "entry", "in", "enumerate", "(", "database", "[", "table_name", "]", ".", "all", "(", ")", ")", ":", "if", "maximum_number_of_events", "is", "not", "None", "and", "index", ">=", "int", "(", "maximum_number_of_events", ")", ":", "log", ".", "info", "(", "\"loaded maximum requested number of events \"", "+", "\"({maximum_number_of_events})\\r\"", ".", "format", "(", "maximum_number_of_events", "=", "maximum_number_of_events", ")", ")", "break", "#unique_identifier = str(entry[\"id\"])", "utteranceWordVector", "=", "str", "(", "entry", "[", "\"utteranceWordVector\"", "]", ")", "responseWordVector", "=", "str", "(", "entry", "[", "\"responseWordVector\"", "]", ")", "if", "utteranceWordVector", "!=", "\"None\"", "and", "responseWordVector", "!=", "\"None\"", ":", "index", "+=", "1", "utteranceWordVector", "=", "eval", "(", "\"np.\"", "+", "utteranceWordVector", ".", "replace", "(", "\"float32\"", ",", "\"np.float32\"", ")", ")", "responseWordVector", "=", "eval", "(", "\"np.\"", "+", "responseWordVector", ".", "replace", "(", "\"float32\"", ",", "\"np.float32\"", ")", ")", "data", ".", "variable", "(", "index", "=", "index", ",", "name", "=", "\"utteranceWordVector\"", ",", "value", "=", "utteranceWordVector", ")", "data", ".", "variable", "(", "index", "=", "index", ",", "name", "=", "\"responseWordVector\"", ",", "value", "=", "responseWordVector", ")", "#utteranceWordVector = list(eval(\"np.\" + utteranceWordVector.replace(\"float32\", \"np.float32\")))", "#responseWordVector = list(eval(\"np.\" + responseWordVector.replace(\"float32\", \"np.float32\")))", "#for index_component, component in enumerate(utteranceWordVector):", "# data.variable(index = index, name = \"uwv\" + str(index_component), value = component)", "#for index_component, component in enumerate(responseWordVector):", "# data.variable(index = index, name = \"rwv\" + str(index_component), value = component)", "print", "progress", ".", "add_datum", "(", "fraction", "=", "index_entry", "/", "number_of_entries", ")", ",", "return", "data" ]
Load exchange data and return dataset.
[ "Load", "exchange", "data", "and", "return", "dataset", "." ]
python
train
44.6
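A call sketch (note the module is Python 2, as the bare `print` statement in the loop shows); the database path is a placeholder.

data = load_exchange_word_vectors(
    filename='database.db',           # placeholder path
    maximum_number_of_events=1000,    # cap the number of loaded exchanges
)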
ungarj/s2reader
s2reader/s2reader.py
https://github.com/ungarj/s2reader/blob/376fd7ee1d15cce0849709c149d694663a7bc0ef/s2reader/s2reader.py#L357-L428
def band_path(self, band_id, for_gdal=False, absolute=False): """Return paths of given band's jp2 files for all granules.""" band_id = str(band_id).zfill(2) if not isinstance(band_id, str) or band_id not in BAND_IDS: raise ValueError("band ID not valid: %s" % band_id) if self.dataset.is_zip and for_gdal: zip_prefix = "/vsizip/" if absolute: granule_basepath = zip_prefix + os.path.dirname(os.path.join( self.dataset.path, self.dataset.product_metadata_path )) else: granule_basepath = zip_prefix + os.path.dirname( self.dataset.product_metadata_path ) else: if absolute: granule_basepath = os.path.dirname(os.path.join( self.dataset.path, self.dataset.product_metadata_path )) else: granule_basepath = os.path.dirname( self.dataset.product_metadata_path ) product_org = self.dataset._product_metadata.iter( "Product_Organisation").next() granule_item = [ g for g in chain(*[gl for gl in product_org.iter("Granule_List")]) if self.granule_identifier == g.attrib["granuleIdentifier"] ] if len(granule_item) != 1: raise S2ReaderMetadataError( "Granule ID cannot be found in product metadata." ) rel_path = [ f.text for f in granule_item[0].iter() if f.text[-2:] == band_id ] if len(rel_path) != 1: # Apparently some SAFE files don't contain all bands. In such a # case, raise a warning and return None. warnings.warn( "%s: image path to band %s could not be extracted" % ( self.dataset.path, band_id ) ) return img_path = os.path.join(granule_basepath, rel_path[0]) + ".jp2" # Above solution still fails on the "safe" test dataset. Therefore, # the path gets checked if it contains the IMG_DATA folder and if not, # try to guess the path from the old schema. Not happy with this but # couldn't find a better way yet. if "IMG_DATA" in img_path: return img_path else: if self.dataset.is_zip: zip_prefix = "/vsizip/" granule_basepath = zip_prefix + os.path.join( self.dataset.path, self.granule_path) else: granule_basepath = self.granule_path return os.path.join( os.path.join(granule_basepath, "IMG_DATA"), "".join([ "_".join((self.granule_identifier).split("_")[:-1]), "_B", band_id, ".jp2" ]) )
[ "def", "band_path", "(", "self", ",", "band_id", ",", "for_gdal", "=", "False", ",", "absolute", "=", "False", ")", ":", "band_id", "=", "str", "(", "band_id", ")", ".", "zfill", "(", "2", ")", "if", "not", "isinstance", "(", "band_id", ",", "str", ")", "or", "band_id", "not", "in", "BAND_IDS", ":", "raise", "ValueError", "(", "\"band ID not valid: %s\"", "%", "band_id", ")", "if", "self", ".", "dataset", ".", "is_zip", "and", "for_gdal", ":", "zip_prefix", "=", "\"/vsizip/\"", "if", "absolute", ":", "granule_basepath", "=", "zip_prefix", "+", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dataset", ".", "path", ",", "self", ".", "dataset", ".", "product_metadata_path", ")", ")", "else", ":", "granule_basepath", "=", "zip_prefix", "+", "os", ".", "path", ".", "dirname", "(", "self", ".", "dataset", ".", "product_metadata_path", ")", "else", ":", "if", "absolute", ":", "granule_basepath", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dataset", ".", "path", ",", "self", ".", "dataset", ".", "product_metadata_path", ")", ")", "else", ":", "granule_basepath", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "dataset", ".", "product_metadata_path", ")", "product_org", "=", "self", ".", "dataset", ".", "_product_metadata", ".", "iter", "(", "\"Product_Organisation\"", ")", ".", "next", "(", ")", "granule_item", "=", "[", "g", "for", "g", "in", "chain", "(", "*", "[", "gl", "for", "gl", "in", "product_org", ".", "iter", "(", "\"Granule_List\"", ")", "]", ")", "if", "self", ".", "granule_identifier", "==", "g", ".", "attrib", "[", "\"granuleIdentifier\"", "]", "]", "if", "len", "(", "granule_item", ")", "!=", "1", ":", "raise", "S2ReaderMetadataError", "(", "\"Granule ID cannot be found in product metadata.\"", ")", "rel_path", "=", "[", "f", ".", "text", "for", "f", "in", "granule_item", "[", "0", "]", ".", "iter", "(", ")", "if", "f", ".", "text", "[", "-", "2", ":", "]", "==", "band_id", "]", "if", "len", "(", "rel_path", ")", "!=", "1", ":", "# Apparently some SAFE files don't contain all bands. In such a", "# case, raise a warning and return None.", "warnings", ".", "warn", "(", "\"%s: image path to band %s could not be extracted\"", "%", "(", "self", ".", "dataset", ".", "path", ",", "band_id", ")", ")", "return", "img_path", "=", "os", ".", "path", ".", "join", "(", "granule_basepath", ",", "rel_path", "[", "0", "]", ")", "+", "\".jp2\"", "# Above solution still fails on the \"safe\" test dataset. Therefore,", "# the path gets checked if it contains the IMG_DATA folder and if not,", "# try to guess the path from the old schema. Not happy with this but", "# couldn't find a better way yet.", "if", "\"IMG_DATA\"", "in", "img_path", ":", "return", "img_path", "else", ":", "if", "self", ".", "dataset", ".", "is_zip", ":", "zip_prefix", "=", "\"/vsizip/\"", "granule_basepath", "=", "zip_prefix", "+", "os", ".", "path", ".", "join", "(", "self", ".", "dataset", ".", "path", ",", "self", ".", "granule_path", ")", "else", ":", "granule_basepath", "=", "self", ".", "granule_path", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "join", "(", "granule_basepath", ",", "\"IMG_DATA\"", ")", ",", "\"\"", ".", "join", "(", "[", "\"_\"", ".", "join", "(", "(", "self", ".", "granule_identifier", ")", ".", "split", "(", "\"_\"", ")", "[", ":", "-", "1", "]", ")", ",", "\"_B\"", ",", "band_id", ",", "\".jp2\"", "]", ")", ")" ]
Return paths of given band's jp2 files for all granules.
[ "Return", "paths", "of", "given", "band", "s", "jp2", "files", "for", "all", "granules", "." ]
python
train
41.152778
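A sketch using the surrounding s2reader API, assuming the usual `s2reader.open` entry point and a `granules` attribute on the dataset; the SAFE path is a placeholder.

import s2reader

with s2reader.open('S2A_MSIL1C_example.SAFE') as dataset:  # placeholder product
    for granule in dataset.granules:
        print(granule.band_path('04', for_gdal=True, absolute=True))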
MoseleyBioinformaticsLab/ctfile
ctfile/tokenizer.py
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/tokenizer.py#L150-L162
def _ctab_property_block(stream): """Process properties block of ``Ctab``. :param stream: Queue containing lines of text. :type stream: :py:class:`collections.deque` :return: Tuples of data. :rtype: :class:`~ctfile.tokenizer.CtabPropertiesBlockLine` """ line = stream.popleft() while line != 'M END': name = line.split()[1] yield CtabPropertiesBlockLine(name, line) line = stream.popleft()
[ "def", "_ctab_property_block", "(", "stream", ")", ":", "line", "=", "stream", ".", "popleft", "(", ")", "while", "line", "!=", "'M END'", ":", "name", "=", "line", ".", "split", "(", ")", "[", "1", "]", "yield", "CtabPropertiesBlockLine", "(", "name", ",", "line", ")", "line", "=", "stream", ".", "popleft", "(", ")" ]
Process properties block of ``Ctab``. :param stream: Queue containing lines of text. :type stream: :py:class:`collections.deque` :return: Tuples of data. :rtype: :class:`~ctfile.tokenizer.CtabPropertiesBlockLine`
[ "Process", "properties", "block", "of", "Ctab", "." ]
python
train
33.461538
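A runnable sketch, assuming `CtabPropertiesBlockLine` is the namedtuple the module defines elsewhere; note the generator stops at the exact sentinel 'M END' used in the comparison above.

from collections import deque, namedtuple

CtabPropertiesBlockLine = namedtuple('CtabPropertiesBlockLine', ['name', 'line'])

stream = deque(['M  CHG  1   2   1', 'M END'])
print(list(_ctab_property_block(stream)))
# -> [CtabPropertiesBlockLine(name='CHG', line='M  CHG  1   2   1')]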
F5Networks/f5-common-python
f5/bigip/mixins.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/mixins.py#L220-L235
def _check_command_result(self): """If command result exists run these checks.""" if self.commandResult.startswith('/bin/bash'): raise UtilError('%s' % self.commandResult.split(' ', 1)[1]) if self.commandResult.startswith('/bin/mv'): raise UtilError('%s' % self.commandResult.split(' ', 1)[1]) if self.commandResult.startswith('/bin/ls'): raise UtilError('%s' % self.commandResult.split(' ', 1)[1]) if self.commandResult.startswith('/bin/rm'): raise UtilError('%s' % self.commandResult.split(' ', 1)[1]) if 'invalid option' in self.commandResult: raise UtilError('%s' % self.commandResult) if 'Invalid option' in self.commandResult: raise UtilError('%s' % self.commandResult) if 'usage: /usr/bin/get_dossier' in self.commandResult: raise UtilError('%s' % self.commandResult)
[ "def", "_check_command_result", "(", "self", ")", ":", "if", "self", ".", "commandResult", ".", "startswith", "(", "'/bin/bash'", ")", ":", "raise", "UtilError", "(", "'%s'", "%", "self", ".", "commandResult", ".", "split", "(", "' '", ",", "1", ")", "[", "1", "]", ")", "if", "self", ".", "commandResult", ".", "startswith", "(", "'/bin/mv'", ")", ":", "raise", "UtilError", "(", "'%s'", "%", "self", ".", "commandResult", ".", "split", "(", "' '", ",", "1", ")", "[", "1", "]", ")", "if", "self", ".", "commandResult", ".", "startswith", "(", "'/bin/ls'", ")", ":", "raise", "UtilError", "(", "'%s'", "%", "self", ".", "commandResult", ".", "split", "(", "' '", ",", "1", ")", "[", "1", "]", ")", "if", "self", ".", "commandResult", ".", "startswith", "(", "'/bin/rm'", ")", ":", "raise", "UtilError", "(", "'%s'", "%", "self", ".", "commandResult", ".", "split", "(", "' '", ",", "1", ")", "[", "1", "]", ")", "if", "'invalid option'", "in", "self", ".", "commandResult", ":", "raise", "UtilError", "(", "'%s'", "%", "self", ".", "commandResult", ")", "if", "'Invalid option'", "in", "self", ".", "commandResult", ":", "raise", "UtilError", "(", "'%s'", "%", "self", ".", "commandResult", ")", "if", "'usage: /usr/bin/get_dossier'", "in", "self", ".", "commandResult", ":", "raise", "UtilError", "(", "'%s'", "%", "self", ".", "commandResult", ")" ]
If command result exists run these checks.
[ "If", "command", "result", "exists", "run", "these", "checks", "." ]
python
train
56.6875
taxjar/taxjar-python
taxjar/client.py
https://github.com/taxjar/taxjar-python/blob/be9b30d7dc968d24e066c7c133849fee180f8d95/taxjar/client.py#L126-L129
def validate(self, vat_deets): """Validates an existing VAT identification number against VIES.""" request = self._get('validation', vat_deets) return self.responder(request)
[ "def", "validate", "(", "self", ",", "vat_deets", ")", ":", "request", "=", "self", ".", "_get", "(", "'validation'", ",", "vat_deets", ")", "return", "self", ".", "responder", "(", "request", ")" ]
Validates an existing VAT identification number against VIES.
[ "Validates", "an", "existing", "VAT", "identification", "number", "against", "VIES", "." ]
python
train
48.75
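A usage sketch with the documented client; the API key and VAT number are placeholders.

import taxjar

client = taxjar.Client(api_key='YOUR_API_KEY')  # placeholder key
validation = client.validate({'vat': 'FR40303265045'})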
edoburu/django-debugtools
debugtools/formatter.py
https://github.com/edoburu/django-debugtools/blob/5c609c00fa9954330cd135fc62a1e18b8e7fea8a/debugtools/formatter.py#L129-L152
def _style_text(text): """ Apply some HTML highlighting to the contents. This can't be done in the """ # Escape text and apply some formatting. # To have really good highlighting, pprint would have to be re-implemented. text = escape(text) text = text.replace(' &lt;iterator object&gt;', " <small>&lt;<var>this object can be used in a 'for' loop</var>&gt;</small>") text = text.replace(' &lt;dynamic item&gt;', ' <small>&lt;<var>this object may have extra field names</var>&gt;</small>') text = text.replace(' &lt;dynamic attribute&gt;', ' <small>&lt;<var>this object may have extra field names</var>&gt;</small>') text = RE_PROXY.sub('\g<1><small>&lt;<var>proxy object</var>&gt;</small>', text) text = RE_FUNCTION.sub('\g<1><small>&lt;<var>object method</var>&gt;</small>', text) text = RE_GENERATOR.sub("\g<1><small>&lt;<var>generator, use 'for' to traverse it</var>&gt;</small>", text) text = RE_OBJECT_ADDRESS.sub('\g<1><small>&lt;<var>\g<2> object</var>&gt;</small>', text) text = RE_MANAGER.sub('\g<1><small>&lt;<var>manager, use <kbd>.all</kbd> to traverse it</var>&gt;</small>', text) text = RE_CLASS_REPR.sub('\g<1><small>&lt;<var>\g<2> class</var>&gt;</small>', text) # Since Django's WSGIRequest does a pprint like format for it's __repr__, make that styling consistent text = RE_REQUEST_FIELDNAME.sub('\g<1>:\n <strong style="color: #222;">\g<2></strong>: ', text) text = RE_REQUEST_CLEANUP1.sub('\g<1>', text) text = RE_REQUEST_CLEANUP2.sub(')', text) return mark_safe(text)
[ "def", "_style_text", "(", "text", ")", ":", "# Escape text and apply some formatting.", "# To have really good highlighting, pprint would have to be re-implemented.", "text", "=", "escape", "(", "text", ")", "text", "=", "text", ".", "replace", "(", "' &lt;iterator object&gt;'", ",", "\" <small>&lt;<var>this object can be used in a 'for' loop</var>&gt;</small>\"", ")", "text", "=", "text", ".", "replace", "(", "' &lt;dynamic item&gt;'", ",", "' <small>&lt;<var>this object may have extra field names</var>&gt;</small>'", ")", "text", "=", "text", ".", "replace", "(", "' &lt;dynamic attribute&gt;'", ",", "' <small>&lt;<var>this object may have extra field names</var>&gt;</small>'", ")", "text", "=", "RE_PROXY", ".", "sub", "(", "'\\g<1><small>&lt;<var>proxy object</var>&gt;</small>'", ",", "text", ")", "text", "=", "RE_FUNCTION", ".", "sub", "(", "'\\g<1><small>&lt;<var>object method</var>&gt;</small>'", ",", "text", ")", "text", "=", "RE_GENERATOR", ".", "sub", "(", "\"\\g<1><small>&lt;<var>generator, use 'for' to traverse it</var>&gt;</small>\"", ",", "text", ")", "text", "=", "RE_OBJECT_ADDRESS", ".", "sub", "(", "'\\g<1><small>&lt;<var>\\g<2> object</var>&gt;</small>'", ",", "text", ")", "text", "=", "RE_MANAGER", ".", "sub", "(", "'\\g<1><small>&lt;<var>manager, use <kbd>.all</kbd> to traverse it</var>&gt;</small>'", ",", "text", ")", "text", "=", "RE_CLASS_REPR", ".", "sub", "(", "'\\g<1><small>&lt;<var>\\g<2> class</var>&gt;</small>'", ",", "text", ")", "# Since Django's WSGIRequest does a pprint like format for it's __repr__, make that styling consistent", "text", "=", "RE_REQUEST_FIELDNAME", ".", "sub", "(", "'\\g<1>:\\n <strong style=\"color: #222;\">\\g<2></strong>: '", ",", "text", ")", "text", "=", "RE_REQUEST_CLEANUP1", ".", "sub", "(", "'\\g<1>'", ",", "text", ")", "text", "=", "RE_REQUEST_CLEANUP2", ".", "sub", "(", "')'", ",", "text", ")", "return", "mark_safe", "(", "text", ")" ]
Apply some HTML highlighting to the contents. This can't be done in the
[ "Apply", "some", "HTML", "highlighting", "to", "the", "contents", ".", "This", "can", "t", "be", "done", "in", "the" ]
python
test
64.666667
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L4147-L4183
def nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, enabled, flags): r""" /** * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will * return to when no compute running processes (e.g. CUDA application which have an active context) are running * * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. * Requires root/admin permissions. * * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock * rates are desired. * * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost * behavior. * * @param device The identifier of the target device * @param enabled What state to try to set default Auto Boosted clocks of the target device to * @param flags Flags that change the default behavior. Currently Unused. * * @return * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * */ nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled """ fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultAutoBoostedClocksEnabled") ret = fn(handle, _nvmlEnableState_t(enabled), c_uint(flags)) _nvmlCheckReturn(ret) return None
[ "def", "nvmlDeviceSetDefaultAutoBoostedClocksEnabled", "(", "handle", ",", "enabled", ",", "flags", ")", ":", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceSetDefaultAutoBoostedClocksEnabled\"", ")", "ret", "=", "fn", "(", "handle", ",", "_nvmlEnableState_t", "(", "enabled", ")", ",", "c_uint", "(", "flags", ")", ")", "_nvmlCheckReturn", "(", "ret", ")", "return", "None" ]
r""" /** * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will * return to when no compute running processes (e.g. CUDA application which have an active context) are running * * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. * Requires root/admin permissions. * * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock * rates are desired. * * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost * behavior. * * @param device The identifier of the target device * @param enabled What state to try to set default Auto Boosted clocks of the target device to * @param flags Flags that change the default behavior. Currently Unused. * * @return * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * */ nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled
[ "r", "/", "**", "*", "Try", "to", "set", "the", "default", "state", "of", "Auto", "Boosted", "clocks", "on", "a", "device", ".", "This", "is", "the", "default", "state", "that", "Auto", "Boosted", "clocks", "will", "*", "return", "to", "when", "no", "compute", "running", "processes", "(", "e", ".", "g", ".", "CUDA", "application", "which", "have", "an", "active", "context", ")", "are", "running", "*", "*", "For", "Kepler", "&tm", ";", "or", "newer", "non", "-", "GeForce", "fully", "supported", "devices", "and", "Maxwell", "or", "newer", "GeForce", "devices", ".", "*", "Requires", "root", "/", "admin", "permissions", ".", "*", "*", "Auto", "Boosted", "clocks", "are", "enabled", "by", "default", "on", "some", "hardware", "allowing", "the", "GPU", "to", "run", "at", "higher", "clock", "rates", "*", "to", "maximize", "performance", "as", "thermal", "limits", "allow", ".", "Auto", "Boosted", "clocks", "should", "be", "disabled", "if", "fixed", "clock", "*", "rates", "are", "desired", ".", "*", "*", "On", "Pascal", "and", "newer", "hardware", "Auto", "Boosted", "clocks", "are", "controlled", "through", "application", "clocks", ".", "*", "Use", "\\", "ref", "nvmlDeviceSetApplicationsClocks", "and", "\\", "ref", "nvmlDeviceResetApplicationsClocks", "to", "control", "Auto", "Boost", "*", "behavior", ".", "*", "*" ]
python
train
63.486486
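A sketch of disabling default Auto Boosted clocks on device 0 using the surrounding bindings (requires root/admin per the docstring); `NVML_FEATURE_DISABLED` is the standard NVML enable-state constant.

from py3nvml.py3nvml import (nvmlInit, nvmlShutdown,
                             nvmlDeviceGetHandleByIndex,
                             nvmlDeviceSetDefaultAutoBoostedClocksEnabled,
                             NVML_FEATURE_DISABLED)

nvmlInit()
handle = nvmlDeviceGetHandleByIndex(0)
nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, NVML_FEATURE_DISABLED, 0)
nvmlShutdown()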
PGower/PyCanvas
pycanvas/apis/assignments.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L40-L89
def list_assignments(self, course_id, assignment_ids=None, bucket=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None, search_term=None): """ List assignments. Returns the list of assignments for the current context. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - include """Associations to include with the assignment. The "assignment_visibility" option requires that the Differentiated Assignments course feature be turned on. If "observed_users" is passed, submissions for observed users will also be included as an array.""" if include is not None: self._validate_enum(include, ["submission", "assignment_visibility", "all_dates", "overrides", "observed_users"]) params["include"] = include # OPTIONAL - search_term """The partial title of the assignments to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - override_assignment_dates """Apply assignment overrides for each assignment, defaults to true.""" if override_assignment_dates is not None: params["override_assignment_dates"] = override_assignment_dates # OPTIONAL - needs_grading_count_by_section """Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false""" if needs_grading_count_by_section is not None: params["needs_grading_count_by_section"] = needs_grading_count_by_section # OPTIONAL - bucket """If included, only return certain assignments depending on due date and submission status.""" if bucket is not None: self._validate_enum(bucket, ["past", "overdue", "undated", "ungraded", "unsubmitted", "upcoming", "future"]) params["bucket"] = bucket # OPTIONAL - assignment_ids """if set, return only assignments specified""" if assignment_ids is not None: params["assignment_ids"] = assignment_ids self.logger.debug("GET /api/v1/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_assignments", "(", "self", ",", "course_id", ",", "assignment_ids", "=", "None", ",", "bucket", "=", "None", ",", "include", "=", "None", ",", "needs_grading_count_by_section", "=", "None", ",", "override_assignment_dates", "=", "None", ",", "search_term", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# OPTIONAL - include\r", "\"\"\"Associations to include with the assignment. The \"assignment_visibility\" option\r\n requires that the Differentiated Assignments course feature be turned on. If\r\n \"observed_users\" is passed, submissions for observed users will also be included as an array.\"\"\"", "if", "include", "is", "not", "None", ":", "self", ".", "_validate_enum", "(", "include", ",", "[", "\"submission\"", ",", "\"assignment_visibility\"", ",", "\"all_dates\"", ",", "\"overrides\"", ",", "\"observed_users\"", "]", ")", "params", "[", "\"include\"", "]", "=", "include", "# OPTIONAL - search_term\r", "\"\"\"The partial title of the assignments to match and return.\"\"\"", "if", "search_term", "is", "not", "None", ":", "params", "[", "\"search_term\"", "]", "=", "search_term", "# OPTIONAL - override_assignment_dates\r", "\"\"\"Apply assignment overrides for each assignment, defaults to true.\"\"\"", "if", "override_assignment_dates", "is", "not", "None", ":", "params", "[", "\"override_assignment_dates\"", "]", "=", "override_assignment_dates", "# OPTIONAL - needs_grading_count_by_section\r", "\"\"\"Split up \"needs_grading_count\" by sections into the \"needs_grading_count_by_section\" key, defaults to false\"\"\"", "if", "needs_grading_count_by_section", "is", "not", "None", ":", "params", "[", "\"needs_grading_count_by_section\"", "]", "=", "needs_grading_count_by_section", "# OPTIONAL - bucket\r", "\"\"\"If included, only return certain assignments depending on due date and submission status.\"\"\"", "if", "bucket", "is", "not", "None", ":", "self", ".", "_validate_enum", "(", "bucket", ",", "[", "\"past\"", ",", "\"overdue\"", ",", "\"undated\"", ",", "\"ungraded\"", ",", "\"unsubmitted\"", ",", "\"upcoming\"", ",", "\"future\"", "]", ")", "params", "[", "\"bucket\"", "]", "=", "bucket", "# OPTIONAL - assignment_ids\r", "\"\"\"if set, return only assignments specified\"\"\"", "if", "assignment_ids", "is", "not", "None", ":", "params", "[", "\"assignment_ids\"", "]", "=", "assignment_ids", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/courses/{course_id}/assignments with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/courses/{course_id}/assignments\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "all_pages", "=", "True", ")" ]
List assignments. Returns the list of assignments for the current context.
[ "List", "assignments", ".", "Returns", "the", "list", "of", "assignments", "for", "the", "current", "context", "." ]
python
train
50.5
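A quick usage sketch for `list_assignments` above. The client construction is illustrative (the class name and constructor arguments are assumptions, not taken from the source); only the method call mirrors the signature shown:

# Hypothetical setup: an authenticated PyCanvas API object exposing the method above.
api = AssignmentsAPI('https://canvas.example.edu', 'ACCESS_TOKEN')

# Fetch ungraded assignments for course 42, with submission data attached.
assignments = api.list_assignments(
    course_id=42,
    bucket='ungraded',
    include=['submission'],
)
for assignment in assignments:
    print(assignment['id'], assignment['name'])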
QuantEcon/QuantEcon.py
quantecon/random/utilities.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/random/utilities.py#L66-L89
def _probvec(r, out):
    """
    Fill `out` with randomly sampled probability vectors as rows.

    To be compiled as a ufunc by guvectorize of Numba. The inputs must have
    the same shape except the last axis; the length of the last axis of `r`
    must be that of `out` minus 1, i.e., if out.shape[-1] is k, then
    r.shape[-1] must be k-1.

    Parameters
    ----------
    r : ndarray(float)
        Array containing random values in [0, 1).

    out : ndarray(float)
        Output array.

    """
    n = r.shape[0]
    r.sort()
    out[0] = r[0]
    for i in range(1, n):
        out[i] = r[i] - r[i-1]
    out[n] = 1 - r[n-1]
[ "def", "_probvec", "(", "r", ",", "out", ")", ":", "n", "=", "r", ".", "shape", "[", "0", "]", "r", ".", "sort", "(", ")", "out", "[", "0", "]", "=", "r", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "n", ")", ":", "out", "[", "i", "]", "=", "r", "[", "i", "]", "-", "r", "[", "i", "-", "1", "]", "out", "[", "n", "]", "=", "1", "-", "r", "[", "n", "-", "1", "]" ]
Fill `out` with randomly sampled probability vectors as rows.

To be compiled as a ufunc by guvectorize of Numba. The inputs must have the same shape except the last axis; the length of the last axis of `r` must be that of `out` minus 1, i.e., if out.shape[-1] is k, then r.shape[-1] must be k-1.

Parameters
----------
r : ndarray(float)
    Array containing random values in [0, 1).

out : ndarray(float)
    Output array.
[ "Fill", "out", "with", "randomly", "sampled", "probability", "vectors", "as", "rows", "." ]
python
train
25.625
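A small driver for `_probvec` above, assuming the plain-Python version (i.e., before Numba's guvectorize wraps it into a ufunc); `k` and the random draw are illustrative:

import numpy as np

k = 4                               # length of each probability vector
r = np.random.random(size=k - 1)    # k-1 uniform draws in [0, 1)
out = np.empty(k)

_probvec(r, out)                    # fills `out` in place

# The entries are the gaps between the sorted uniforms, so they are
# non-negative and sum to exactly 1.
print(out, out.sum())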
zarr-developers/zarr
zarr/hierarchy.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/hierarchy.py#L791-L810
def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs): """Obtain an array, creating if it doesn't exist. Other `kwargs` are as per :func:`zarr.hierarchy.Group.create_dataset`. Parameters ---------- name : string Array name. shape : int or tuple of ints Array shape. dtype : string or dtype, optional NumPy dtype. exact : bool, optional If True, require `dtype` to match exactly. If false, require `dtype` can be cast from array dtype. """ return self._write_op(self._require_dataset_nosync, name, shape=shape, dtype=dtype, exact=exact, **kwargs)
[ "def", "require_dataset", "(", "self", ",", "name", ",", "shape", ",", "dtype", "=", "None", ",", "exact", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_write_op", "(", "self", ".", "_require_dataset_nosync", ",", "name", ",", "shape", "=", "shape", ",", "dtype", "=", "dtype", ",", "exact", "=", "exact", ",", "*", "*", "kwargs", ")" ]
Obtain an array, creating if it doesn't exist. Other `kwargs` are as per :func:`zarr.hierarchy.Group.create_dataset`. Parameters ---------- name : string Array name. shape : int or tuple of ints Array shape. dtype : string or dtype, optional NumPy dtype. exact : bool, optional If True, require `dtype` to match exactly. If false, require `dtype` can be cast from array dtype.
[ "Obtain", "an", "array", "creating", "if", "it", "doesn", "t", "exist", ".", "Other", "kwargs", "are", "as", "per", ":", "func", ":", "zarr", ".", "hierarchy", ".", "Group", ".", "create_dataset", "." ]
python
train
35.95
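A minimal sketch of `require_dataset` in use; `zarr.group()` is the standard entry point, while the array name and shape here are made up:

import zarr

root = zarr.group()

# The first call creates the array; the second returns the existing one,
# provided the shape matches and the dtype is compatible (an exact dtype
# match is required when exact=True).
z = root.require_dataset('data', shape=(100, 100), dtype='f8')
z = root.require_dataset('data', shape=(100, 100), dtype='f8')
print(z.shape, z.dtype)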
hubo1016/vlcp
vlcp/event/pqueue.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/pqueue.py#L812-L828
def append(self, event, force = False): ''' Append an event to queue. The events are classified and appended to sub-queues :param event: input event :param force: if True, the event is appended even if the queue is full :returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise ''' if self.tree is None: if self.parent is None: raise IndexError('The queue is removed') else: return self.parent.parent.append(event, force) q = self.tree.matchfirst(event) return q.append(event, force)
[ "def", "append", "(", "self", ",", "event", ",", "force", "=", "False", ")", ":", "if", "self", ".", "tree", "is", "None", ":", "if", "self", ".", "parent", "is", "None", ":", "raise", "IndexError", "(", "'The queue is removed'", ")", "else", ":", "return", "self", ".", "parent", ".", "parent", ".", "append", "(", "event", ",", "force", ")", "q", "=", "self", ".", "tree", ".", "matchfirst", "(", "event", ")", "return", "q", ".", "append", "(", "event", ",", "force", ")" ]
Append an event to queue. The events are classified and appended to sub-queues :param event: input event :param force: if True, the event is appended even if the queue is full :returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
[ "Append", "an", "event", "to", "queue", ".", "The", "events", "are", "classified", "and", "appended", "to", "sub", "-", "queues", ":", "param", "event", ":", "input", "event", ":", "param", "force", ":", "if", "True", "the", "event", "is", "appended", "even", "if", "the", "queue", "is", "full", ":", "returns", ":", "None", "if", "appended", "successfully", "or", "a", "matcher", "to", "match", "a", "QueueCanWriteEvent", "otherwise" ]
python
train
38.705882
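The matcher-on-full contract above suggests a retry loop like this sketch inside a VLCP routine; the generator style shown is an assumption based on the docstring, not verified against the library:

def enqueue(queue, event):
    # append returns None on success, or a matcher for a
    # QueueCanWriteEvent when the queue is full.
    matcher = queue.append(event, force=False)
    while matcher is not None:
        yield (matcher,)            # wait until the queue can be written
        matcher = queue.append(event, force=False)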
scheibler/khard
khard/carddav_object.py
https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/carddav_object.py#L221-L235
def get_first_name_last_name(self): """ :rtype: str """ names = [] if self._get_first_names(): names += self._get_first_names() if self._get_additional_names(): names += self._get_additional_names() if self._get_last_names(): names += self._get_last_names() if names: return helpers.list_to_string(names, " ") else: return self.get_full_name()
[ "def", "get_first_name_last_name", "(", "self", ")", ":", "names", "=", "[", "]", "if", "self", ".", "_get_first_names", "(", ")", ":", "names", "+=", "self", ".", "_get_first_names", "(", ")", "if", "self", ".", "_get_additional_names", "(", ")", ":", "names", "+=", "self", ".", "_get_additional_names", "(", ")", "if", "self", ".", "_get_last_names", "(", ")", ":", "names", "+=", "self", ".", "_get_last_names", "(", ")", "if", "names", ":", "return", "helpers", ".", "list_to_string", "(", "names", ",", "\" \"", ")", "else", ":", "return", "self", ".", "get_full_name", "(", ")" ]
:rtype: str
[ ":", "rtype", ":", "str" ]
python
test
30.733333
NICTA/revrand
revrand/mathfun/special.py
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/mathfun/special.py#L91-L124
def softplus(X):
    """
    Pass X through a soft-plus function in a numerically stable way (using
    the log-sum-exp trick).

    The softplus transformation is:

    .. math::

        \log(1 + \exp\{X\})

    Parameters
    ----------
    X: ndarray
        shape (N,) array or shape (N, D) array of data.

    Returns
    -------
    spX: ndarray
        array of same shape of X with the result of softplus(X).
    """
    if np.isscalar(X):
        return logsumexp(np.vstack((np.zeros(1), [X])).T, axis=1)[0]

    N = X.shape[0]

    if X.ndim == 1:
        return logsumexp(np.vstack((np.zeros(N), X)).T, axis=1)
    elif X.ndim == 2:
        sftX = np.empty(X.shape, dtype=float)
        for d in range(X.shape[1]):
            sftX[:, d] = logsumexp(np.vstack((np.zeros(N), X[:, d])).T,
                                   axis=1)
        return sftX
    else:
        raise ValueError("This only works on up to 2D arrays.")
[ "def", "softplus", "(", "X", ")", ":", "if", "np", ".", "isscalar", "(", "X", ")", ":", "return", "logsumexp", "(", "np", ".", "vstack", "(", "(", "np", ".", "zeros", "(", "1", ")", ",", "[", "X", "]", ")", ")", ".", "T", ",", "axis", "=", "1", ")", "[", "0", "]", "N", "=", "X", ".", "shape", "[", "0", "]", "if", "X", ".", "ndim", "==", "1", ":", "return", "logsumexp", "(", "np", ".", "vstack", "(", "(", "np", ".", "zeros", "(", "N", ")", ",", "X", ")", ")", ".", "T", ",", "axis", "=", "1", ")", "elif", "X", ".", "ndim", "==", "2", ":", "sftX", "=", "np", ".", "empty", "(", "X", ".", "shape", ",", "dtype", "=", "float", ")", "for", "d", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", ":", "sftX", "[", ":", ",", "d", "]", "=", "logsumexp", "(", "np", ".", "vstack", "(", "(", "np", ".", "zeros", "(", "N", ")", ",", "X", "[", ":", ",", "d", "]", ")", ")", ".", "T", ",", "axis", "=", "1", ")", "return", "sftX", "else", ":", "raise", "ValueError", "(", "\"This only works on up to 2D arrays.\"", ")" ]
Pass X through a soft-plus function in a numerically stable way (using
the log-sum-exp trick).

The softplus transformation is:

.. math::

    \log(1 + \exp\{X\})

Parameters
----------
X: ndarray
    shape (N,) array or shape (N, D) array of data.

Returns
-------
spX: ndarray
    array of same shape of X with the result of softplus(X).
[ "Pass", "X", "through", "a", "soft", "-", "plus", "function", "in", "a", "numerically", "stable", "way", "(", "using", "the", "log", "-", "sum", "-", "exp", "trick", ")", "." ]
python
train
27.323529
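Why the log-sum-exp trick matters: the naive formula overflows for large inputs, while `softplus` above stays finite. A quick check (values illustrative):

import numpy as np

X = np.array([-1000.0, 0.0, 1000.0])

# Naive: np.log(1 + np.exp(X)) -> [0., 0.693..., inf] plus an overflow
# warning, because exp(1000) is not representable in float64.

print(softplus(X))   # stable: approximately [0., 0.6931, 1000.]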
fracpete/python-weka-wrapper3
python/weka/flow/source.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/source.py#L116-L133
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(FileSupplier, self).fix_config(options) opt = "files" if opt not in options: options[opt] = [] if opt not in self.help: self.help[opt] = "The files to output (list of string)." return options
[ "def", "fix_config", "(", "self", ",", "options", ")", ":", "options", "=", "super", "(", "FileSupplier", ",", "self", ")", ".", "fix_config", "(", "options", ")", "opt", "=", "\"files\"", "if", "opt", "not", "in", "options", ":", "options", "[", "opt", "]", "=", "[", "]", "if", "opt", "not", "in", "self", ".", "help", ":", "self", ".", "help", "[", "opt", "]", "=", "\"The files to output (list of string).\"", "return", "options" ]
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
[ "Fixes", "the", "options", "if", "necessary", ".", "I", ".", "e", ".", "it", "adds", "all", "required", "elements", "to", "the", "dictionary", "." ]
python
train
30.5
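A tiny demonstration of the contract, assuming `FileSupplier` can be instantiated with no arguments (not shown in the excerpt):

supplier = FileSupplier()

options = supplier.fix_config({})
print(options['files'])        # [] -- the required key is now present
print(supplier.help['files'])  # 'The files to output (list of string).'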
ellmetha/django-machina
machina/apps/forum_conversation/forms.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/forms.py#L197-L224
def save(self, commit=True): """ Saves the instance. """ if not self.instance.pk: # First, handle topic creation if 'topic_type' in self.cleaned_data and len(self.cleaned_data['topic_type']): topic_type = self.cleaned_data['topic_type'] else: topic_type = Topic.TOPIC_POST topic = Topic( forum=self.forum, subject=self.cleaned_data['subject'], # The topic's name is the post's name type=topic_type, status=Topic.TOPIC_UNLOCKED, approved=self.perm_handler.can_post_without_approval(self.forum, self.user), ) if not self.user.is_anonymous: topic.poster = self.user self.topic = topic if commit: topic.save() else: if 'topic_type' in self.cleaned_data and len(self.cleaned_data['topic_type']): if self.instance.topic.type != self.cleaned_data['topic_type']: self.instance.topic.type = self.cleaned_data['topic_type'] self.instance.topic._simple_save() return super().save(commit)
[ "def", "save", "(", "self", ",", "commit", "=", "True", ")", ":", "if", "not", "self", ".", "instance", ".", "pk", ":", "# First, handle topic creation", "if", "'topic_type'", "in", "self", ".", "cleaned_data", "and", "len", "(", "self", ".", "cleaned_data", "[", "'topic_type'", "]", ")", ":", "topic_type", "=", "self", ".", "cleaned_data", "[", "'topic_type'", "]", "else", ":", "topic_type", "=", "Topic", ".", "TOPIC_POST", "topic", "=", "Topic", "(", "forum", "=", "self", ".", "forum", ",", "subject", "=", "self", ".", "cleaned_data", "[", "'subject'", "]", ",", "# The topic's name is the post's name", "type", "=", "topic_type", ",", "status", "=", "Topic", ".", "TOPIC_UNLOCKED", ",", "approved", "=", "self", ".", "perm_handler", ".", "can_post_without_approval", "(", "self", ".", "forum", ",", "self", ".", "user", ")", ",", ")", "if", "not", "self", ".", "user", ".", "is_anonymous", ":", "topic", ".", "poster", "=", "self", ".", "user", "self", ".", "topic", "=", "topic", "if", "commit", ":", "topic", ".", "save", "(", ")", "else", ":", "if", "'topic_type'", "in", "self", ".", "cleaned_data", "and", "len", "(", "self", ".", "cleaned_data", "[", "'topic_type'", "]", ")", ":", "if", "self", ".", "instance", ".", "topic", ".", "type", "!=", "self", ".", "cleaned_data", "[", "'topic_type'", "]", ":", "self", ".", "instance", ".", "topic", ".", "type", "=", "self", ".", "cleaned_data", "[", "'topic_type'", "]", "self", ".", "instance", ".", "topic", ".", "_simple_save", "(", ")", "return", "super", "(", ")", ".", "save", "(", "commit", ")" ]
Saves the instance.
[ "Saves", "the", "instance", "." ]
python
train
42.571429
johnnoone/aioconsul
aioconsul/client/health_endpoint.py
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/health_endpoint.py#L16-L69
async def node(self, node, *, dc=None, watch=None, consistency=None): """Returns the health info of a node. Parameters: node (ObjectID): Node ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: CollectionMeta: where value is a list of checks returns the checks specific of a node. It returns a body like this:: [ { "Node": "foobar", "CheckID": "serfHealth", "Name": "Serf Health Status", "Status": "passing", "Notes": "", "Output": "", "ServiceID": "", "ServiceName": "" }, { "Node": "foobar", "CheckID": "service:redis", "Name": "Service 'redis' check", "Status": "passing", "Notes": "", "Output": "", "ServiceID": "redis", "ServiceName": "redis" } ] In this case, we can see there is a system level check (that is, a check with no associated ``ServiceID``) as well as a service check for Redis. The "serfHealth" check is special in that it is automatically present on every node. When a node joins the Consul cluster, it is part of a distributed failure detection provided by Serf. If a node fails, it is detected and the status is automatically changed to ``critical``. """ node_id = extract_attr(node, keys=["Node", "ID"]) params = {"dc": dc} response = await self._api.get("/v1/health/node", node_id, params=params, watch=watch, consistency=consistency) return consul(response)
[ "async", "def", "node", "(", "self", ",", "node", ",", "*", ",", "dc", "=", "None", ",", "watch", "=", "None", ",", "consistency", "=", "None", ")", ":", "node_id", "=", "extract_attr", "(", "node", ",", "keys", "=", "[", "\"Node\"", ",", "\"ID\"", "]", ")", "params", "=", "{", "\"dc\"", ":", "dc", "}", "response", "=", "await", "self", ".", "_api", ".", "get", "(", "\"/v1/health/node\"", ",", "node_id", ",", "params", "=", "params", ",", "watch", "=", "watch", ",", "consistency", "=", "consistency", ")", "return", "consul", "(", "response", ")" ]
Returns the health info of a node. Parameters: node (ObjectID): Node ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: CollectionMeta: where value is a list of checks returns the checks specific of a node. It returns a body like this:: [ { "Node": "foobar", "CheckID": "serfHealth", "Name": "Serf Health Status", "Status": "passing", "Notes": "", "Output": "", "ServiceID": "", "ServiceName": "" }, { "Node": "foobar", "CheckID": "service:redis", "Name": "Service 'redis' check", "Status": "passing", "Notes": "", "Output": "", "ServiceID": "redis", "ServiceName": "redis" } ] In this case, we can see there is a system level check (that is, a check with no associated ``ServiceID``) as well as a service check for Redis. The "serfHealth" check is special in that it is automatically present on every node. When a node joins the Consul cluster, it is part of a distributed failure detection provided by Serf. If a node fails, it is detected and the status is automatically changed to ``critical``.
[ "Returns", "the", "health", "info", "of", "a", "node", "." ]
python
train
37.740741
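A hedged async sketch of the call above; how the client is constructed and whether the result iterates this way are assumptions, only `client.health.node(...)` mirrors the documented signature:

from aioconsul import Consul

async def show_node_health(client):
    # Per the docstring, the returned CollectionMeta's value is the list
    # of checks; iterating it directly is assumed to yield those dicts.
    checks = await client.health.node('foobar', dc='dc1')
    for check in checks:
        print(check['CheckID'], check['Status'])

# Driving it (client construction is an assumption):
#   import asyncio
#   asyncio.get_event_loop().run_until_complete(show_node_health(Consul()))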
NAMD/pypln.api
pypln/api.py
https://github.com/NAMD/pypln.api/blob/ccb73fd80ca094669a85bd3991dc84a8564ab016/pypln/api.py#L275-L282
def documents(self, full=False): '''Return list of documents owned by user. If `full=True`, it'll download all pages returned by the HTTP server''' url = self.base_url + self.DOCUMENTS_PAGE class_ = Document results = self._retrieve_resources(url, class_, full) return results
[ "def", "documents", "(", "self", ",", "full", "=", "False", ")", ":", "url", "=", "self", ".", "base_url", "+", "self", ".", "DOCUMENTS_PAGE", "class_", "=", "Document", "results", "=", "self", ".", "_retrieve_resources", "(", "url", ",", "class_", ",", "full", ")", "return", "results" ]
Return list of documents owned by user. If `full=True`, it'll download all pages returned by the HTTP server
[ "Return", "list", "of", "documents", "owned", "by", "user", "." ]
python
train
39.75
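A usage sketch for `documents`; the constructor arguments (service URL and credentials) are placeholders, not real endpoints:

from pypln.api import PyPLN

pypln = PyPLN('http://demo.pypln.org/', ('user', 'password'))

docs = pypln.documents(full=True)   # full=True walks every result page
for doc in docs:
    print(doc)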
ultrabug/py3status
py3status/modules/xrandr.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/modules/xrandr.py#L272-L279
def _get_string_and_set_width(self, combination, mode): """ Construct the string to be displayed and record the max width. """ show = "{}".format(self._separator(mode)).join(combination) show = show.rstrip("{}".format(self._separator(mode))) self.max_width = max([self.max_width, len(show)]) return show
[ "def", "_get_string_and_set_width", "(", "self", ",", "combination", ",", "mode", ")", ":", "show", "=", "\"{}\"", ".", "format", "(", "self", ".", "_separator", "(", "mode", ")", ")", ".", "join", "(", "combination", ")", "show", "=", "show", ".", "rstrip", "(", "\"{}\"", ".", "format", "(", "self", ".", "_separator", "(", "mode", ")", ")", ")", "self", ".", "max_width", "=", "max", "(", "[", "self", ".", "max_width", ",", "len", "(", "show", ")", "]", ")", "return", "show" ]
Construct the string to be displayed and record the max width.
[ "Construct", "the", "string", "to", "be", "displayed", "and", "record", "the", "max", "width", "." ]
python
train
44
jrderuiter/pybiomart
src/pybiomart/base.py
https://github.com/jrderuiter/pybiomart/blob/7802d45fe88549ab0512d6f37f815fc43b172b39/src/pybiomart/base.py#L95-L112
def get(self, **params): """Performs get request to the biomart service. Args: **params (dict of str: any): Arbitrary keyword arguments, which are added as parameters to the get request to biomart. Returns: requests.models.Response: Response from biomart for the request. """ if self._use_cache: r = requests.get(self.url, params=params) else: with requests_cache.disabled(): r = requests.get(self.url, params=params) r.raise_for_status() return r
[ "def", "get", "(", "self", ",", "*", "*", "params", ")", ":", "if", "self", ".", "_use_cache", ":", "r", "=", "requests", ".", "get", "(", "self", ".", "url", ",", "params", "=", "params", ")", "else", ":", "with", "requests_cache", ".", "disabled", "(", ")", ":", "r", "=", "requests", ".", "get", "(", "self", ".", "url", ",", "params", "=", "params", ")", "r", ".", "raise_for_status", "(", ")", "return", "r" ]
Performs get request to the biomart service. Args: **params (dict of str: any): Arbitrary keyword arguments, which are added as parameters to the get request to biomart. Returns: requests.models.Response: Response from biomart for the request.
[ "Performs", "get", "request", "to", "the", "biomart", "service", "." ]
python
train
32.055556
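A short sketch of `get` on a pybiomart server object; the host and the `type=registry` query are illustrative of a typical BioMart request:

from pybiomart import Server

server = Server(host='http://www.ensembl.org')

# Keyword arguments become query parameters on the underlying
# requests.get call; raise_for_status() has already run by the
# time the response is returned.
response = server.get(type='registry')
print(response.status_code)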
pymc-devs/pymc
pymc/Node.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Node.py#L42-L54
def logp_gradient_of_set(variable_set, calculation_set=None): """ Calculates the gradient of the joint log posterior with respect to all the variables in variable_set. Calculation of the log posterior is restricted to the variables in calculation_set. Returns a dictionary of the gradients. """ logp_gradients = {} for variable in variable_set: logp_gradients[variable] = logp_gradient(variable, calculation_set) return logp_gradients
[ "def", "logp_gradient_of_set", "(", "variable_set", ",", "calculation_set", "=", "None", ")", ":", "logp_gradients", "=", "{", "}", "for", "variable", "in", "variable_set", ":", "logp_gradients", "[", "variable", "]", "=", "logp_gradient", "(", "variable", ",", "calculation_set", ")", "return", "logp_gradients" ]
Calculates the gradient of the joint log posterior with respect to all the variables in variable_set. Calculation of the log posterior is restricted to the variables in calculation_set. Returns a dictionary of the gradients.
[ "Calculates", "the", "gradient", "of", "the", "joint", "log", "posterior", "with", "respect", "to", "all", "the", "variables", "in", "variable_set", ".", "Calculation", "of", "the", "log", "posterior", "is", "restricted", "to", "the", "variables", "in", "calculation_set", "." ]
python
train
35.769231
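A toy sketch for `logp_gradient_of_set` in PyMC2 style; the model and the import location (inferred from the pymc/Node.py path above) are assumptions:

import pymc as pm
from pymc.Node import logp_gradient_of_set   # location per the excerpt's path

mu = pm.Normal('mu', mu=0.0, tau=1.0)
x = pm.Normal('x', mu=mu, tau=1.0, value=1.5, observed=True)

grads = logp_gradient_of_set({mu})
print(grads[mu])   # d(log posterior)/d(mu) at mu's current value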
DataKitchen/DKCloudCommand
DKCloudCommand/cli/__main__.py
https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/cli/__main__.py#L822-L827
def delete_orderrun(backend, orderrun_id): """ Delete the orderrun specified by the argument. """ click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green') check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip()))
[ "def", "delete_orderrun", "(", "backend", ",", "orderrun_id", ")", ":", "click", ".", "secho", "(", "'%s - Deleting orderrun %s'", "%", "(", "get_datetime", "(", ")", ",", "orderrun_id", ")", ",", "fg", "=", "'green'", ")", "check_and_print", "(", "DKCloudCommandRunner", ".", "delete_orderrun", "(", "backend", ".", "dki", ",", "orderrun_id", ".", "strip", "(", ")", ")", ")" ]
Delete the orderrun specified by the argument.
[ "Delete", "the", "orderrun", "specified", "by", "the", "argument", "." ]
python
train
47.5
GeorgeArgyros/sfalearn
sfalearn/observationtableinit.py
https://github.com/GeorgeArgyros/sfalearn/blob/68a93f507e2fb7d89ca04bd8a8f0da2d6c680443/sfalearn/observationtableinit.py#L200-L287
def _init_using_k_equivalence(self, given_graph, sfa=False): """ Args: given_graph (DFA): The DFA states sfa (boolean): A boolean for chosing SFA Return: list, list, list: sm_vector, smi_vector, em_vector initialization vectors """ graph = DFA(self.alphabet) graph.init_from_acceptor(given_graph) graph.fixminimized(self.alphabet) # Access Strings self.access_strings_map = self._bfs_path_states(graph, sorted( graph.states, key=attrgetter('initial'), reverse=True)[0]) # Find Q set_q = set(self._object_set_to_state_list(graph.states)) # We will work with states addresses here instead of states stateid for # more convenience set_f = set(self._object_set_to_state_list(self._get_accepted(graph))) # Perform P := {F, Q-F} set_nf = set_q.copy() - set_f.copy() self.groups = [set_f.copy(), set_nf.copy()] self.bookeeping = [(set_f, set_nf, '')] done = False while not done: done = True new_groups = [] for selectgroup in self.groups: # _check for each letter if it splits the current group for character in self.alphabet: # print 'Testing symbol: ', c target = defaultdict(list) target_states = defaultdict(int) new_g = [set(selectgroup)] for sid in selectgroup: # _check if all transitions using c are going in a state # in the same group. If they are going on a different # group then split deststate = self._delta(graph, graph[sid], character) destgroup = self._get_group_from_state( deststate.stateid) target[destgroup].append(sid) target_states[destgroup] = deststate.stateid if len(target) > 1: inv_target_states = { v: k for k, v in target_states.iteritems()} new_g = [set(selectedstate) for selectedstate in target.values()] done = False # Get all the partitions of destgroups queue = [set([x for x in target_states.values()])] while queue: top = queue.pop(0) (group1, group2, distinguish_string) = self._partition_group(top) ng1 = self._reverse_to_source( target, [inv_target_states[x] for x in group1]) ng2 = self._reverse_to_source( target, [inv_target_states[x] for x in group2]) dist_string = character + distinguish_string self.bookeeping.append((ng1, ng2, dist_string)) if len(group1) > 1: queue.append(group1) if len(group2) > 1: queue.append(group2) break new_groups += new_g # End of iteration for the k-equivalence # Assign new groups and check if any change occured self.groups = new_groups sm_vector = [ i for (a, i) in sorted( self.access_strings_map.items(), key=lambda x: len(x[1]))] if not sfa: smi_vector = ['{}{}'.format(a, b) for b in self.alphabet for a in sm_vector] else: smi_vector = self._init_smi(graph, self.access_strings_map) em_vector = [distinguish_string for (_, _, distinguish_string) in self.bookeeping] return sm_vector, smi_vector, em_vector
[ "def", "_init_using_k_equivalence", "(", "self", ",", "given_graph", ",", "sfa", "=", "False", ")", ":", "graph", "=", "DFA", "(", "self", ".", "alphabet", ")", "graph", ".", "init_from_acceptor", "(", "given_graph", ")", "graph", ".", "fixminimized", "(", "self", ".", "alphabet", ")", "# Access Strings", "self", ".", "access_strings_map", "=", "self", ".", "_bfs_path_states", "(", "graph", ",", "sorted", "(", "graph", ".", "states", ",", "key", "=", "attrgetter", "(", "'initial'", ")", ",", "reverse", "=", "True", ")", "[", "0", "]", ")", "# Find Q", "set_q", "=", "set", "(", "self", ".", "_object_set_to_state_list", "(", "graph", ".", "states", ")", ")", "# We will work with states addresses here instead of states stateid for", "# more convenience", "set_f", "=", "set", "(", "self", ".", "_object_set_to_state_list", "(", "self", ".", "_get_accepted", "(", "graph", ")", ")", ")", "# Perform P := {F, Q-F}", "set_nf", "=", "set_q", ".", "copy", "(", ")", "-", "set_f", ".", "copy", "(", ")", "self", ".", "groups", "=", "[", "set_f", ".", "copy", "(", ")", ",", "set_nf", ".", "copy", "(", ")", "]", "self", ".", "bookeeping", "=", "[", "(", "set_f", ",", "set_nf", ",", "''", ")", "]", "done", "=", "False", "while", "not", "done", ":", "done", "=", "True", "new_groups", "=", "[", "]", "for", "selectgroup", "in", "self", ".", "groups", ":", "# _check for each letter if it splits the current group", "for", "character", "in", "self", ".", "alphabet", ":", "# print 'Testing symbol: ', c", "target", "=", "defaultdict", "(", "list", ")", "target_states", "=", "defaultdict", "(", "int", ")", "new_g", "=", "[", "set", "(", "selectgroup", ")", "]", "for", "sid", "in", "selectgroup", ":", "# _check if all transitions using c are going in a state", "# in the same group. 
If they are going on a different", "# group then split", "deststate", "=", "self", ".", "_delta", "(", "graph", ",", "graph", "[", "sid", "]", ",", "character", ")", "destgroup", "=", "self", ".", "_get_group_from_state", "(", "deststate", ".", "stateid", ")", "target", "[", "destgroup", "]", ".", "append", "(", "sid", ")", "target_states", "[", "destgroup", "]", "=", "deststate", ".", "stateid", "if", "len", "(", "target", ")", ">", "1", ":", "inv_target_states", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "target_states", ".", "iteritems", "(", ")", "}", "new_g", "=", "[", "set", "(", "selectedstate", ")", "for", "selectedstate", "in", "target", ".", "values", "(", ")", "]", "done", "=", "False", "# Get all the partitions of destgroups", "queue", "=", "[", "set", "(", "[", "x", "for", "x", "in", "target_states", ".", "values", "(", ")", "]", ")", "]", "while", "queue", ":", "top", "=", "queue", ".", "pop", "(", "0", ")", "(", "group1", ",", "group2", ",", "distinguish_string", ")", "=", "self", ".", "_partition_group", "(", "top", ")", "ng1", "=", "self", ".", "_reverse_to_source", "(", "target", ",", "[", "inv_target_states", "[", "x", "]", "for", "x", "in", "group1", "]", ")", "ng2", "=", "self", ".", "_reverse_to_source", "(", "target", ",", "[", "inv_target_states", "[", "x", "]", "for", "x", "in", "group2", "]", ")", "dist_string", "=", "character", "+", "distinguish_string", "self", ".", "bookeeping", ".", "append", "(", "(", "ng1", ",", "ng2", ",", "dist_string", ")", ")", "if", "len", "(", "group1", ")", ">", "1", ":", "queue", ".", "append", "(", "group1", ")", "if", "len", "(", "group2", ")", ">", "1", ":", "queue", ".", "append", "(", "group2", ")", "break", "new_groups", "+=", "new_g", "# End of iteration for the k-equivalence", "# Assign new groups and check if any change occured", "self", ".", "groups", "=", "new_groups", "sm_vector", "=", "[", "i", "for", "(", "a", ",", "i", ")", "in", "sorted", "(", "self", ".", "access_strings_map", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", "[", "1", "]", ")", ")", "]", "if", "not", "sfa", ":", "smi_vector", "=", "[", "'{}{}'", ".", "format", "(", "a", ",", "b", ")", "for", "b", "in", "self", ".", "alphabet", "for", "a", "in", "sm_vector", "]", "else", ":", "smi_vector", "=", "self", ".", "_init_smi", "(", "graph", ",", "self", ".", "access_strings_map", ")", "em_vector", "=", "[", "distinguish_string", "for", "(", "_", ",", "_", ",", "distinguish_string", ")", "in", "self", ".", "bookeeping", "]", "return", "sm_vector", ",", "smi_vector", ",", "em_vector" ]
Args: given_graph (DFA): The DFA states sfa (boolean): A boolean for choosing SFA Return: list, list, list: sm_vector, smi_vector, em_vector initialization vectors
[ "Args", ":", "given_graph", "(", "DFA", ")", ":", "The", "DFA", "states", "sfa", "(", "boolean", ")", ":", "A", "boolean", "for", "chosing", "SFA", "Return", ":", "list", "list", "list", ":", "sm_vector", "smi_vector", "em_vector", "initialization", "vectors" ]
python
train
44.681818
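The loop above is Moore-style partition refinement (split a group whenever some symbol maps its members into different groups), with extra bookkeeping of distinguishing strings for the observation table. A minimal standalone sketch of just the refinement idea — all names are local to this example, not the library's API:

from collections import defaultdict

def k_equivalence_classes(states, alphabet, delta, accepting):
    # Start from {F, Q - F} and refine until stable.
    groups = [set(accepting), set(states) - set(accepting)]
    changed = True
    while changed:
        changed = False
        group_of = {s: i for i, g in enumerate(groups) for s in g}
        new_groups = []
        for g in groups:
            for c in alphabet:
                buckets = defaultdict(set)
                for s in g:
                    buckets[group_of[delta(s, c)]].add(s)
                if len(buckets) > 1:        # symbol c splits this group
                    new_groups.extend(buckets.values())
                    changed = True
                    break
            else:                           # no symbol splits g; keep it
                new_groups.append(g)
        groups = new_groups
    return groups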
O365/python-o365
O365/drive.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/drive.py#L549-L577
def get_thumbnails(self, size=None):
    """ Returns this Item Thumbnails. Thumbnails are not supported on
    SharePoint Server 2016.

    :param size: request only the specified size: e.g. "small",
     Custom 300x400 px: "c300x400", Crop: "c300x400_Crop"
    :return: Thumbnail Data
    :rtype: dict
    """
    if not self.object_id:
        return []

    url = self.build_url(
        self._endpoints.get('thumbnails').format(id=self.object_id))

    params = {}
    if size is not None:
        params['select'] = size

    response = self.con.get(url, params=params)

    if not response:
        return []

    data = response.json()

    if not self.thumbnails or size is None:
        self.thumbnails = data

    return data
[ "def", "get_thumbnails", "(", "self", ",", "size", "=", "None", ")", ":", "if", "not", "self", ".", "object_id", ":", "return", "[", "]", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'thumbnails'", ")", ".", "format", "(", "id", "=", "self", ".", "object_id", ")", ")", "params", "=", "{", "}", "if", "size", "is", "not", "None", ":", "params", "[", "'select'", "]", "=", "size", "response", "=", "self", ".", "con", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "not", "response", ":", "return", "[", "]", "data", "=", "response", ".", "json", "(", ")", "if", "not", "self", ".", "thumbnails", "or", "size", "is", "None", ":", "self", ".", "thumbnails", "=", "data", "return", "data" ]
Returns this Item Thumbnails. Thumbnails are not supported on
SharePoint Server 2016.

:param size: request only the specified size: e.g. "small",
 Custom 300x400 px: "c300x400", Crop: "c300x400_Crop"
:return: Thumbnail Data
:rtype: dict
[ "Returns", "this", "Item", "Thumbnails", ".", "Thumbnails", "are", "not", "supported", "on", "SharePoint", "Server", "2016", "." ]
python
train
27.206897
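A sketch of fetching thumbnails from a drive item; how `item` is obtained (see the comment) is illustrative, and only `get_thumbnails` mirrors the method shown above:

# `item` would come from an authenticated O365 account, e.g. via its
# default drive (exact helper names are assumptions):
#   item = drive.get_item_by_path('/photos/holiday.jpg')

thumbs = item.get_thumbnails(size='c300x400_Crop')
if thumbs:   # [] when the item has no id or the request failed
    for entry in thumbs.get('value', []):
        print(entry)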
maraujop/requests-oauth2
requests_oauth2/oauth2.py
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L41-L46
def _check_configuration(self, *attrs): """Check that each named attr has been configured """ for attr in attrs: if getattr(self, attr, None) is None: raise ConfigurationError("{} not configured".format(attr))
[ "def", "_check_configuration", "(", "self", ",", "*", "attrs", ")", ":", "for", "attr", "in", "attrs", ":", "if", "getattr", "(", "self", ",", "attr", ",", "None", ")", "is", "None", ":", "raise", "ConfigurationError", "(", "\"{} not configured\"", ".", "format", "(", "attr", ")", ")" ]
Check that each named attr has been configured
[ "Check", "that", "each", "named", "attr", "has", "been", "configured" ]
python
train
42.666667
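The guard pattern above is easy to demonstrate in isolation; this self-contained mirror of the helper (not the library's actual class) shows the intended failure mode:

class ConfigurationError(Exception):
    pass

class Client:
    client_id = 'abc123'
    redirect_uri = None            # deliberately left unset

    def _check_configuration(self, *attrs):
        for attr in attrs:
            if getattr(self, attr, None) is None:
                raise ConfigurationError('{} not configured'.format(attr))

c = Client()
c._check_configuration('client_id')                   # passes silently
c._check_configuration('client_id', 'redirect_uri')   # raises ConfigurationError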