Dataset schema (column, type, observed range):

  repo              string   length 7 - 55
  path              string   length 4 - 223
  url               string   length 87 - 315
  code              string   length 75 - 104k
  code_tokens       list
  docstring         string   length 1 - 46.9k
  docstring_tokens  list
  language          string   1 distinct value (python)
  partition         string   3 distinct values (train / valid / test)
  avg_line_len      float64  7.91 - 980
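The schema above is a CodeSearchNet-style function/docstring corpus. As a minimal sketch, a table with these columns can be loaded and inspected with the HuggingFace `datasets` library; the dataset path below is a placeholder, not the actual source of this dump:

    import datasets

    # Placeholder repository id -- substitute the real dataset location.
    ds = datasets.load_dataset("<namespace>/<codesearchnet-python>", split="train")
    row = ds[0]
    print(row["repo"], row["path"], row["partition"], row["avg_line_len"])
    print(row["docstring"])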
jantman/awslimitchecker
awslimitchecker/services/ses.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/ses.py#L56-L80
def find_usage(self):
    """
    Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    for lim in self.limits.values():
        lim._reset_usage()
    try:
        self.connect()
        resp = self.conn.get_send_quota()
    except EndpointConnectionError as ex:
        logger.warning('Skipping SES: %s', str(ex))
        return
    except ClientError as ex:
        if ex.response['Error']['Code'] in ['AccessDenied', '503']:
            logger.warning('Skipping SES: %s', ex)
            return
        raise
    self.limits['Daily sending quota']._add_current_usage(
        resp['SentLast24Hours']
    )
    self._have_usage = True
    logger.debug("Done checking usage.")
[ "def", "find_usage", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Checking usage for service %s\"", ",", "self", ".", "service_name", ")", "for", "lim", "in", "self", ".", "limits", ".", "values", "(", ")", ":", "lim", ".", "_reset_usage", "(", ")", "try", ":", "self", ".", "connect", "(", ")", "resp", "=", "self", ".", "conn", ".", "get_send_quota", "(", ")", "except", "EndpointConnectionError", "as", "ex", ":", "logger", ".", "warning", "(", "'Skipping SES: %s'", ",", "str", "(", "ex", ")", ")", "return", "except", "ClientError", "as", "ex", ":", "if", "ex", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", "in", "[", "'AccessDenied'", ",", "'503'", "]", ":", "logger", ".", "warning", "(", "'Skipping SES: %s'", ",", "ex", ")", "return", "raise", "self", ".", "limits", "[", "'Daily sending quota'", "]", ".", "_add_current_usage", "(", "resp", "[", "'SentLast24Hours'", "]", ")", "self", ".", "_have_usage", "=", "True", "logger", ".", "debug", "(", "\"Done checking usage.\"", ")" ]
Determine the current usage for each limit of this service, and update corresponding Limit via :py:meth:`~.AwsLimit._add_current_usage`.
[ "Determine", "the", "current", "usage", "for", "each", "limit", "of", "this", "service", "and", "update", "corresponding", "Limit", "via", ":", "py", ":", "meth", ":", "~", ".", "AwsLimit", ".", "_add_current_usage", "." ]
python
train
36.88
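A hedged sketch of the underlying AWS call used by find_usage() above: get_send_quota() is the SES (v1) API whose 'SentLast24Hours' field feeds the limit; the client setup and region are assumptions:

    import boto3

    ses = boto3.client('ses', region_name='us-east-1')  # region is an assumption
    quota = ses.get_send_quota()
    print(quota['SentLast24Hours'], quota['Max24HourSend'])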
veltzer/pypitools
pypitools/common.py
https://github.com/veltzer/pypitools/blob/5f097be21e9bc65578eed5b6b7855c1945540701/pypitools/common.py#L187-L205
def upload_by_gemfury(self):
    """
    upload to gemfury
    :return:
    """
    check_call_no_output([
        '{}'.format(self.python),
        'setup.py',
        'sdist',
    ])
    filename = self.get_package_filename()
    # The command line is the one recommended by gemfury at
    # https://manage.fury.io/dashboard/[username]/push
    check_call_no_output([
        'fury',
        'push',
        filename,
        '--as={}'.format(self.gemfury_user),
    ])
[ "def", "upload_by_gemfury", "(", "self", ")", ":", "check_call_no_output", "(", "[", "'{}'", ".", "format", "(", "self", ".", "python", ")", ",", "'setup.py'", ",", "'sdist'", ",", "]", ")", "filename", "=", "self", ".", "get_package_filename", "(", ")", "# The command line is the one recommended by gemfury at", "# https://manage.fury.io/dashboard/[username]/push", "check_call_no_output", "(", "[", "'fury'", ",", "'push'", ",", "filename", ",", "'--as={}'", ".", "format", "(", "self", ".", "gemfury_user", ")", ",", "]", ")" ]
upload to gemfury :return:
[ "upload", "to", "gemfury", ":", "return", ":" ]
python
train
27.631579
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L6138-L6147
def ToBitmap(self, x: int = 0, y: int = 0, width: int = 0, height: int = 0) -> Bitmap:
    """
    Capture control to a Bitmap object.
    x, y: int, the point in control's internal position(from 0,0).
    width, height: int, image's width and height from x, y, use 0 for entire area.
                   If width(or height) < 0, image size will be control's
                   width(or height) - width(or height).
    """
    bitmap = Bitmap()
    bitmap.FromControl(self, x, y, width, height)
    return bitmap
[ "def", "ToBitmap", "(", "self", ",", "x", ":", "int", "=", "0", ",", "y", ":", "int", "=", "0", ",", "width", ":", "int", "=", "0", ",", "height", ":", "int", "=", "0", ")", "->", "Bitmap", ":", "bitmap", "=", "Bitmap", "(", ")", "bitmap", ".", "FromControl", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ")", "return", "bitmap" ]
Capture control to a Bitmap object. x, y: int, the point in control's internal position(from 0,0). width, height: int, image's width and height from x, y, use 0 for entire area. If width(or height) < 0, image size will be control's width(or height) - width(or height).
[ "Capture", "control", "to", "a", "Bitmap", "object", ".", "x", "y", ":", "int", "the", "point", "in", "control", "s", "internal", "position", "(", "from", "0", "0", ")", ".", "width", "height", ":", "int", "image", "s", "width", "and", "height", "from", "x", "y", "use", "0", "for", "entire", "area", ".", "If", "width", "(", "or", "height", ")", "<", "0", "image", "size", "will", "be", "control", "s", "width", "(", "or", "height", ")", "-", "width", "(", "or", "height", ")", "." ]
python
valid
51.9
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/utils.py
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L51-L60
def _generate_password():
    """Create a random password

    The password is made by taking a uuid and passing it though sha1sum.
    We may change this in future to gain more entropy.

    This is based on the tripleo command os-make-password
    """
    uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
    return hashlib.sha1(uuid_str).hexdigest()
[ "def", "_generate_password", "(", ")", ":", "uuid_str", "=", "six", ".", "text_type", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "encode", "(", "\"UTF-8\"", ")", "return", "hashlib", ".", "sha1", "(", "uuid_str", ")", ".", "hexdigest", "(", ")" ]
Create a random password The password is made by taking a uuid and passing it though sha1sum. We may change this in future to gain more entropy. This is based on the tripleo command os-make-password
[ "Create", "a", "random", "password" ]
python
train
34.9
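A dependency-free equivalent of the same recipe (six only bridges Python 2/3 in the original); generate_password is a name introduced here for illustration:

    import hashlib
    import uuid

    def generate_password():
        # sha1 hex digest of a random UUID, as in _generate_password above
        uuid_str = str(uuid.uuid4()).encode("utf-8")
        return hashlib.sha1(uuid_str).hexdigest()

    print(generate_password())  # 40 hex characters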
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L688-L710
def boost(self, dtrain, grad, hess):
    """
    Boost the booster for one iteration, with customized gradient statistics.

    Parameters
    ----------
    dtrain : DMatrix
        The training DMatrix.
    grad : list
        The first order of gradient.
    hess : list
        The second order of gradient.
    """
    if len(grad) != len(hess):
        raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
    if not isinstance(dtrain, DMatrix):
        raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
    self._validate_features(dtrain)
    _check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
                                           c_array(ctypes.c_float, grad),
                                           c_array(ctypes.c_float, hess),
                                           len(grad)))
[ "def", "boost", "(", "self", ",", "dtrain", ",", "grad", ",", "hess", ")", ":", "if", "len", "(", "grad", ")", "!=", "len", "(", "hess", ")", ":", "raise", "ValueError", "(", "'grad / hess length mismatch: {} / {}'", ".", "format", "(", "len", "(", "grad", ")", ",", "len", "(", "hess", ")", ")", ")", "if", "not", "isinstance", "(", "dtrain", ",", "DMatrix", ")", ":", "raise", "TypeError", "(", "'invalid training matrix: {}'", ".", "format", "(", "type", "(", "dtrain", ")", ".", "__name__", ")", ")", "self", ".", "_validate_features", "(", "dtrain", ")", "_check_call", "(", "_LIB", ".", "XGBoosterBoostOneIter", "(", "self", ".", "handle", ",", "dtrain", ".", "handle", ",", "c_array", "(", "ctypes", ".", "c_float", ",", "grad", ")", ",", "c_array", "(", "ctypes", ".", "c_float", ",", "hess", ")", ",", "len", "(", "grad", ")", ")", ")" ]
Boost the booster for one iteration, with customized gradient statistics. Parameters ---------- dtrain : DMatrix The training DMatrix. grad : list The first order of gradient. hess : list The second order of gradient.
[ "Boost", "the", "booster", "for", "one", "iteration", "with", "customized", "gradient", "statistics", "." ]
python
train
40.782609
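A hedged sketch of driving Booster.boost() with custom gradient statistics; it matches the signature shown above but is untested against this vendored xgboost copy, and the squared-error gradients are illustrative:

    import numpy as np
    import xgboost as xgb

    X, y = np.random.rand(100, 5), np.random.rand(100)
    dtrain = xgb.DMatrix(X, label=y)
    booster = xgb.Booster({'max_depth': 2}, [dtrain])

    preds = booster.predict(dtrain)
    grad = preds - y            # first-order gradient of squared error
    hess = np.ones_like(preds)  # second-order gradient
    booster.boost(dtrain, grad, hess)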
eng-tools/sfsimodels
sfsimodels/models/soils.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1112-L1123
def get_v_total_stress_at_depth(self, z):
    """
    Determine the vertical total stress at depth z, where z can be a number
    or an array of numbers.
    """
    if not hasattr(z, "__len__"):
        return self.one_vertical_total_stress(z)
    else:
        sigma_v_effs = []
        for value in z:
            sigma_v_effs.append(self.one_vertical_total_stress(value))
        return np.array(sigma_v_effs)
[ "def", "get_v_total_stress_at_depth", "(", "self", ",", "z", ")", ":", "if", "not", "hasattr", "(", "z", ",", "\"__len__\"", ")", ":", "return", "self", ".", "one_vertical_total_stress", "(", "z", ")", "else", ":", "sigma_v_effs", "=", "[", "]", "for", "value", "in", "z", ":", "sigma_v_effs", ".", "append", "(", "self", ".", "one_vertical_total_stress", "(", "value", ")", ")", "return", "np", ".", "array", "(", "sigma_v_effs", ")" ]
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
[ "Determine", "the", "vertical", "total", "stress", "at", "depth", "z", "where", "z", "can", "be", "a", "number", "or", "an", "array", "of", "numbers", "." ]
python
train
36.583333
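The scalar-or-array dispatch used above, as a standalone pattern; _vectorise and the dummy function are names introduced here for illustration:

    import numpy as np

    def _vectorise(func, z):
        if not hasattr(z, "__len__"):          # plain number in, number out
            return func(z)
        return np.array([func(v) for v in z])  # sequence in, ndarray out

    print(_vectorise(lambda v: 9.81 * v, 2.0))         # 19.62
    print(_vectorise(lambda v: 9.81 * v, [1.0, 2.0]))  # [ 9.81 19.62]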
google/jsonnet
case_studies/micro_fractal/tilegen/mandelbrot_service.py
https://github.com/google/jsonnet/blob/c323f5ce5b8aa663585d23dc0fb94d4b166c6f16/case_studies/micro_fractal/tilegen/mandelbrot_service.py#L66-L104
def handle_fractal():
    """Get fractal coordinates from query string, call mandelbrot to generate image.

    Returns:
        The image, wrapped in an HTML response.
    """
    if check_etag():
        return flask.make_response(), 304
    level = int(flask.request.args.get("l", "0"))
    x = float(int(flask.request.args.get("x", "0")))
    y = float(int(flask.request.args.get("y", "0")))
    if level < 0:
        level = 0
    grid_size = math.pow(2, level)
    x0 = "%.30g" % ((x - 0) / grid_size)
    y0 = "%.30g" % ((y - 0) / grid_size)
    x1 = "%.30g" % ((x + 1) / grid_size)
    y1 = "%.30g" % ((y + 1) / grid_size)
    print "Tile: %s %s %s %s" % (x0, y0, x1, y1)
    width = str(CONF['width'])
    height = str(CONF['height'])
    iters = str(CONF['iters'])
    cmd = ['./mandelbrot', width, height, iters, x0, y0, x1, y1]
    image_data = subprocess.check_output(cmd)
    response = flask.make_response(image_data)
    response.headers["Content-Type"] = "image/png"
    response.headers["cache-control"] = "public, max-age=600"
    response.headers["ETag"] = ETAG
    return response
[ "def", "handle_fractal", "(", ")", ":", "if", "check_etag", "(", ")", ":", "return", "flask", ".", "make_response", "(", ")", ",", "304", "level", "=", "int", "(", "flask", ".", "request", ".", "args", ".", "get", "(", "\"l\"", ",", "\"0\"", ")", ")", "x", "=", "float", "(", "int", "(", "flask", ".", "request", ".", "args", ".", "get", "(", "\"x\"", ",", "\"0\"", ")", ")", ")", "y", "=", "float", "(", "int", "(", "flask", ".", "request", ".", "args", ".", "get", "(", "\"y\"", ",", "\"0\"", ")", ")", ")", "if", "level", "<", "0", ":", "level", "=", "0", "grid_size", "=", "math", ".", "pow", "(", "2", ",", "level", ")", "x0", "=", "\"%.30g\"", "%", "(", "(", "x", "-", "0", ")", "/", "grid_size", ")", "y0", "=", "\"%.30g\"", "%", "(", "(", "y", "-", "0", ")", "/", "grid_size", ")", "x1", "=", "\"%.30g\"", "%", "(", "(", "x", "+", "1", ")", "/", "grid_size", ")", "y1", "=", "\"%.30g\"", "%", "(", "(", "y", "+", "1", ")", "/", "grid_size", ")", "print", "\"Tile: %s %s %s %s\"", "%", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ")", "width", "=", "str", "(", "CONF", "[", "'width'", "]", ")", "height", "=", "str", "(", "CONF", "[", "'height'", "]", ")", "iters", "=", "str", "(", "CONF", "[", "'iters'", "]", ")", "cmd", "=", "[", "'./mandelbrot'", ",", "width", ",", "height", ",", "iters", ",", "x0", ",", "y0", ",", "x1", ",", "y1", "]", "image_data", "=", "subprocess", ".", "check_output", "(", "cmd", ")", "response", "=", "flask", ".", "make_response", "(", "image_data", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "\"image/png\"", "response", ".", "headers", "[", "\"cache-control\"", "]", "=", "\"public, max-age=600\"", "response", ".", "headers", "[", "\"ETag\"", "]", "=", "ETAG", "return", "response" ]
Get fractal coordinates from query string, call mandelbrot to generate image. Returns: The image, wrapped in an HTML response.
[ "Get", "fractal", "coordinates", "from", "query", "string", "call", "mandelbrot", "to", "generate", "image", "." ]
python
train
27.512821
mila-iqia/picklable-itertools
picklable_itertools/iter_dispatch.py
https://github.com/mila-iqia/picklable-itertools/blob/e00238867875df0258cf4f83f528d846e7c1afc4/picklable_itertools/iter_dispatch.py#L15-L41
def iter_(obj):
    """A custom replacement for iter(), dispatching a few custom picklable
    iterators for known types.
    """
    if six.PY2:
        file_types = file,  # noqa
    if six.PY3:
        file_types = io.IOBase,
    dict_items = {}.items().__class__
    dict_values = {}.values().__class__
    dict_keys = {}.keys().__class__
    dict_view = (dict_items, dict_values, dict_keys)

    if isinstance(obj, dict):
        return ordered_sequence_iterator(list(obj.keys()))
    if isinstance(obj, file_types):
        return file_iterator(obj)
    if six.PY2:
        if isinstance(obj, (list, tuple)):
            return ordered_sequence_iterator(obj)
        if isinstance(obj, xrange):  # noqa
            return range_iterator(obj)
    if NUMPY_AVAILABLE and isinstance(obj, numpy.ndarray):
        return ordered_sequence_iterator(obj)
    if six.PY3 and isinstance(obj, dict_view):
        return ordered_sequence_iterator(list(obj))
    return iter(obj)
[ "def", "iter_", "(", "obj", ")", ":", "if", "six", ".", "PY2", ":", "file_types", "=", "file", ",", "# noqa", "if", "six", ".", "PY3", ":", "file_types", "=", "io", ".", "IOBase", ",", "dict_items", "=", "{", "}", ".", "items", "(", ")", ".", "__class__", "dict_values", "=", "{", "}", ".", "values", "(", ")", ".", "__class__", "dict_keys", "=", "{", "}", ".", "keys", "(", ")", ".", "__class__", "dict_view", "=", "(", "dict_items", ",", "dict_values", ",", "dict_keys", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "ordered_sequence_iterator", "(", "list", "(", "obj", ".", "keys", "(", ")", ")", ")", "if", "isinstance", "(", "obj", ",", "file_types", ")", ":", "return", "file_iterator", "(", "obj", ")", "if", "six", ".", "PY2", ":", "if", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "ordered_sequence_iterator", "(", "obj", ")", "if", "isinstance", "(", "obj", ",", "xrange", ")", ":", "# noqa", "return", "range_iterator", "(", "obj", ")", "if", "NUMPY_AVAILABLE", "and", "isinstance", "(", "obj", ",", "numpy", ".", "ndarray", ")", ":", "return", "ordered_sequence_iterator", "(", "obj", ")", "if", "six", ".", "PY3", "and", "isinstance", "(", "obj", ",", "dict_view", ")", ":", "return", "ordered_sequence_iterator", "(", "list", "(", "obj", ")", ")", "return", "iter", "(", "obj", ")" ]
A custom replacement for iter(), dispatching a few custom picklable iterators for known types.
[ "A", "custom", "replacement", "for", "iter", "()", "dispatching", "a", "few", "custom", "picklable", "iterators", "for", "known", "types", "." ]
python
train
35.925926
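The dict-view classes the dispatcher builds its isinstance() checks from can be reproduced standalone:

    dict_keys = {}.keys().__class__
    dict_values = {}.values().__class__
    print(isinstance({'a': 1}.keys(), dict_keys))      # True
    print(isinstance({'a': 1}.values(), dict_values))  # True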
rocky/python3-trepan
trepan/lib/sighandler.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/sighandler.py#L275-L282
def check_and_adjust_sighandlers(self):
    """Check to see if any of the signal handlers we are interested in have
    changed or is not initially set. Change any that are not right.
    """
    for signame in list(self.sigs.keys()):
        if not self.check_and_adjust_sighandler(signame, self.sigs):
            break
        pass
    return
[ "def", "check_and_adjust_sighandlers", "(", "self", ")", ":", "for", "signame", "in", "list", "(", "self", ".", "sigs", ".", "keys", "(", ")", ")", ":", "if", "not", "self", ".", "check_and_adjust_sighandler", "(", "signame", ",", "self", ".", "sigs", ")", ":", "break", "pass", "return" ]
Check to see if any of the signal handlers we are interested in have changed or is not initially set. Change any that are not right.
[ "Check", "to", "see", "if", "any", "of", "the", "signal", "handlers", "we", "are", "interested", "in", "have", "changed", "or", "is", "not", "initially", "set", ".", "Change", "any", "that", "are", "not", "right", "." ]
python
test
45.25
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_ras.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_ras.py#L141-L155
def logging_syslog_server_port(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
    syslog_server = ET.SubElement(logging, "syslog-server")
    syslogip_key = ET.SubElement(syslog_server, "syslogip")
    syslogip_key.text = kwargs.pop('syslogip')
    use_vrf_key = ET.SubElement(syslog_server, "use-vrf")
    use_vrf_key.text = kwargs.pop('use_vrf')
    port = ET.SubElement(syslog_server, "port")
    port.text = kwargs.pop('port')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "logging_syslog_server_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "logging", "=", "ET", ".", "SubElement", "(", "config", ",", "\"logging\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ras\"", ")", "syslog_server", "=", "ET", ".", "SubElement", "(", "logging", ",", "\"syslog-server\"", ")", "syslogip_key", "=", "ET", ".", "SubElement", "(", "syslog_server", ",", "\"syslogip\"", ")", "syslogip_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'syslogip'", ")", "use_vrf_key", "=", "ET", ".", "SubElement", "(", "syslog_server", ",", "\"use-vrf\"", ")", "use_vrf_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'use_vrf'", ")", "port", "=", "ET", ".", "SubElement", "(", "syslog_server", ",", "\"port\"", ")", "port", ".", "text", "=", "kwargs", ".", "pop", "(", "'port'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
45.266667
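The ElementTree construction pattern used by the generated code above, standalone (the namespace URI here is a stand-in, not Brocade's):

    import xml.etree.ElementTree as ET

    config = ET.Element("config")
    logging_el = ET.SubElement(config, "logging", xmlns="urn:example")
    port = ET.SubElement(logging_el, "port")
    port.text = "514"
    print(ET.tostring(config).decode())
    # <config><logging xmlns="urn:example"><port>514</port></logging></config>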
zqfang/GSEApy
gseapy/enrichr.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L219-L248
def get_organism(self):
    """Select Enrichr organism from below:

    Human & Mouse: H. sapiens & M. musculus
    Fly: D. melanogaster
    Yeast: S. cerevisiae
    Worm: C. elegans
    Fish: D. rerio
    """
    organism = {'default': ['', 'hs', 'mm', 'human', 'mouse',
                            'homo sapiens', 'mus musculus',
                            'h. sapiens', 'm. musculus'],
                'Fly': ['fly', 'd. melanogaster', 'drosophila melanogaster'],
                'Yeast': ['yeast', 's. cerevisiae', 'saccharomyces cerevisiae'],
                'Worm': ['worm', 'c. elegans', 'caenorhabditis elegans', 'nematode'],
                'Fish': ['fish', 'd. rerio', 'danio rerio', 'zebrafish']
                }
    for k, v in organism.items():
        if self.organism.lower() in v:
            self._organism = k
    if self._organism is None:
        raise Exception("No supported organism found !!!")
    if self._organism == 'default':
        self._organism = ''
    return
[ "def", "get_organism", "(", "self", ")", ":", "organism", "=", "{", "'default'", ":", "[", "''", ",", "'hs'", ",", "'mm'", ",", "'human'", ",", "'mouse'", ",", "'homo sapiens'", ",", "'mus musculus'", ",", "'h. sapiens'", ",", "'m. musculus'", "]", ",", "'Fly'", ":", "[", "'fly'", ",", "'d. melanogaster'", ",", "'drosophila melanogaster'", "]", ",", "'Yeast'", ":", "[", "'yeast'", ",", "'s. cerevisiae'", ",", "'saccharomyces cerevisiae'", "]", ",", "'Worm'", ":", "[", "'worm'", ",", "'c. elegans'", ",", "'caenorhabditis elegans'", ",", "'nematode'", "]", ",", "'Fish'", ":", "[", "'fish'", ",", "'d. rerio'", ",", "'danio rerio'", ",", "'zebrafish'", "]", "}", "for", "k", ",", "v", "in", "organism", ".", "items", "(", ")", ":", "if", "self", ".", "organism", ".", "lower", "(", ")", "in", "v", ":", "self", ".", "_organism", "=", "k", "if", "self", ".", "_organism", "is", "None", ":", "raise", "Exception", "(", "\"No supported organism found !!!\"", ")", "if", "self", ".", "_organism", "==", "'default'", ":", "self", ".", "_organism", "=", "''", "return" ]
Select Enrichr organism from below: Human & Mouse: H. sapiens & M. musculus Fly: D. melanogaster Yeast: S. cerevisiae Worm: C. elegans Fish: D. rerio
[ "Select", "Enrichr", "organism", "from", "below", ":" ]
python
test
35.8
sony/nnabla
python/src/nnabla/parametric_functions.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L800-L934
def cpd3_convolution(inp, outmaps, kernel, r,
                     pad=None, stride=None, dilation=None,
                     oik_init=None, b_init=None,
                     base_axis=1, fix_parameters=False, rng=None,
                     with_bias=True,
                     max_iter=500, stopping_criterion=1e-5, lambda_reg=0.0):
    """CP convolution is a low rank approximation of a convolution layer.

    A 3D tensor containing the parameter is built by collapsing the N-D
    kernels into 1D, then the tensor is decomposed into three matrices.
    The decomposed layer can be seen as linear combinations of the input
    feature maps to :math:`{R}` feature maps followed by a depthwise
    convolution and followed by linear combinations of the feature maps
    to compute the output feature maps.

    The CP decomposition allows to approximate the kernel tensor by
    :math:`{R}` rank-1 tensors of the form:

    .. math::

        \\sum_{r=1}^{R} \\lambda_r {\\mathbf{o}^{(r)} \\otimes \\mathbf{i}^{(r)} \\otimes \\mathbf{k}^{(r)}},

    where :math:`{\\lambda}_r` is the normalization coefficient and
    :math:`{\\otimes}` is the outer product.

    If `oik_init` is a numpy array, U and V are computed so that uv_init
    can be approximates from UV.

    If `oik_init` is None or an initializer, the product of U and V
    approximate the randomly initialized array.

    If `O`, `I` and `K` exist in context, they are used to initialize the
    layer and oik_init is not used.

    Suppose the kernel tensor of the affine is of :math:`{I \\times O}` and
    the compression rate you want to specify is :math:`{CR}`, then you set
    :math:`{R}` as

    .. math::

        R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{O + I + K^2} \\right\\rfloor.

    References:
        - Lebedev, Vadim, Yaroslav Ganin, Maksim Rakhuba, Ivan Oseledets,
          and Victor Lempitsky, "Speeding-up convolutional neural networks
          using fine-tuned cp-decomposition.",
          arXiv preprint arXiv:1412.6553 (2014).
        - Marcella Astrid, Seung-Ik Lee, "CP-decomposition with Tensor
          Power Method for Convolutional Neural Networks Compression",
          BigComp 2017.

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of convolution kernels (which is equal to the
            number of output channels). For example, to apply convolution on
            an input with 16 types of filters, specify 16.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For
            example, to apply convolution on an image with a 3 (height) by 5
            (width) two-dimensional kernel, specify (3,5).
        r (int): rank of the factorized layer
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        oik_init (numpy array or :obj:`nnabla.initializer.BaseInitializer`):
            Initializer for weight. By default, it is initialized with
            :obj:`nnabla.initializer.UniformInitializer` within the range
            determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or
            :obj:`numpy.ndarray`): Initializer for bias. It is initialized
            with zeros if `with_bias` is `True`.
        base_axis (int): Dimensions up to `base_axis` are treated as the
            sample dimensions.
        fix_parameters (bool): When set to `True`, the weights and biases
            will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.
        max_iter (int): Max iteration of the ALS.
        stopping_criterion (float): Threshold for stopping the ALS. If the
            value is negative, the convergence check is ignored; in other
            words, it may reduce the computation time.
        lambda_reg (float): regularization parameter for the ALS.
            Larger lambda_reg means larger regularization.

    Returns:
        :class:`~nnabla.Variable`: :math:`(B + 1)`-D array.
        (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
    """
    if oik_init is None:
        oik_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps,
                                    tuple(kernel)), rng=rng)
    if type(oik_init) is np.ndarray:
        # TODO: Assert that size of uv_init is correct
        # uv is initialize with numpy array
        oik = oik_init
    else:
        # uv is initialize from initializer
        oik = oik_init((outmaps, inp.shape[base_axis]) + tuple(kernel))
    # flatten kernels
    oik = oik.reshape((outmaps, inp.shape[base_axis], np.prod(kernel)))

    o = get_parameter('O')
    i = get_parameter('I')
    k = get_parameter('K')

    if (o is None) or (i is None) or (k is None):
        assert r > 0, "cpd3_convolution: The rank must larger than zero"
        from nnabla.utils.factorization import cpd
        als = cpd.ALS()
        U, lmbda = als.solve(X=oik, rank=r,
                             max_iter=max_iter,
                             stopping_criterion=stopping_criterion,
                             lambda_reg=lambda_reg,
                             dtype=oik.dtype,
                             rng=rng)
        o_ = U[0] * lmbda
        i_ = U[1]
        k_ = U[2]

        kernel_one = (1,) * len(kernel)  # 1x1 for 2D convolution
        inmaps = inp.shape[base_axis]

        # reshape I : (I,r) -> (r,I,1,1)
        i = nn.Variable((r, inmaps) + kernel_one, need_grad=True)
        i.d = np.transpose(i_).reshape((r, inmaps) + kernel_one)
        nn.parameter.set_parameter("I", i)

        # reshape O : (O,r) -> (O,r,1,1)
        o = nn.Variable((outmaps, r) + kernel_one, need_grad=True)
        o.d = o_.reshape((outmaps, r) + kernel_one)
        nn.parameter.set_parameter("O", o)

        # reshape K : (K*K,r) -> (r,K,K)
        k = nn.Variable((r,) + kernel, need_grad=True)
        k.d = np.transpose(k_).reshape((r,) + kernel)
        nn.parameter.set_parameter("K", k)

    if fix_parameters == o.need_grad:
        o = o.get_unlinked_variable(need_grad=not fix_parameters)
    if fix_parameters == i.need_grad:
        i = i.get_unlinked_variable(need_grad=not fix_parameters)
    if fix_parameters == k.need_grad:
        k = k.get_unlinked_variable(need_grad=not fix_parameters)

    if with_bias and b_init is None:
        b_init = ConstantInitializer()
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (outmaps,), b_init, True, not fix_parameters)

    y = F.convolution(inp, i, bias=None, base_axis=base_axis,
                      pad=None, stride=None, dilation=None, group=1)
    y = F.depthwise_convolution(y, k, bias=None, base_axis=base_axis,
                                pad=pad, stride=stride, dilation=dilation,
                                multiplier=1)
    y = F.convolution(y, o, bias=b, base_axis=base_axis,
                      pad=None, stride=None, dilation=None, group=1)
    return y
[ "def", "cpd3_convolution", "(", "inp", ",", "outmaps", ",", "kernel", ",", "r", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "oik_init", "=", "None", ",", "b_init", "=", "None", ",", "base_axis", "=", "1", ",", "fix_parameters", "=", "False", ",", "rng", "=", "None", ",", "with_bias", "=", "True", ",", "max_iter", "=", "500", ",", "stopping_criterion", "=", "1e-5", ",", "lambda_reg", "=", "0.0", ")", ":", "if", "oik_init", "is", "None", ":", "oik_init", "=", "UniformInitializer", "(", "calc_uniform_lim_glorot", "(", "inp", ".", "shape", "[", "base_axis", "]", ",", "outmaps", ",", "tuple", "(", "kernel", ")", ")", ",", "rng", "=", "rng", ")", "if", "type", "(", "oik_init", ")", "is", "np", ".", "ndarray", ":", "# TODO: Assert that size of uv_init is correct", "# uv is initialize with numpy array", "oik", "=", "oik_init", "else", ":", "# uv is initialize from initializer", "oik", "=", "oik_init", "(", "(", "outmaps", ",", "inp", ".", "shape", "[", "base_axis", "]", ")", "+", "tuple", "(", "kernel", ")", ")", "# flatten kernels", "oik", "=", "oik", ".", "reshape", "(", "(", "outmaps", ",", "inp", ".", "shape", "[", "base_axis", "]", ",", "np", ".", "prod", "(", "kernel", ")", ")", ")", "o", "=", "get_parameter", "(", "'O'", ")", "i", "=", "get_parameter", "(", "'I'", ")", "k", "=", "get_parameter", "(", "'K'", ")", "if", "(", "o", "is", "None", ")", "or", "(", "i", "is", "None", ")", "or", "(", "k", "is", "None", ")", ":", "assert", "r", ">", "0", ",", "\"cpd3_convolution: The rank must larger than zero\"", "from", "nnabla", ".", "utils", ".", "factorization", "import", "cpd", "als", "=", "cpd", ".", "ALS", "(", ")", "U", ",", "lmbda", "=", "als", ".", "solve", "(", "X", "=", "oik", ",", "rank", "=", "r", ",", "max_iter", "=", "max_iter", ",", "stopping_criterion", "=", "stopping_criterion", ",", "lambda_reg", "=", "lambda_reg", ",", "dtype", "=", "oik", ".", "dtype", ",", "rng", "=", "rng", ")", "o_", "=", "U", "[", "0", "]", "*", "lmbda", "i_", "=", "U", "[", "1", "]", "k_", "=", "U", "[", "2", "]", "kernel_one", "=", "(", "1", ",", ")", "*", "len", "(", "kernel", ")", "# 1x1 for 2D convolution", "inmaps", "=", "inp", ".", "shape", "[", "base_axis", "]", "# reshape I : (I,r) -> (r,I,1,1)", "i", "=", "nn", ".", "Variable", "(", "(", "r", ",", "inmaps", ")", "+", "kernel_one", ",", "need_grad", "=", "True", ")", "i", ".", "d", "=", "np", ".", "transpose", "(", "i_", ")", ".", "reshape", "(", "(", "r", ",", "inmaps", ")", "+", "kernel_one", ")", "nn", ".", "parameter", ".", "set_parameter", "(", "\"I\"", ",", "i", ")", "# reshape O : (O,r) -> (O,r,1,1)", "o", "=", "nn", ".", "Variable", "(", "(", "outmaps", ",", "r", ")", "+", "kernel_one", ",", "need_grad", "=", "True", ")", "o", ".", "d", "=", "o_", ".", "reshape", "(", "(", "outmaps", ",", "r", ")", "+", "kernel_one", ")", "nn", ".", "parameter", ".", "set_parameter", "(", "\"O\"", ",", "o", ")", "# reshape K : (K*K,r) -> (r,K,K)", "k", "=", "nn", ".", "Variable", "(", "(", "r", ",", ")", "+", "kernel", ",", "need_grad", "=", "True", ")", "k", ".", "d", "=", "np", ".", "transpose", "(", "k_", ")", ".", "reshape", "(", "(", "r", ",", ")", "+", "kernel", ")", "nn", ".", "parameter", ".", "set_parameter", "(", "\"K\"", ",", "k", ")", "if", "fix_parameters", "==", "o", ".", "need_grad", ":", "o", "=", "o", ".", "get_unlinked_variable", "(", "need_grad", "=", "not", "fix_parameters", ")", "if", "fix_parameters", "==", "i", ".", "need_grad", ":", "i", "=", "i", ".", "get_unlinked_variable", "(", 
"need_grad", "=", "not", "fix_parameters", ")", "if", "fix_parameters", "==", "k", ".", "need_grad", ":", "k", "=", "k", ".", "get_unlinked_variable", "(", "need_grad", "=", "not", "fix_parameters", ")", "if", "with_bias", "and", "b_init", "is", "None", ":", "b_init", "=", "ConstantInitializer", "(", ")", "b", "=", "None", "if", "with_bias", ":", "b", "=", "get_parameter_or_create", "(", "\"b\"", ",", "(", "outmaps", ",", ")", ",", "b_init", ",", "True", ",", "not", "fix_parameters", ")", "y", "=", "F", ".", "convolution", "(", "inp", ",", "i", ",", "bias", "=", "None", ",", "base_axis", "=", "base_axis", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "group", "=", "1", ")", "y", "=", "F", ".", "depthwise_convolution", "(", "y", ",", "k", ",", "bias", "=", "None", ",", "base_axis", "=", "base_axis", ",", "pad", "=", "pad", ",", "stride", "=", "stride", ",", "dilation", "=", "dilation", ",", "multiplier", "=", "1", ")", "y", "=", "F", ".", "convolution", "(", "y", ",", "o", ",", "bias", "=", "b", ",", "base_axis", "=", "base_axis", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "group", "=", "1", ")", "return", "y" ]
CP convolution is a low rank approximation of a convolution layer. A 3D tensor containing the parameter is built by collapsing the N-D kernels into 1D, then the tensor is decomposed into three matrices. The decomposed layer can be seen as linear combinations of the input feature maps to :math:`{R}` feature maps followed by a depthwise convolution and followed by linear combinations of the feature maps to compute the output feature maps. The CP decomposition allows to approximate the kernel tensor by :math:`{R}` rank-1 tensors of the form: .. math:: \\sum_{r=1}^{R} \\lambda_r {\\mathbf{o}^{(r)} \\otimes \\mathbf{i}^{(r)} \\otimes \\mathbf{k}^{(r)}}, where :math:`{\\lambda}_r` is the normalization coefficient and :math:`{\\otimes}` is the outer product. If `oik_init` is a numpy array, U and V are computed so that uv_init can be approximates from UV If `oik_init` is None or an initializer, the product of U and V approximate the randomly initialized array If `O`, `I` and `K` exist in context, they are used to initialize the layer and oik_init is not used. Suppose the kernel tensor of the affine is of :math:`{I \\times O}` and the compression rate you want to specify is :math:`{CR}`, then you set :math:`{R}` as .. math:: R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{O + I + K^2} \\right\\rfloor. References: - Lebedev, Vadim, Yaroslav Ganin, Maksim Rakhuba, Ivan Oseledets, and Victor Lempitsky, "Speeding-up convolutional neural networks using fine-tuned cp-decomposition.", arXiv preprint arXiv:1412.6553 (2014). - Marcella Astrid, Seung-Ik Lee, "CP-decomposition with Tensor Power Method for Convolutional Neural Networks Compression", BigComp 2017. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). r (int): rank of the factorized layer pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. oik_init (numpy array or :obj:`nnabla.initializer.BaseInitializer`): Initializer for weight. Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. It is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. max_iter (int): Max iteration of the ALS. stopping_criterion (float): Threshold for stopping the ALS. If the value is negative, the convergence check is ignored; in other words, it may reduce the computation time. lambda_reg (float): regularization parameter for the ALS. Larger lambda_reg means larger regularization. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
[ "CP", "convolution", "is", "a", "low", "rank", "approximation", "of", "a", "convolution", "layer", ".", "A", "3D", "tensor", "containing", "the", "parameter", "is", "built", "by", "collapsing", "the", "N", "-", "D", "kernels", "into", "1D", "then", "the", "tensor", "is", "decomposed", "into", "three", "matrices", ".", "The", "decomposed", "layer", "can", "be", "seen", "as", "linear", "combinations", "of", "the", "input", "feature", "maps", "to", ":", "math", ":", "{", "R", "}", "feature", "maps", "followed", "by", "a", "depthwise", "convolution", "and", "followed", "by", "linear", "combinations", "of", "the", "feature", "maps", "to", "compute", "the", "output", "feature", "maps", "." ]
python
train
50.740741
bitesofcode/projexui
projexui/dialogs/xconfigdialog/xconfigdialog.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xconfigdialog/xconfigdialog.py#L229-L257
def edit(plugins, parent=None, default=None, modal=True):
    """
    Prompts the user to edit the config settings for the inputed config \
    plugins.

    :param      plugins | [<XConfigPlugin>, ..]
                parent  | <QWidget>
                default | <XConfigPlugin> || None

    :return     <bool> success
    """
    if (XConfigDialog._instance):
        XConfigDialog._instance.show()
        XConfigDialog._instance.activateWindow()
        return True

    dlg = XConfigDialog(parent)
    dlg.setPlugins(plugins)
    dlg.setCurrentPlugin(default)
    if (not modal):
        XConfigDialog._instance = dlg
        dlg.setAttribute(Qt.WA_DeleteOnClose)
        dlg.show()
        return True

    if (dlg.exec_()):
        return True
    return False
[ "def", "edit", "(", "plugins", ",", "parent", "=", "None", ",", "default", "=", "None", ",", "modal", "=", "True", ")", ":", "if", "(", "XConfigDialog", ".", "_instance", ")", ":", "XConfigDialog", ".", "_instance", ".", "show", "(", ")", "XConfigDialog", ".", "_instance", ".", "activateWindow", "(", ")", "return", "True", "dlg", "=", "XConfigDialog", "(", "parent", ")", "dlg", ".", "setPlugins", "(", "plugins", ")", "dlg", ".", "setCurrentPlugin", "(", "default", ")", "if", "(", "not", "modal", ")", ":", "XConfigDialog", ".", "_instance", "=", "dlg", "dlg", ".", "setAttribute", "(", "Qt", ".", "WA_DeleteOnClose", ")", "dlg", ".", "show", "(", ")", "return", "True", "if", "(", "dlg", ".", "exec_", "(", ")", ")", ":", "return", "True", "return", "False" ]
Prompts the user to edit the config settings for the inputed config \ plugins. :param plugins | [<XConfigPlugin>, ..] parent | <QWidget> default | <XConfigPlugin> || None :return <bool> success
[ "Prompts", "the", "user", "to", "edit", "the", "config", "settings", "for", "the", "inputed", "config", "\\", "plugins", ".", ":", "param", "plugins", "|", "[", "<XConfigPlugin", ">", "..", "]", "parent", "|", "<QWidget", ">", "default", "|", "<XConfigPlugin", ">", "||", "None", ":", "return", "<bool", ">", "success" ]
python
train
30.758621
doraemonext/wechat-python-sdk
wechat_sdk/basic.py
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/basic.py#L565-L575
def get_followers(self, first_user_id=None):
    """
    Get the list of followers.
    For details, see
    http://mp.weixin.qq.com/wiki/3/17e6919a39c1c53555185907acf70093.html
    :param first_user_id: Optional. The first OPENID to pull; if omitted,
                          pulling starts from the beginning by default.
    :return: the returned JSON data packet
    """
    params = dict()
    if first_user_id:
        params['next_openid'] = first_user_id
    return self.request.get('https://api.weixin.qq.com/cgi-bin/user/get', params=params)
[ "def", "get_followers", "(", "self", ",", "first_user_id", "=", "None", ")", ":", "params", "=", "dict", "(", ")", "if", "first_user_id", ":", "params", "[", "'next_openid'", "]", "=", "first_user_id", "return", "self", ".", "request", ".", "get", "(", "'https://api.weixin.qq.com/cgi-bin/user/get'", ",", "params", "=", "params", ")" ]
Get the list of followers. For details, see http://mp.weixin.qq.com/wiki/3/17e6919a39c1c53555185907acf70093.html :param first_user_id: Optional. The first OPENID to pull; if omitted, pulling starts from the beginning by default :return: the returned JSON data packet
[ "获取关注者列表", "详情请参考", "http", ":", "//", "mp", ".", "weixin", ".", "qq", ".", "com", "/", "wiki", "/", "3", "/", "17e6919a39c1c53555185907acf70093", ".", "html", ":", "param", "first_user_id", ":", "可选。第一个拉取的OPENID,不填默认从头开始拉取", ":", "return", ":", "返回的", "JSON", "数据包" ]
python
valid
39.727273
pywbem/pywbem
pywbem_mock/_resolvermixin.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_resolvermixin.py#L150-L161
def _init_qualifier_decl(qualifier_decl, qual_repo):
    """
    Initialize the flavors of a qualifier declaration if they are not
    already set.
    """
    assert qualifier_decl.name not in qual_repo
    if qualifier_decl.tosubclass is None:
        qualifier_decl.tosubclass = True
    if qualifier_decl.overridable is None:
        qualifier_decl.overridable = True
    if qualifier_decl.translatable is None:
        qualifier_decl.translatable = False
[ "def", "_init_qualifier_decl", "(", "qualifier_decl", ",", "qual_repo", ")", ":", "assert", "qualifier_decl", ".", "name", "not", "in", "qual_repo", "if", "qualifier_decl", ".", "tosubclass", "is", "None", ":", "qualifier_decl", ".", "tosubclass", "=", "True", "if", "qualifier_decl", ".", "overridable", "is", "None", ":", "qualifier_decl", ".", "overridable", "=", "True", "if", "qualifier_decl", ".", "translatable", "is", "None", ":", "qualifier_decl", ".", "translatable", "=", "False" ]
Initialize the flavors of a qualifier declaration if they are not already set.
[ "Initialize", "the", "flavors", "of", "a", "qualifier", "declaration", "if", "they", "are", "not", "already", "set", "." ]
python
train
41
pyviz/geoviews
geoviews/data/iris.py
https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/data/iris.py#L363-L379
def select(cls, dataset, selection_mask=None, **selection):
    """
    Apply a selection to the data.
    """
    import iris
    constraint = cls.select_to_constraint(dataset, selection)
    pre_dim_coords = [c.name() for c in dataset.data.dim_coords]
    indexed = cls.indexed(dataset, selection)
    extracted = dataset.data.extract(constraint)
    if indexed and not extracted.dim_coords:
        return extracted.data.item()
    post_dim_coords = [c.name() for c in extracted.dim_coords]
    dropped = [c for c in pre_dim_coords if c not in post_dim_coords]
    for d in dropped:
        extracted = iris.util.new_axis(extracted, d)
    return extracted
[ "def", "select", "(", "cls", ",", "dataset", ",", "selection_mask", "=", "None", ",", "*", "*", "selection", ")", ":", "import", "iris", "constraint", "=", "cls", ".", "select_to_constraint", "(", "dataset", ",", "selection", ")", "pre_dim_coords", "=", "[", "c", ".", "name", "(", ")", "for", "c", "in", "dataset", ".", "data", ".", "dim_coords", "]", "indexed", "=", "cls", ".", "indexed", "(", "dataset", ",", "selection", ")", "extracted", "=", "dataset", ".", "data", ".", "extract", "(", "constraint", ")", "if", "indexed", "and", "not", "extracted", ".", "dim_coords", ":", "return", "extracted", ".", "data", ".", "item", "(", ")", "post_dim_coords", "=", "[", "c", ".", "name", "(", ")", "for", "c", "in", "extracted", ".", "dim_coords", "]", "dropped", "=", "[", "c", "for", "c", "in", "pre_dim_coords", "if", "c", "not", "in", "post_dim_coords", "]", "for", "d", "in", "dropped", ":", "extracted", "=", "iris", ".", "util", ".", "new_axis", "(", "extracted", ",", "d", ")", "return", "extracted" ]
Apply a selection to the data.
[ "Apply", "a", "selection", "to", "the", "data", "." ]
python
train
41.411765
assemblerflow/flowcraft
flowcraft/generator/recipe.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L691-L730
def list_recipes(full=False):
    """Method that iterates over all available recipes and prints their
    information to the standard output

    Parameters
    ----------
    full : bool
        If true, it will provide the pipeline string along with the recipe name
    """
    logger.info(colored_print(
        "\n===== L I S T O F R E C I P E S =====\n", "green_bold"))

    # This will iterate over all modules included in the recipes subpackage
    # It will return the import class and the module name, algon with the
    # correct prefix
    prefix = "{}.".format(recipes.__name__)
    for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):

        # Import the current module
        _module = importer.find_module(modname).load_module(modname)

        # Fetch all available classes in module
        _recipe_classes = [cls for cls in _module.__dict__.values()
                           if isinstance(cls, type)]

        # Iterate over each Recipe class, and check for a match with the
        # provided recipe name.
        for cls in _recipe_classes:
            recipe_cls = cls()
            if hasattr(recipe_cls, "name"):
                logger.info(colored_print("=> {}".format(recipe_cls.name),
                                          "blue_bold"))
                if full:
                    logger.info(colored_print(
                        "\t {}".format(recipe_cls.__doc__), "purple_bold"))
                    logger.info(colored_print(
                        "Pipeline string: {}\n".format(recipe_cls.pipeline_str),
                        "yellow_bold"))

    sys.exit(0)
[ "def", "list_recipes", "(", "full", "=", "False", ")", ":", "logger", ".", "info", "(", "colored_print", "(", "\"\\n===== L I S T O F R E C I P E S =====\\n\"", ",", "\"green_bold\"", ")", ")", "# This will iterate over all modules included in the recipes subpackage", "# It will return the import class and the module name, algon with the", "# correct prefix", "prefix", "=", "\"{}.\"", ".", "format", "(", "recipes", ".", "__name__", ")", "for", "importer", ",", "modname", ",", "_", "in", "pkgutil", ".", "iter_modules", "(", "recipes", ".", "__path__", ",", "prefix", ")", ":", "# Import the current module", "_module", "=", "importer", ".", "find_module", "(", "modname", ")", ".", "load_module", "(", "modname", ")", "# Fetch all available classes in module", "_recipe_classes", "=", "[", "cls", "for", "cls", "in", "_module", ".", "__dict__", ".", "values", "(", ")", "if", "isinstance", "(", "cls", ",", "type", ")", "]", "# Iterate over each Recipe class, and check for a match with the", "# provided recipe name.", "for", "cls", "in", "_recipe_classes", ":", "recipe_cls", "=", "cls", "(", ")", "if", "hasattr", "(", "recipe_cls", ",", "\"name\"", ")", ":", "logger", ".", "info", "(", "colored_print", "(", "\"=> {}\"", ".", "format", "(", "recipe_cls", ".", "name", ")", ",", "\"blue_bold\"", ")", ")", "if", "full", ":", "logger", ".", "info", "(", "colored_print", "(", "\"\\t {}\"", ".", "format", "(", "recipe_cls", ".", "__doc__", ")", ",", "\"purple_bold\"", ")", ")", "logger", ".", "info", "(", "colored_print", "(", "\"Pipeline string: {}\\n\"", ".", "format", "(", "recipe_cls", ".", "pipeline_str", ")", ",", "\"yellow_bold\"", ")", ")", "sys", ".", "exit", "(", "0", ")" ]
Method that iterates over all available recipes and prints their information to the standard output Parameters ---------- full : bool If true, it will provide the pipeline string along with the recipe name
[ "Method", "that", "iterates", "over", "all", "available", "recipes", "and", "prints", "their", "information", "to", "the", "standard", "output" ]
python
test
37.175
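The module-discovery idiom at the heart of list_recipes(), standalone; the stdlib encodings package stands in for the recipes subpackage:

    import pkgutil
    import encodings

    prefix = "{}.".format(encodings.__name__)
    for importer, modname, ispkg in list(pkgutil.iter_modules(encodings.__path__, prefix))[:3]:
        print(modname, ispkg)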
bcho/bearychat-py
bearychat/incoming.py
https://github.com/bcho/bearychat-py/blob/d492595d6334dfba511f82770995160ee12b5de1/bearychat/incoming.py#L61-L70
def with_text(self, text, markdown=None):
    '''Set text content.

    :param text: text content.
    :param markdown: is markdown? Defaults to ``False``.
    '''
    self._text = text
    self._markdown = markdown or False

    return self
[ "def", "with_text", "(", "self", ",", "text", ",", "markdown", "=", "None", ")", ":", "self", ".", "_text", "=", "text", "self", ".", "_markdown", "=", "markdown", "or", "False", "return", "self" ]
Set text content. :param text: text content. :param markdown: is markdown? Defaults to ``False``.
[ "Set", "text", "content", "." ]
python
train
26
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L2438-L2457
def compute_wed(self):
    """
    Computes weight error derivative for all connections in
    self.connections starting with the last connection.
    """
    if len(self.cacheConnections) != 0:
        changeConnections = self.cacheConnections
    else:
        changeConnections = self.connections
    for connect in reverse(changeConnections):
        if connect.active and connect.fromLayer.active and connect.toLayer.active:
            connect.wed = connect.wed + Numeric.outerproduct(connect.fromLayer.activation,
                                                             connect.toLayer.delta)
    if len(self.cacheLayers) != 0:
        changeLayers = self.cacheLayers
    else:
        changeLayers = self.layers
    for layer in changeLayers:
        if layer.active:
            layer.wed = layer.wed + layer.delta
[ "def", "compute_wed", "(", "self", ")", ":", "if", "len", "(", "self", ".", "cacheConnections", ")", "!=", "0", ":", "changeConnections", "=", "self", ".", "cacheConnections", "else", ":", "changeConnections", "=", "self", ".", "connections", "for", "connect", "in", "reverse", "(", "changeConnections", ")", ":", "if", "connect", ".", "active", "and", "connect", ".", "fromLayer", ".", "active", "and", "connect", ".", "toLayer", ".", "active", ":", "connect", ".", "wed", "=", "connect", ".", "wed", "+", "Numeric", ".", "outerproduct", "(", "connect", ".", "fromLayer", ".", "activation", ",", "connect", ".", "toLayer", ".", "delta", ")", "if", "len", "(", "self", ".", "cacheLayers", ")", "!=", "0", ":", "changeLayers", "=", "self", ".", "cacheLayers", "else", ":", "changeLayers", "=", "self", ".", "layers", "for", "layer", "in", "changeLayers", ":", "if", "layer", ".", "active", ":", "layer", ".", "wed", "=", "layer", ".", "wed", "+", "layer", ".", "delta" ]
Computes weight error derivative for all connections in self.connections starting with the last connection.
[ "Computes", "weight", "error", "derivative", "for", "all", "connections", "in", "self", ".", "connections", "starting", "with", "the", "last", "connection", "." ]
python
train
44.85
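The core accumulation step, standalone with numpy (np.outer is the modern counterpart of the old Numeric.outerproduct; the vectors are illustrative):

    import numpy as np

    activation = np.array([0.2, 0.8])   # from-layer activations
    delta = np.array([0.1, -0.3, 0.5])  # to-layer deltas
    wed = np.zeros((2, 3))
    wed = wed + np.outer(activation, delta)  # accumulate weight error derivative
    print(wed)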
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L182-L189
def p_statement_if(p):
    '''statement : IF LPAREN expr RPAREN statement elseif_list else_single
                 | IF LPAREN expr RPAREN COLON inner_statement_list new_elseif_list new_else_single ENDIF SEMI'''
    if len(p) == 8:
        p[0] = ast.If(p[3], p[5], p[6], p[7], lineno=p.lineno(1))
    else:
        p[0] = ast.If(p[3], ast.Block(p[6], lineno=p.lineno(5)),
                      p[7], p[8], lineno=p.lineno(1))
[ "def", "p_statement_if", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "8", ":", "p", "[", "0", "]", "=", "ast", ".", "If", "(", "p", "[", "3", "]", ",", "p", "[", "5", "]", ",", "p", "[", "6", "]", ",", "p", "[", "7", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "else", ":", "p", "[", "0", "]", "=", "ast", ".", "If", "(", "p", "[", "3", "]", ",", "ast", ".", "Block", "(", "p", "[", "6", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "5", ")", ")", ",", "p", "[", "7", "]", ",", "p", "[", "8", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
statement : IF LPAREN expr RPAREN statement elseif_list else_single | IF LPAREN expr RPAREN COLON inner_statement_list new_elseif_list new_else_single ENDIF SEMI
[ "statement", ":", "IF", "LPAREN", "expr", "RPAREN", "statement", "elseif_list", "else_single", "|", "IF", "LPAREN", "expr", "RPAREN", "COLON", "inner_statement_list", "new_elseif_list", "new_else_single", "ENDIF", "SEMI" ]
python
train
52.375
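A minimal self-contained PLY sketch (assuming the ply package) showing the same convention as p_statement_if: the rule's docstring carries the grammar and p[i] indexes the matched symbols:

    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'
    t_ignore = ' '

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    def p_expr_plus(p):
        'expr : expr PLUS term'
        p[0] = p[1] + p[3]   # p[2] is the PLUS token

    def p_expr_term(p):
        'expr : term'
        p[0] = p[1]

    def p_term_number(p):
        'term : NUMBER'
        p[0] = p[1]

    def p_error(p):
        pass

    lexer = lex.lex()
    parser = yacc.yacc()
    print(parser.parse('1 + 2 + 3'))  # 6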
gitpython-developers/GitPython
git/index/base.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/base.py#L164-L166
def _entries_sorted(self):
    """:return: list of entries, in a sorted fashion, first by path, then by stage"""
    return sorted(self.entries.values(), key=lambda e: (e.path, e.stage))
[ "def", "_entries_sorted", "(", "self", ")", ":", "return", "sorted", "(", "self", ".", "entries", ".", "values", "(", ")", ",", "key", "=", "lambda", "e", ":", "(", "e", ".", "path", ",", "e", ".", "stage", ")", ")" ]
:return: list of entries, in a sorted fashion, first by path, then by stage
[ ":", "return", ":", "list", "of", "entries", "in", "a", "sorted", "fashion", "first", "by", "path", "then", "by", "stage" ]
python
train
64
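The same composite-key sort, standalone on (path, stage) pairs:

    entries = [("b.txt", 0), ("a.txt", 2), ("a.txt", 0)]
    print(sorted(entries, key=lambda e: (e[0], e[1])))
    # [('a.txt', 0), ('a.txt', 2), ('b.txt', 0)]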
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L115-L134
def summary(self):
    """Returns the docstring summary for the code element if it exists."""
    if self._summary is None:
        self._summary = "No summary for element."
        for doc in self.docstring:
            if doc.doctype == "summary":
                self._summary = doc.contents
                break

        #If a parameter, member or local tag has dimensions or other children,
        #then the inner-text is not the right thing to use; find a grand-child
        #summary tag instead.
        if self._summary == "No summary for element." and len(self.docstring) > 0:
            summary = self.doc_children("summary")
            if len(summary) > 0:
                self._summary = summary[0].contents
            else:
                self._summary = self.docstring[0].contents

    return self._summary
[ "def", "summary", "(", "self", ")", ":", "if", "self", ".", "_summary", "is", "None", ":", "self", ".", "_summary", "=", "\"No summary for element.\"", "for", "doc", "in", "self", ".", "docstring", ":", "if", "doc", ".", "doctype", "==", "\"summary\"", ":", "self", ".", "_summary", "=", "doc", ".", "contents", "break", "#If a parameter, member or local tag has dimensions or other children,", "#then the inner-text is not the right thing to use; find a grand-child", "#summary tag instead.", "if", "self", ".", "_summary", "==", "\"No summary for element.\"", "and", "len", "(", "self", ".", "docstring", ")", ">", "0", ":", "summary", "=", "self", ".", "doc_children", "(", "\"summary\"", ")", "if", "len", "(", "summary", ")", ">", "0", ":", "self", ".", "_summary", "=", "summary", "[", "0", "]", ".", "contents", "else", ":", "self", ".", "_summary", "=", "self", ".", "docstring", "[", "0", "]", ".", "contents", "return", "self", ".", "_summary" ]
Returns the docstring summary for the code element if it exists.
[ "Returns", "the", "docstring", "summary", "for", "the", "code", "element", "if", "it", "exists", "." ]
python
train
43.8
mdgoldberg/sportsref
sportsref/nba/players.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nba/players.py#L180-L182
def stats_advanced(self, kind='R', summary=False):
    """Returns a DataFrame of advanced stats."""
    return self._get_stats_table('advanced', kind=kind, summary=summary)
[ "def", "stats_advanced", "(", "self", ",", "kind", "=", "'R'", ",", "summary", "=", "False", ")", ":", "return", "self", ".", "_get_stats_table", "(", "'advanced'", ",", "kind", "=", "kind", ",", "summary", "=", "summary", ")" ]
Returns a DataFrame of advanced stats.
[ "Returns", "a", "DataFrame", "of", "advanced", "stats", "." ]
python
test
59.333333
snjoetw/py-synology
synology/api.py
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L186-L200
def camera_disable(self, camera_id, **kwargs):
    """Disable camera."""
    api = self._api_info['camera']
    payload = dict({
        '_sid': self._sid,
        'api': api['name'],
        'method': 'Disable',
        'version': 9,
        'idList': camera_id,
    }, **kwargs)
    print(api['url'])
    print(payload)

    response = self._get(api['url'], payload)
    return response['success']
[ "def", "camera_disable", "(", "self", ",", "camera_id", ",", "*", "*", "kwargs", ")", ":", "api", "=", "self", ".", "_api_info", "[", "'camera'", "]", "payload", "=", "dict", "(", "{", "'_sid'", ":", "self", ".", "_sid", ",", "'api'", ":", "api", "[", "'name'", "]", ",", "'method'", ":", "'Disable'", ",", "'version'", ":", "9", ",", "'idList'", ":", "camera_id", ",", "}", ",", "*", "*", "kwargs", ")", "print", "(", "api", "[", "'url'", "]", ")", "print", "(", "payload", ")", "response", "=", "self", ".", "_get", "(", "api", "[", "'url'", "]", ",", "payload", ")", "return", "response", "[", "'success'", "]" ]
Disable camera.
[ "Disable", "camera", "." ]
python
test
29.133333
ARMmbed/icetea
icetea_lib/LogManager.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/LogManager.py#L699-L733
def filter(self, record):
    """
    Filter record

    :param record: Record to filter
    :return:
    """
    def modify(value):
        """
        Modify logged record, truncating it to max length and logging remaining length

        :param value: Record to modify
        :return:
        """
        if isinstance(value, six.string_types):
            if len(value) < ContextFilter.MAXIMUM_LENGTH:
                return value
            try:
                return "{}...[{} more bytes]".format(
                    value[:ContextFilter.REVEAL_LENGTH],
                    len(value) - ContextFilter.REVEAL_LENGTH)
            except UnicodeError:
                return "{}...[{} more bytes]".format(
                    repr(value[:ContextFilter.REVEAL_LENGTH]),
                    len(value) - ContextFilter.REVEAL_LENGTH)
        elif isinstance(value, six.binary_type):
            return "{}...[{} more bytes]".format(
                repr(value[:ContextFilter.REVEAL_LENGTH]),
                len(value) - ContextFilter.REVEAL_LENGTH)
        else:
            return value

    record.msg = traverse_json_obj(record.msg, callback=modify)
    return True
[ "def", "filter", "(", "self", ",", "record", ")", ":", "def", "modify", "(", "value", ")", ":", "\"\"\"\n Modify logged record, truncating it to max length and logging remaining length\n :param value: Record to modify\n :return:\n \"\"\"", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "if", "len", "(", "value", ")", "<", "ContextFilter", ".", "MAXIMUM_LENGTH", ":", "return", "value", "try", ":", "return", "\"{}...[{} more bytes]\"", ".", "format", "(", "value", "[", ":", "ContextFilter", ".", "REVEAL_LENGTH", "]", ",", "len", "(", "value", ")", "-", "ContextFilter", ".", "REVEAL_LENGTH", ")", "except", "UnicodeError", ":", "return", "\"{}...[{} more bytes]\"", ".", "format", "(", "repr", "(", "value", "[", ":", "ContextFilter", ".", "REVEAL_LENGTH", "]", ")", ",", "len", "(", "value", ")", "-", "ContextFilter", ".", "REVEAL_LENGTH", ")", "elif", "isinstance", "(", "value", ",", "six", ".", "binary_type", ")", ":", "return", "\"{}...[{} more bytes]\"", ".", "format", "(", "repr", "(", "value", "[", ":", "ContextFilter", ".", "REVEAL_LENGTH", "]", ")", ",", "len", "(", "value", ")", "-", "ContextFilter", ".", "REVEAL_LENGTH", ")", "else", ":", "return", "value", "record", ".", "msg", "=", "traverse_json_obj", "(", "record", ".", "msg", ",", "callback", "=", "modify", ")", "return", "True" ]
Filter record

:param record: Record to filter
:return:
[ "Filter", "record", ":", "param", "record", ":", "Record", "to", "filter", ":", "return", ":" ]
python
train
36
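The truncation rule inside modify() is easy to check in isolation. A minimal standalone sketch with the class constants inlined (the values 100 and 50 are assumptions; icetea's ContextFilter defines its own MAXIMUM_LENGTH and REVEAL_LENGTH):

MAXIMUM_LENGTH = 100   # assumed value
REVEAL_LENGTH = 50     # assumed value

def truncate(value):
    # Keep short strings as-is; long ones keep a prefix plus a byte count.
    if isinstance(value, str) and len(value) >= MAXIMUM_LENGTH:
        return "{}...[{} more bytes]".format(value[:REVEAL_LENGTH],
                                             len(value) - REVEAL_LENGTH)
    return value

print(truncate("x" * 120))  # fifty x's followed by '...[70 more bytes]'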
napalm-automation/napalm
napalm/ios/ios.py
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/ios/ios.py#L362-L375
def _normalize_merge_diff(diff):
    """Make compare_config() for merge look similar to replace config diff."""
    new_diff = []
    for line in diff.splitlines():
        # Filter blank lines and prepend +sign
        if line.strip():
            new_diff.append("+" + line)
    if new_diff:
        new_diff.insert(
            0, "! incremental-diff failed; falling back to echo of merge file"
        )
    else:
        new_diff.append("! No changes specified in merge file.")
    return "\n".join(new_diff)
[ "def", "_normalize_merge_diff", "(", "diff", ")", ":", "new_diff", "=", "[", "]", "for", "line", "in", "diff", ".", "splitlines", "(", ")", ":", "# Filter blank lines and prepend +sign", "if", "line", ".", "strip", "(", ")", ":", "new_diff", ".", "append", "(", "\"+\"", "+", "line", ")", "if", "new_diff", ":", "new_diff", ".", "insert", "(", "0", ",", "\"! incremental-diff failed; falling back to echo of merge file\"", ")", "else", ":", "new_diff", ".", "append", "(", "\"! No changes specified in merge file.\"", ")", "return", "\"\\n\"", ".", "join", "(", "new_diff", ")" ]
Make compare_config() for merge look similar to replace config diff.
[ "Make", "compare_config", "()", "for", "merge", "look", "similar", "to", "replace", "config", "diff", "." ]
python
train
39.428571
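Since _normalize_merge_diff is a pure helper, it can be exercised directly. A small sketch; importing a private name from napalm/ios/ios.py is for illustration only and assumes napalm is installed:

from napalm.ios.ios import _normalize_merge_diff  # private helper; illustrative

merged = "interface Loopback0\n ip address 10.0.0.1 255.255.255.255\n\n"
print(_normalize_merge_diff(merged))
# ! incremental-diff failed; falling back to echo of merge file
# +interface Loopback0
# + ip address 10.0.0.1 255.255.255.255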
paulovn/sparql-kernel
sparqlkernel/connection.py
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/connection.py#L105-L141
def html_table(data, header=True, limit=None, withtype=False):
    """
    Return a double iterable as an HTML table
    @param data (iterable): the data to format
    @param header (bool): if the first row is a header row
    @param limit (int): maximum number of rows to render (excluding header)
    @param withtype (bool): if columns are to have an alternating CSS class
        (even/odd) or not.
    @return (int,string): a pair <number-of-rendered-rows>, <html-table>
    """
    if header and limit:
        limit += 1
    ct = 'th' if header else 'td'
    rc = 'hdr' if header else 'odd'

    # import codecs
    # import datetime
    # with codecs.open( '/tmp/dump', 'w', encoding='utf-8') as f:
    #     print( '************', datetime.datetime.now(), file=f )
    #     for n, row in enumerate(data):
    #         print( '-------', n, file=f )
    #         for n, c in enumerate(row):
    #             print( type(c), repr(c), file=f )

    html = u'<table>'
    rn = -1
    for rn, row in enumerate(data):
        html += u'<tr class={}>'.format(rc)
        html += '\n'.join((html_elem(c, ct, withtype) for c in row))
        html += u'</tr>'
        rc = 'even' if rc == 'odd' else 'odd'
        ct = 'td'
        if limit:
            limit -= 1
            if not limit:
                break
    return (0, '') if rn < 0 else (rn+1-header, html+u'</table>')
[ "def", "html_table", "(", "data", ",", "header", "=", "True", ",", "limit", "=", "None", ",", "withtype", "=", "False", ")", ":", "if", "header", "and", "limit", ":", "limit", "+=", "1", "ct", "=", "'th'", "if", "header", "else", "'td'", "rc", "=", "'hdr'", "if", "header", "else", "'odd'", "# import codecs", "# import datetime", "# with codecs.open( '/tmp/dump', 'w', encoding='utf-8') as f:", "# print( '************', datetime.datetime.now(), file=f )", "# for n, row in enumerate(data):", "# print( '-------', n, file=f )", "# for n, c in enumerate(row):", "# print( type(c), repr(c), file=f )", "html", "=", "u'<table>'", "rn", "=", "-", "1", "for", "rn", ",", "row", "in", "enumerate", "(", "data", ")", ":", "html", "+=", "u'<tr class={}>'", ".", "format", "(", "rc", ")", "html", "+=", "'\\n'", ".", "join", "(", "(", "html_elem", "(", "c", ",", "ct", ",", "withtype", ")", "for", "c", "in", "row", ")", ")", "html", "+=", "u'</tr>'", "rc", "=", "'even'", "if", "rc", "==", "'odd'", "else", "'odd'", "ct", "=", "'td'", "if", "limit", ":", "limit", "-=", "1", "if", "not", "limit", ":", "break", "return", "(", "0", ",", "''", ")", "if", "rn", "<", "0", "else", "(", "rn", "+", "1", "-", "header", ",", "html", "+", "u'</table>'", ")" ]
Return a double iterable as an HTML table
@param data (iterable): the data to format
@param header (bool): if the first row is a header row
@param limit (int): maximum number of rows to render (excluding header)
@param withtype (bool): if columns are to have an alternating CSS class
    (even/odd) or not.
@return (int,string): a pair <number-of-rendered-rows>, <html-table>
[ "Return", "a", "double", "iterable", "as", "an", "HTML", "table" ]
python
train
36.594595
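A quick sanity check of html_table with a two-row iterable. This assumes the sparql-kernel package is importable; html_elem (used to render each cell) lives in the same module:

from sparqlkernel.connection import html_table  # assumes sparql-kernel is installed

rows = [('name', 'age'), ('alice', '30')]
n, html = html_table(rows, header=True)
# n == 1 rendered data row; html begins with '<table><tr class=hdr>'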
kblin/bioinf-helperlibs
helperlibs/bio/seqio.py
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L117-L146
def sanity_check_insdcio(handle, id_marker, fake_id_line):
    """Sanity check for insdcio style files"""
    found_id = False
    found_end_marker = False
    for line in handle:
        line = line.strip()
        if not line:
            continue
        if line.startswith(id_marker):
            found_id = True
            break
        if line.startswith('//'):
            found_end_marker = True
            break

    handle.seek(0)

    # We found an ID, file looks good.
    if found_id:
        return handle

    # If there's no ID and no end marker, just give up.
    if not found_end_marker:
        return handle

    # If we found an end marker but no ID, fake one.
    new_handle = StringIO()
    new_handle.write("%s\n" % fake_id_line)
    new_handle.write(handle.read())
    new_handle.seek(0)
    return new_handle
[ "def", "sanity_check_insdcio", "(", "handle", ",", "id_marker", ",", "fake_id_line", ")", ":", "found_id", "=", "False", "found_end_marker", "=", "False", "for", "line", "in", "handle", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "if", "line", ".", "startswith", "(", "id_marker", ")", ":", "found_id", "=", "True", "break", "if", "line", ".", "startswith", "(", "'//'", ")", ":", "found_end_marker", "=", "True", "break", "handle", ".", "seek", "(", "0", ")", "# We found an ID, file looks good.", "if", "found_id", ":", "return", "handle", "# If there's no ID and no end marker, just give up.", "if", "not", "found_end_marker", ":", "return", "handle", "# If we found an end marker but no ID, fake one.", "new_handle", "=", "StringIO", "(", ")", "new_handle", ".", "write", "(", "\"%s\\n\"", "%", "fake_id_line", ")", "new_handle", ".", "write", "(", "handle", ".", "read", "(", ")", ")", "new_handle", ".", "seek", "(", "0", ")", "return", "new_handle" ]
Sanity check for insdcio style files
[ "Sanity", "check", "for", "insdcio", "style", "files" ]
python
train
26.866667
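A usage sketch for the repair path: a record with an end marker but no ID line gets the fake ID prepended. The record text and the fake LOCUS line below are arbitrary illustrations; the import assumes bioinf-helperlibs is installed:

from io import StringIO
from helperlibs.bio.seqio import sanity_check_insdcio

# A truncated GenBank-style record: no LOCUS line, but a '//' end marker.
handle = StringIO("FEATURES             Location/Qualifiers\n//\n")
fixed = sanity_check_insdcio(handle, 'LOCUS', 'LOCUS       fake')
print(fixed.readline())  # 'LOCUS       fake' -- the faked ID line was prepended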
ssato/python-anyconfig
src/anyconfig/backend/xml.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/xml.py#L207-L218
def _parse_attrs(elem, container=dict, **options):
    """
    :param elem: ET Element object has attributes (elem.attrib)
    :param container: callable to make a container object
    :return: Parsed value or value itself depending on 'ac_parse_value'
    """
    adic = dict((_tweak_ns(a, **options), v) for a, v in elem.attrib.items())
    if options.get("ac_parse_value", False):
        return container(dict((k, anyconfig.parser.parse_single(v))
                              for k, v in adic.items()))
    return container(adic)
[ "def", "_parse_attrs", "(", "elem", ",", "container", "=", "dict", ",", "*", "*", "options", ")", ":", "adic", "=", "dict", "(", "(", "_tweak_ns", "(", "a", ",", "*", "*", "options", ")", ",", "v", ")", "for", "a", ",", "v", "in", "elem", ".", "attrib", ".", "items", "(", ")", ")", "if", "options", ".", "get", "(", "\"ac_parse_value\"", ",", "False", ")", ":", "return", "container", "(", "dict", "(", "(", "k", ",", "anyconfig", ".", "parser", ".", "parse_single", "(", "v", ")", ")", "for", "k", ",", "v", "in", "adic", ".", "items", "(", ")", ")", ")", "return", "container", "(", "adic", ")" ]
:param elem: ET Element object has attributes (elem.attrib)
:param container: callable to make a container object
:return: Parsed value or value itself depending on 'ac_parse_value'
[ ":", "param", "elem", ":", "ET", "Element", "object", "has", "attributes", "(", "elem", ".", "attrib", ")", ":", "param", "container", ":", "callble", "to", "make", "a", "container", "object", ":", "return", ":", "Parsed", "value", "or", "value", "itself", "depends", "on", "ac_parse_value" ]
python
train
43.5
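A minimal sketch of the two modes, assuming no XML namespaces (so _tweak_ns should leave attribute names unchanged); importing this private helper is for illustration only:

import xml.etree.ElementTree as ET
from anyconfig.backend.xml import _parse_attrs  # private helper; illustrative

elem = ET.fromstring('<item id="1" name="foo"/>')
_parse_attrs(elem)                       # {'id': '1', 'name': 'foo'}
_parse_attrs(elem, ac_parse_value=True)  # {'id': 1, 'name': 'foo'} -- '1' parsed to int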
Azure/azure-sdk-for-python
azure-mgmt-containerregistry/azure/mgmt/containerregistry/container_registry_management_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-containerregistry/azure/mgmt/containerregistry/container_registry_management_client.py#L246-L262
def webhooks(self):
        """Instance depends on the API version:

           * 2017-10-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2017_10_01.operations.WebhooksOperations>`
           * 2018-02-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.WebhooksOperations>`
           * 2018-09-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_09_01.operations.WebhooksOperations>`
        """
        api_version = self._get_api_version('webhooks')
        if api_version == '2017-10-01':
            from .v2017_10_01.operations import WebhooksOperations as OperationClass
        elif api_version == '2018-02-01-preview':
            from .v2018_02_01_preview.operations import WebhooksOperations as OperationClass
        elif api_version == '2018-09-01':
            from .v2018_09_01.operations import WebhooksOperations as OperationClass
        else:
            raise NotImplementedError("APIVersion {} is not available".format(api_version))
        return OperationClass(self._client, self.config,
                              Serializer(self._models_dict(api_version)),
                              Deserializer(self._models_dict(api_version)))
[ "def", "webhooks", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'webhooks'", ")", "if", "api_version", "==", "'2017-10-01'", ":", "from", ".", "v2017_10_01", ".", "operations", "import", "WebhooksOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-02-01-preview'", ":", "from", ".", "v2018_02_01_preview", ".", "operations", "import", "WebhooksOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-09-01'", ":", "from", ".", "v2018_09_01", ".", "operations", "import", "WebhooksOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version:

* 2017-10-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2017_10_01.operations.WebhooksOperations>`
* 2018-02-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.WebhooksOperations>`
* 2018-09-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_09_01.operations.WebhooksOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
test
68.176471
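The property is exposed by the multi-API management client, where the configured API version picks which operations module gets imported. A hedged sketch; credential construction is omitted and the api_version keyword is assumed to be accepted by this client's constructor:

from azure.mgmt.containerregistry import ContainerRegistryManagementClient

# credentials and subscription_id must come from your own Azure setup
client = ContainerRegistryManagementClient(credentials, subscription_id,
                                           api_version='2018-09-01')
ops = client.webhooks  # WebhooksOperations from the v2018_09_01 package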
materialsproject/pymatgen
pymatgen/io/abinit/qutils.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/qutils.py#L129-L134
def any2mb(s):
    """Convert string or number to memory in megabytes."""
    if is_string(s):
        return int(Memory.from_string(s).to("Mb"))
    else:
        return int(s)
[ "def", "any2mb", "(", "s", ")", ":", "if", "is_string", "(", "s", ")", ":", "return", "int", "(", "Memory", ".", "from_string", "(", "s", ")", ".", "to", "(", "\"Mb\"", ")", ")", "else", ":", "return", "int", "(", "s", ")" ]
Convert string or number to memory in megabytes.
[ "Convert", "string", "or", "number", "to", "memory", "in", "megabytes", "." ]
python
train
28.666667
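A short usage sketch (assumes pymatgen is installed; the exact unit strings accepted depend on pymatgen's Memory parser, so "2 Gb" is an assumption):

from pymatgen.io.abinit.qutils import any2mb

any2mb(512)      # -> 512: plain numbers are just cast to int
any2mb("2 Gb")   # -> 2048, if Memory.from_string accepts this unit string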
jasonkeene/python-ubersmith
ubersmith/utils.py
https://github.com/jasonkeene/python-ubersmith/blob/0c594e2eb41066d1fe7860e3a6f04b14c14f6e6a/ubersmith/utils.py#L101-L138
def to_nested_php_args(data, prefix_key=None):
    """
    This function will take either a dict or list and will recursively loop
    through the values converting it into a format similar to a PHP array
    which Ubersmith requires for the info portion of the API's order.create
    method.
    """
    is_root = prefix_key is None
    prefix_key = prefix_key if prefix_key else ''

    if islist(data):
        data_iter = data if is_root else enumerate(data)
        new_data = [] if is_root else {}
    elif isdict(data):
        data_iter = list(data.items())
        new_data = {}
    else:
        raise TypeError('expected dict or list, got {0}'.format(type(data)))

    if islist(new_data):
        def data_set(k, v):
            new_data.append((k, v))

        def data_update(d):
            for k, v in list(d.items()):
                new_data.append((k, v))
    else:
        def data_set(k, v):
            new_data[k] = v
        data_update = new_data.update

    for key, value in data_iter:
        end_key = prefix_key + (str(key) if is_root else '[{0}]'.format(key))
        if _is_leaf(value):
            data_set(end_key, value)
        else:
            nested_args = to_nested_php_args(value, end_key)
            data_update(nested_args)

    return new_data
[ "def", "to_nested_php_args", "(", "data", ",", "prefix_key", "=", "None", ")", ":", "is_root", "=", "prefix_key", "is", "None", "prefix_key", "=", "prefix_key", "if", "prefix_key", "else", "''", "if", "islist", "(", "data", ")", ":", "data_iter", "=", "data", "if", "is_root", "else", "enumerate", "(", "data", ")", "new_data", "=", "[", "]", "if", "is_root", "else", "{", "}", "elif", "isdict", "(", "data", ")", ":", "data_iter", "=", "list", "(", "data", ".", "items", "(", ")", ")", "new_data", "=", "{", "}", "else", ":", "raise", "TypeError", "(", "'expected dict or list, got {0}'", ".", "format", "(", "type", "(", "data", ")", ")", ")", "if", "islist", "(", "new_data", ")", ":", "def", "data_set", "(", "k", ",", "v", ")", ":", "new_data", ".", "append", "(", "(", "k", ",", "v", ")", ")", "def", "data_update", "(", "d", ")", ":", "for", "k", ",", "v", "in", "list", "(", "d", ".", "items", "(", ")", ")", ":", "new_data", ".", "append", "(", "(", "k", ",", "v", ")", ")", "else", ":", "def", "data_set", "(", "k", ",", "v", ")", ":", "new_data", "[", "k", "]", "=", "v", "data_update", "=", "new_data", ".", "update", "for", "key", ",", "value", "in", "data_iter", ":", "end_key", "=", "prefix_key", "+", "(", "str", "(", "key", ")", "if", "is_root", "else", "'[{0}]'", ".", "format", "(", "key", ")", ")", "if", "_is_leaf", "(", "value", ")", ":", "data_set", "(", "end_key", ",", "value", ")", "else", ":", "nested_args", "=", "to_nested_php_args", "(", "value", ",", "end_key", ")", "data_update", "(", "nested_args", ")", "return", "new_data" ]
This function will take either a dict or list and will recursively loop through the values converting it into a format similar to a PHP array which Ubersmith requires for the info portion of the API's order.create method.
[ "This", "function", "will", "take", "either", "a", "dict", "or", "list", "and", "will", "recursively", "loop", "through", "the", "values", "converting", "it", "into", "a", "format", "similar", "to", "a", "PHP", "array", "which", "Ubersmith", "requires", "for", "the", "info", "portion", "of", "the", "API", "s", "order", ".", "create", "method", "." ]
python
train
32.789474
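A sketch of the flattening on a nested dict (assumes python-ubersmith is installed, and that _is_leaf treats dicts and lists as non-leaf values):

from ubersmith.utils import to_nested_php_args

info = {'client': {'first': 'Ada', 'tags': ['vip', 'new']}}
to_nested_php_args(info)
# {'client[first]': 'Ada', 'client[tags][0]': 'vip', 'client[tags][1]': 'new'}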
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1199-L1208
def _build_kernel_function_declaration(self, name='kernel'):
        """Build and return kernel function declaration"""
        array_declarations, array_dimensions = self._build_array_declarations(with_init=False)
        scalar_declarations = self._build_scalar_declarations(with_init=False)
        const_declarations = self._build_const_declartions(with_init=False)
        return c_ast.FuncDecl(args=c_ast.ParamList(params=array_declarations +
                                                          scalar_declarations +
                                                          const_declarations),
                              type=c_ast.TypeDecl(declname=name, quals=[],
                                                  type=c_ast.IdentifierType(names=['void'])))
[ "def", "_build_kernel_function_declaration", "(", "self", ",", "name", "=", "'kernel'", ")", ":", "array_declarations", ",", "array_dimensions", "=", "self", ".", "_build_array_declarations", "(", "with_init", "=", "False", ")", "scalar_declarations", "=", "self", ".", "_build_scalar_declarations", "(", "with_init", "=", "False", ")", "const_declarations", "=", "self", ".", "_build_const_declartions", "(", "with_init", "=", "False", ")", "return", "c_ast", ".", "FuncDecl", "(", "args", "=", "c_ast", ".", "ParamList", "(", "params", "=", "array_declarations", "+", "scalar_declarations", "+", "const_declarations", ")", ",", "type", "=", "c_ast", ".", "TypeDecl", "(", "declname", "=", "name", ",", "quals", "=", "[", "]", ",", "type", "=", "c_ast", ".", "IdentifierType", "(", "names", "=", "[", "'void'", "]", ")", ")", ")" ]
Build and return kernel function declaration
[ "Build", "and", "return", "kernel", "function", "declaration" ]
python
test
75.9
digidotcom/python-wvalib
wva/cli.py
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/cli.py#L110-L123
def cli(ctx, hostname, username, password, config_dir, https):
    """Command-line interface for interacting with a WVA device"""
    ctx.is_root = True
    ctx.user_values_entered = False
    ctx.config_dir = os.path.abspath(os.path.expanduser(config_dir))
    ctx.config = load_config(ctx)
    ctx.hostname = hostname
    ctx.username = username
    ctx.password = password
    ctx.https = https

    # Creating the WVA object is deferred as some commands like clearconfig
    # should not require a username/password to perform them
    ctx.wva = None
[ "def", "cli", "(", "ctx", ",", "hostname", ",", "username", ",", "password", ",", "config_dir", ",", "https", ")", ":", "ctx", ".", "is_root", "=", "True", "ctx", ".", "user_values_entered", "=", "False", "ctx", ".", "config_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "config_dir", ")", ")", "ctx", ".", "config", "=", "load_config", "(", "ctx", ")", "ctx", ".", "hostname", "=", "hostname", "ctx", ".", "username", "=", "username", "ctx", ".", "password", "=", "password", "ctx", ".", "https", "=", "https", "# Creating the WVA object is deferred as some commands like clearconfig", "# should not require a username/password to perform them", "ctx", ".", "wva", "=", "None" ]
Command-line interface for interacting with a WVA device
[ "Command", "-", "line", "interface", "for", "interacting", "with", "a", "WVA", "device" ]
python
train
38.642857
kuszaj/claptcha
claptcha/claptcha.py
https://github.com/kuszaj/claptcha/blob/0245f656e6febf34e32b5238196e992929df42c7/claptcha/claptcha.py#L88-L125
def image(self):
        r"""
        Tuple with a CAPTCHA text and an Image object.

        Images are generated on the fly, using given text source, TTF font
        and other parameters passable through __init__. All letters in used
        text are morphed. Also a line is morphed and pasted onto CAPTCHA
        text. Additionally, if self.noise > 1/255, a "snowy" image is merged
        with CAPTCHA image with a 50/50 ratio.

        Property returns a pair containing a string with text in returned
        image and image itself.

        :returns: ``tuple`` (CAPTCHA text, Image object)
        """
        text = self.text
        w, h = self.font.getsize(text)
        margin_x = round(self.margin_x * w / self.w)
        margin_y = round(self.margin_y * h / self.h)

        image = Image.new('RGB',
                          (w + 2*margin_x, h + 2*margin_y),
                          (255, 255, 255))

        # Text
        self._writeText(image, text, pos=(margin_x, margin_y))

        # Line
        self._drawLine(image)

        # White noise
        noise = self._whiteNoise(image.size)
        if noise is not None:
            image = Image.blend(image, noise, 0.5)

        # Resize
        image = image.resize(self.size, resample=self.resample)

        return (text, image)
[ "def", "image", "(", "self", ")", ":", "text", "=", "self", ".", "text", "w", ",", "h", "=", "self", ".", "font", ".", "getsize", "(", "text", ")", "margin_x", "=", "round", "(", "self", ".", "margin_x", "*", "w", "/", "self", ".", "w", ")", "margin_y", "=", "round", "(", "self", ".", "margin_y", "*", "h", "/", "self", ".", "h", ")", "image", "=", "Image", ".", "new", "(", "'RGB'", ",", "(", "w", "+", "2", "*", "margin_x", ",", "h", "+", "2", "*", "margin_y", ")", ",", "(", "255", ",", "255", ",", "255", ")", ")", "# Text", "self", ".", "_writeText", "(", "image", ",", "text", ",", "pos", "=", "(", "margin_x", ",", "margin_y", ")", ")", "# Line", "self", ".", "_drawLine", "(", "image", ")", "# White noise", "noise", "=", "self", ".", "_whiteNoise", "(", "image", ".", "size", ")", "if", "noise", "is", "not", "None", ":", "image", "=", "Image", ".", "blend", "(", "image", ",", "noise", ",", "0.5", ")", "# Resize", "image", "=", "image", ".", "resize", "(", "self", ".", "size", ",", "resample", "=", "self", ".", "resample", ")", "return", "(", "text", ",", "image", ")" ]
r""" Tuple with a CAPTCHA text and a Image object. Images are generated on the fly, using given text source, TTF font and other parameters passable through __init__. All letters in used text are morphed. Also a line is morphed and pased onto CAPTCHA text. Additionaly, if self.noise > 1/255, a "snowy" image is merged with CAPTCHA image with a 50/50 ratio. Property returns a pair containing a string with text in returned image and image itself. :returns: ``tuple`` (CAPTCHA text, Image object)
[ "r", "Tuple", "with", "a", "CAPTCHA", "text", "and", "a", "Image", "object", "." ]
python
train
33.210526
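A minimal usage sketch (assumes the claptcha package and a TTF font file on disk; the constructor arguments shown follow the project's README but are not taken from this record):

from claptcha import Claptcha

c = Claptcha("CAPTCHA", "FreeMono.ttf")  # text source and font path are assumptions
text, image = c.image      # property: (the rendered string, a PIL Image)
image.save("captcha.png")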
twisted/axiom
axiom/store.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/store.py#L1010-L1032
def _diffSchema(diskSchema, memorySchema):
    """
    Format a schema mismatch for human consumption.

    @param diskSchema: The on-disk schema.
    @param memorySchema: The in-memory schema.

    @rtype: L{bytes}
    @return: A description of the schema differences.
    """
    diskSchema = set(diskSchema)
    memorySchema = set(memorySchema)
    diskOnly = diskSchema - memorySchema
    memoryOnly = memorySchema - diskSchema
    diff = []
    if diskOnly:
        diff.append('Only on disk:')
        diff.extend(map(repr, diskOnly))
    if memoryOnly:
        diff.append('Only in memory:')
        diff.extend(map(repr, memoryOnly))
    return '\n'.join(diff)
[ "def", "_diffSchema", "(", "diskSchema", ",", "memorySchema", ")", ":", "diskSchema", "=", "set", "(", "diskSchema", ")", "memorySchema", "=", "set", "(", "memorySchema", ")", "diskOnly", "=", "diskSchema", "-", "memorySchema", "memoryOnly", "=", "memorySchema", "-", "diskSchema", "diff", "=", "[", "]", "if", "diskOnly", ":", "diff", ".", "append", "(", "'Only on disk:'", ")", "diff", ".", "extend", "(", "map", "(", "repr", ",", "diskOnly", ")", ")", "if", "memoryOnly", ":", "diff", ".", "append", "(", "'Only in memory:'", ")", "diff", ".", "extend", "(", "map", "(", "repr", ",", "memoryOnly", ")", ")", "return", "'\\n'", ".", "join", "(", "diff", ")" ]
Format a schema mismatch for human consumption.

@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.

@rtype: L{bytes}
@return: A description of the schema differences.
[ "Format", "a", "schema", "mismatch", "for", "human", "consumption", "." ]
python
train
28.130435
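The helper is pure, so it is easy to try out; importing a private name from axiom.store is for illustration only:

from axiom.store import _diffSchema  # private helper; illustrative

disk = [('age', 'INTEGER')]
memory = [('email', 'TEXT')]
print(_diffSchema(disk, memory))
# Only on disk:
# ('age', 'INTEGER')
# Only in memory:
# ('email', 'TEXT')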
ajenhl/tacl
tacl/corpus.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/corpus.py#L24-L33
def get_sigla(self, work):
        """Returns a list of all of the sigla for `work`.

        :param work: name of work
        :type work: `str`
        :rtype: `list` of `str`

        """
        return [os.path.splitext(os.path.basename(path))[0]
                for path in glob.glob(os.path.join(self._path, work, '*.txt'))]
[ "def", "get_sigla", "(", "self", ",", "work", ")", ":", "return", "[", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "[", "0", "]", "for", "path", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "work", ",", "'*.txt'", ")", ")", "]" ]
Returns a list of all of the sigla for `work`.

:param work: name of work
:type work: `str`
:rtype: `list` of `str`
[ "Returns", "a", "list", "of", "all", "of", "the", "sigla", "for", "work", "." ]
python
train
32.1
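A self-contained sketch of the same logic, detached from the Corpus class (the paths and work name below are hypothetical):

import glob
import os

def sigla_for(corpus_path, work):
    # A siglum is just the basename (minus .txt) of each witness file.
    return [os.path.splitext(os.path.basename(p))[0]
            for p in glob.glob(os.path.join(corpus_path, work, '*.txt'))]

sigla_for('/data/corpus', 'T0001')  # e.g. ['base', 'witness-a']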
prompt-toolkit/ptpython
ptpython/history_browser.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/history_browser.py#L97-L104
def _create_popup_window(title, body):
    """
    Return the layout for a pop-up window. It consists of a title bar showing
    the `title` text, and a body layout. The window is surrounded by borders.
    """
    assert isinstance(title, six.text_type)
    assert isinstance(body, Container)

    return Frame(body=body, title=title)
[ "def", "_create_popup_window", "(", "title", ",", "body", ")", ":", "assert", "isinstance", "(", "title", ",", "six", ".", "text_type", ")", "assert", "isinstance", "(", "body", ",", "Container", ")", "return", "Frame", "(", "body", "=", "body", ",", "title", "=", "title", ")" ]
Return the layout for a pop-up window. It consists of a title bar showing the `title` text, and a body layout. The window is surrounded by borders.
[ "Return", "the", "layout", "for", "a", "pop", "-", "up", "window", ".", "It", "consists", "of", "a", "title", "bar", "showing", "the", "title", "text", "and", "a", "body", "layout", ".", "The", "window", "is", "surrounded", "by", "borders", "." ]
python
train
40.875
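A usage sketch; the helper is private to ptpython, and the empty Window below stands in for a real body container (it satisfies the Container assertion in prompt_toolkit 2.x):

from prompt_toolkit.layout.containers import Window
from ptpython.history_browser import _create_popup_window  # private; illustrative

popup = _create_popup_window(u'History', Window())  # a bordered Frame titled 'History'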
inveniosoftware-attic/invenio-utils
invenio_utils/date.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/date.py#L497-L513
def get_time_estimator(total):
    """Given a total amount of items to compute, return a function that,
    if called every time an item is computed (or every step items are
    computed) will give a time estimation for how long it will take to
    compute the whole set of items. The function will return two values:
    the first is the number of seconds that are still needed to compute
    the whole set, the second value is the time in the future when the
    operation is expected to end.
    """
    t1 = time.time()
    count = [0]

    def estimate_needed_time(step=1):
        count[0] += step
        t2 = time.time()
        t3 = 1.0 * (t2 - t1) / count[0] * (total - count[0])
        return t3, t3 + t1
    return estimate_needed_time
[ "def", "get_time_estimator", "(", "total", ")", ":", "t1", "=", "time", ".", "time", "(", ")", "count", "=", "[", "0", "]", "def", "estimate_needed_time", "(", "step", "=", "1", ")", ":", "count", "[", "0", "]", "+=", "step", "t2", "=", "time", ".", "time", "(", ")", "t3", "=", "1.0", "*", "(", "t2", "-", "t1", ")", "/", "count", "[", "0", "]", "*", "(", "total", "-", "count", "[", "0", "]", ")", "return", "t3", ",", "t3", "+", "t1", "return", "estimate_needed_time" ]
Given a total amount of items to compute, return a function that,
if called every time an item is computed (or every step items are
computed) will give a time estimation for how long it will take to
compute the whole set of items. The function will return two values:
the first is the number of seconds that are still needed to compute
the whole set, the second value is the time in the future when the
operation is expected to end.
[ "Given", "a", "total", "amount", "of", "items", "to", "compute", "return", "a", "function", "that", "if", "called", "every", "time", "an", "item", "is", "computed", "(", "or", "every", "step", "items", "are", "computed", ")", "will", "give", "a", "time", "estimation", "for", "how", "long", "it", "will", "take", "to", "compute", "the", "whole", "set", "of", "itmes", ".", "The", "function", "will", "return", "two", "values", ":", "the", "first", "is", "the", "number", "of", "seconds", "that", "are", "still", "needed", "to", "compute", "the", "whole", "set", "the", "second", "value", "is", "the", "time", "in", "the", "future", "when", "the", "operation", "is", "expected", "to", "end", "." ]
python
train
42.823529
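A usage sketch of the returned closure inside a work loop (assumes invenio-utils is installed; the sleep stands in for real work):

import time
from invenio_utils.date import get_time_estimator

eta = get_time_estimator(total=100)
for item in range(100):
    time.sleep(0.01)            # stand-in for real work
    secs_left, end_ts = eta()   # seconds remaining, expected end time (epoch)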
atztogo/phonopy
phonopy/interface/qe.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/interface/qe.py#L416-L447
def run(self, cell, is_full_fc=False, parse_fc=True):
        """Make supercell force constants readable for phonopy

        Note
        ----
        Born effective charges and dielectric constant tensor are read
        from QE output file if they exist. But this means dipole-dipole
        contributions are removed from force constants and this force
        constants matrix is not usable in phonopy.

        Arguments
        ---------
        cell : PhonopyAtoms
            Primitive cell used for QE/PH calculation.
        is_full_fc : Bool, optional, default=False
            Whether to create full or compact force constants.
        parse_fc : Bool, optional, default=True
            Force constants file of QE is not parsed when this is False.
            False may be used when expected to parse only epsilon and born.

        """
        with open(self._filename) as f:
            fc_dct = self._parse_q2r(f)
            self.dimension = fc_dct['dimension']
            self.epsilon = fc_dct['dielectric']
            self.borns = fc_dct['born']
            if parse_fc:
                (self.fc,
                 self.primitive,
                 self.supercell) = self._arrange_supercell_fc(
                     cell, fc_dct['fc'], is_full_fc=is_full_fc)
[ "def", "run", "(", "self", ",", "cell", ",", "is_full_fc", "=", "False", ",", "parse_fc", "=", "True", ")", ":", "with", "open", "(", "self", ".", "_filename", ")", "as", "f", ":", "fc_dct", "=", "self", ".", "_parse_q2r", "(", "f", ")", "self", ".", "dimension", "=", "fc_dct", "[", "'dimension'", "]", "self", ".", "epsilon", "=", "fc_dct", "[", "'dielectric'", "]", "self", ".", "borns", "=", "fc_dct", "[", "'born'", "]", "if", "parse_fc", ":", "(", "self", ".", "fc", ",", "self", ".", "primitive", ",", "self", ".", "supercell", ")", "=", "self", ".", "_arrange_supercell_fc", "(", "cell", ",", "fc_dct", "[", "'fc'", "]", ",", "is_full_fc", "=", "is_full_fc", ")" ]
Make supercell force constants readable for phonopy

Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means dipole-dipole
contributions are removed from force constants and this force
constants matrix is not usable in phonopy.

Arguments
---------
cell : PhonopyAtoms
    Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
    Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
    Force constants file of QE is not parsed when this is False.
    False may be used when expected to parse only epsilon and born.
[ "Make", "supercell", "force", "constants", "readable", "for", "phonopy" ]
python
train
39.03125
vladcalin/gemstone
gemstone/core/decorators.py
https://github.com/vladcalin/gemstone/blob/325a49d17621b9d45ffd2b5eca6f0de284de8ba4/gemstone/core/decorators.py#L14-L32
def event_handler(event_name):
    """
    Decorator for designating a handler for an event type. ``event_name``
    must be a string representing the name of the event type.

    The decorated function must accept a parameter: the body of the received
    event, which will be a Python object that can be encoded as a JSON (dict,
    list, str, int, bool, float or None)

    :param event_name: The name of the event that will be handled. Only one
                       handler per event name is supported by the same
                       microservice.
    """

    def wrapper(func):
        func._event_handler = True
        func._handled_event = event_name
        return func

    return wrapper
[ "def", "event_handler", "(", "event_name", ")", ":", "def", "wrapper", "(", "func", ")", ":", "func", ".", "_event_handler", "=", "True", "func", ".", "_handled_event", "=", "event_name", "return", "func", "return", "wrapper" ]
Decorator for designating a handler for an event type. ``event_name``
must be a string representing the name of the event type.

The decorated function must accept a parameter: the body of the received
event, which will be a Python object that can be encoded as a JSON (dict,
list, str, int, bool, float or None)

:param event_name: The name of the event that will be handled. Only one
                   handler per event name is supported by the same
                   microservice.
[ "Decorator", "for", "designating", "a", "handler", "for", "an", "event", "type", ".", "event_name", "must", "be", "a", "string", "representing", "the", "name", "of", "the", "event", "type", "." ]
python
train
34.894737
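A sketch of the decorator on a plain class (in gemstone this would be a MicroService subclass; that detail is simplified here). The decorator only tags the function, which the framework later discovers:

from gemstone.core.decorators import event_handler  # assumes gemstone is installed

class UserService(object):
    @event_handler("user.registered")
    def on_user_registered(self, body):
        print("welcome,", body["name"])

UserService.on_user_registered._handled_event  # 'user.registered'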
Erotemic/ubelt
ubelt/util_stream.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_stream.py#L28-L37
def isatty(self):  # nocover
        """
        Returns true if the redirect is a terminal.

        Notes:
            Needed for IPython.embed to work properly when this class
            is used to override stdout / stderr.
        """
        return (self.redirect is not None and
                hasattr(self.redirect, 'isatty') and self.redirect.isatty())
[ "def", "isatty", "(", "self", ")", ":", "# nocover", "return", "(", "self", ".", "redirect", "is", "not", "None", "and", "hasattr", "(", "self", ".", "redirect", ",", "'isatty'", ")", "and", "self", ".", "redirect", ".", "isatty", "(", ")", ")" ]
Returns true if the redirect is a terminal.

Notes:
    Needed for IPython.embed to work properly when this class
    is used to override stdout / stderr.
[ "Returns", "true", "of", "the", "redirect", "is", "a", "terminal", "." ]
python
valid
35.3
dereneaton/ipyrad
ipyrad/assemble/write_outfiles.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/write_outfiles.py#L2229-L2455
def vcfchunk(data, optim, sidx, chunk, full):
    """
    Function called within make_vcf to run chunks on separate engines.
    """
    ## empty array to be filled before writing
    ## will not actually be optim*maxlen, extra needs to be trimmed
    maxlen = data._hackersonly["max_fragment_length"] + 20

    ## get data sliced (optim chunks at a time)
    hslice = [chunk, chunk+optim]

    ## read all taxa from disk (faster), then subsample taxa with sidx and
    ## keepmask to greatly reduce the memory load
    with h5py.File(data.database, 'r') as co5:
        afilt = co5["filters"][hslice[0]:hslice[1], :]
        keepmask = afilt.sum(axis=1) == 0
        ## apply mask to edges
        aedge = co5["edges"][hslice[0]:hslice[1], :]
        aedge = aedge[keepmask, :]
        del afilt

    ## same memory subsampling.
    with h5py.File(data.clust_database, 'r') as io5:
        ## apply mask to edges to aseqs and acatg
        #aseqs = io5["seqs"][hslice[0]:hslice[1], :, :].view(np.uint8)
        ## need to read in seqs with upper b/c lowercase allele info
        aseqs = np.char.upper(io5["seqs"][hslice[0]:hslice[1], :, :]).view(np.uint8)
        aseqs = aseqs[keepmask, :]
        aseqs = aseqs[:, sidx, :]
        acatg = io5["catgs"][hslice[0]:hslice[1], :, :, :]
        acatg = acatg[keepmask, :]
        acatg = acatg[:, sidx, :, :]
        achrom = io5["chroms"][hslice[0]:hslice[1]]
        achrom = achrom[keepmask, :]

    LOGGER.info('acatg.shape %s', acatg.shape)

    ## to save memory some columns are stored in diff dtypes until printing
    if not full:
        with h5py.File(data.database, 'r') as co5:
            snps = co5["snps"][hslice[0]:hslice[1], :]
            snps = snps[keepmask, :]
            snps = snps.sum(axis=2)
        snpidxs = snps > 0
        maxsnplen = snps.sum()

    ## vcf info to fill, this is bigger than the actual array
    nrows = maxsnplen
    cols0 = np.zeros(nrows, dtype=np.int64)  #h5py.special_dtype(vlen=bytes))
    cols1 = np.zeros(nrows, dtype=np.uint32)
    cols34 = np.zeros((nrows, 2), dtype="S5")
    cols7 = np.zeros((nrows, 1), dtype="S20")

    ## when nsamples is high this blows up memory (e.g., dim=(5M x 500))
    ## so we'll instead create a list of arrays with 10 samples at a time.
    ## maybe later replace this with a h5 array
    tmph = os.path.join(data.dirs.outfiles, ".tmp.{}.h5".format(hslice[0]))
    htmp = h5py.File(tmph, 'w')
    htmp.create_dataset("vcf", shape=(nrows, sum(sidx)), dtype="S24")

    ## which loci passed all filters
    init = 0

    ## write loci that passed after trimming edges, then write snp string
    locindex = np.where(keepmask)[0]
    for iloc in xrange(aseqs.shape[0]):
        edg = aedge[iloc]
        ## grab all seqs between edges
        if not 'pair' in data.paramsdict["datatype"]:
            seq = aseqs[iloc, :, edg[0]:edg[1]+1]
            catg = acatg[iloc, :, edg[0]:edg[1]+1]
            if not full:
                snpidx = snpidxs[iloc, edg[0]:edg[1]+1]
                seq = seq[:, snpidx]
                catg = catg[:, snpidx]
        else:
            seq = np.hstack([aseqs[iloc, :, edg[0]:edg[1]+1],
                             aseqs[iloc, :, edg[2]:edg[3]+1]])
            catg = np.hstack([acatg[iloc, :, edg[0]:edg[1]+1],
                              acatg[iloc, :, edg[2]:edg[3]+1]])
            if not full:
                snpidx = np.hstack([snpidxs[iloc, edg[0]:edg[1]+1],
                                    snpidxs[iloc, edg[2]:edg[3]+1]])
                seq = seq[:, snpidx]
                catg = catg[:, snpidx]

        ## empty arrs to fill
        alleles = np.zeros((nrows, 4), dtype=np.uint8)
        genos = np.zeros((seq.shape[1], sum(sidx)), dtype="S4")
        genos[:] = "./.:"

        ## ---- build string array ----
        pos = 0
        ## If any < 0 this indicates an anonymous locus in denovo+ref assembly
        if achrom[iloc][0] > 0:
            pos = achrom[iloc][1]
            cols0[init:init+seq.shape[1]] = achrom[iloc][0]
            cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
        else:
            if full:
                cols1[init:init+seq.shape[1]] = pos + np.arange(seq.shape[1]) + 1
            else:
                cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
            cols0[init:init+seq.shape[1]] = (chunk + locindex[iloc] + 1) * -1

        ## fill reference base
        alleles = reftrick(seq, GETCONS)

        ## get the info string column
        tmp0 = np.sum(catg, axis=2)
        tmp1 = tmp0 != 0
        tmp2 = tmp1.sum(axis=1) > 0
        nsamp = np.sum(tmp1, axis=0)
        depth = np.sum(tmp0, axis=0)
        list7 = [["NS={};DP={}".format(i, j)] for i, j in zip(nsamp, depth)]
        if list7:
            cols7[init:init+seq.shape[1]] = list7

        ## default fill cons sites where no variants
        genos[tmp1.T] = "0/0:"

        ## fill cons genotypes for sites with alt alleles for taxa in order
        mask = alleles[:, 1] == 46
        mask += alleles[:, 1] == 45
        obs = alleles[~mask, :]
        alts = seq[:, ~mask]
        who = np.where(mask == False)[0]
        ## fill variable sites
        for site in xrange(alts.shape[1]):
            bases = alts[:, site]
            #LOGGER.info("bases %s", bases)
            ohere = obs[site][obs[site] != 0]
            #LOGGER.info("ohere %s", ohere)
            alls = np.array([DCONS[i] for i in bases], dtype=np.uint32)
            #LOGGER.info("all %s", alls)
            for jdx in xrange(ohere.shape[0]):
                alls[alls == ohere[jdx]] = jdx
            #LOGGER.info("all2 %s", alls)

            ## fill into array
            for cidx in xrange(catg.shape[0]):
                if tmp2[cidx]:
                    if alls[cidx][0] < 5:
                        genos[who[site], cidx] = "/".join(alls[cidx].astype("S1").tolist())+":"
                    else:
                        genos[who[site], cidx] = "./.:"
            #LOGGER.info("genos filled: %s %s %s", who[site], cidx, genos)

        ## build geno+depth strings
        ## for each taxon enter 4 catg values
        fulltmp = np.zeros((seq.shape[1], catg.shape[0]), dtype="S24")
        for cidx in xrange(catg.shape[0]):
            ## fill catgs from catgs
            tmp0 = [str(i.sum()) for i in catg[cidx]]
            tmp1 = [",".join(i) for i in catg[cidx].astype("S4").tolist()]
            tmp2 = ["".join(i+j+":"+k) for i, j, k in zip(genos[:, cidx], tmp0, tmp1)]
            ## fill tmp allcidx
            fulltmp[:, cidx] = tmp2

        ## write to h5 for this locus
        htmp["vcf"][init:init+seq.shape[1], :] = fulltmp

        cols34[init:init+seq.shape[1], 0] = alleles[:, 0].view("S1")
        cols34[init:init+seq.shape[1], 1] = [",".join([j for j in i if j])
                                             for i in alleles[:, 1:].view("S1").tolist()]

        ## advance counter
        init += seq.shape[1]

    ## trim off empty rows if they exist
    withdat = cols0 != 0
    tot = withdat.sum()

    ## get scaffold names
    faidict = {}
    if (data.paramsdict["assembly_method"] in ["reference", "denovo+reference"]) and \
       (os.path.exists(data.paramsdict["reference_sequence"])):
        fai = pd.read_csv(data.paramsdict["reference_sequence"] + ".fai",
                          names=['scaffold', 'size', 'sumsize', 'a', 'b'],
                          sep="\t")
        faidict = {i+1: j for i, j in enumerate(fai.scaffold)}
        try:
            ## This is hax, but it's the only way it will work. The faidict uses positive numbers
            ## for reference sequence mapped loci for the CHROM/POS info, and it uses negative
            ## numbers for anonymous loci. Both are 1 indexed, which is where that last `+ 2` comes from.
            faidict.update({-i: "locus_{}".format(i-1) for i in xrange(chunk+1, chunk + optim + 2)})
            chroms = [faidict[i] for i in cols0]
        except Exception as inst:
            LOGGER.error("Invalid chromosome dictionary indexwat: {}".format(inst))
            LOGGER.debug("faidict {}".format([str(k)+"/"+str(v) for k, v in faidict.items() if "locus" in v]))
            LOGGER.debug("chroms {}".format([x for x in cols0 if x < 0]))
            raise
        cols0 = np.array(chroms)
    #else:
    #    cols0 = np.array(["locus_{}".format(i) for i in cols0-1])

    ## Only write if there is some data that passed filtering
    if tot:
        LOGGER.debug("Writing data to vcf")
        if not full:
            writer = open(data.outfiles.vcf+".{}".format(chunk), 'w')
        else:
            writer = gzip.open(data.outfiles.vcf+".{}".format(chunk), 'w')

        try:
            ## write in iterations b/c it can be freakin huge.
            ## for cols0 and cols1 the 'newaxis' slice and the transpose
            ## are for turning the 1d arrays into column vectors.
            np.savetxt(writer,
                       np.concatenate(
                           (cols0[:tot][np.newaxis].T,
                            cols1[:tot][np.newaxis].T,
                            np.array([["."]]*tot, dtype="S1"),
                            cols34[:tot, :],
                            np.array([["13", "PASS"]]*tot, dtype="S4"),
                            cols7[:tot, :],
                            np.array([["GT:DP:CATG"]]*tot, dtype="S10"),
                            htmp["vcf"][:tot, :],
                            ), axis=1),
                       delimiter="\t", fmt="%s")
        except Exception as inst:
            LOGGER.error("Error building vcf file - ".format(inst))
            raise
        writer.close()

    ## close and remove tmp h5
    htmp.close()
    os.remove(tmph)
[ "def", "vcfchunk", "(", "data", ",", "optim", ",", "sidx", ",", "chunk", ",", "full", ")", ":", "## empty array to be filled before writing", "## will not actually be optim*maxlen, extra needs to be trimmed", "maxlen", "=", "data", ".", "_hackersonly", "[", "\"max_fragment_length\"", "]", "+", "20", "## get data sliced (optim chunks at a time)", "hslice", "=", "[", "chunk", ",", "chunk", "+", "optim", "]", "## read all taxa from disk (faster), then subsample taxa with sidx and", "## keepmask to greatly reduce the memory load", "with", "h5py", ".", "File", "(", "data", ".", "database", ",", "'r'", ")", "as", "co5", ":", "afilt", "=", "co5", "[", "\"filters\"", "]", "[", "hslice", "[", "0", "]", ":", "hslice", "[", "1", "]", ",", ":", "]", "keepmask", "=", "afilt", ".", "sum", "(", "axis", "=", "1", ")", "==", "0", "## apply mask to edges", "aedge", "=", "co5", "[", "\"edges\"", "]", "[", "hslice", "[", "0", "]", ":", "hslice", "[", "1", "]", ",", ":", "]", "aedge", "=", "aedge", "[", "keepmask", ",", ":", "]", "del", "afilt", "## same memory subsampling.", "with", "h5py", ".", "File", "(", "data", ".", "clust_database", ",", "'r'", ")", "as", "io5", ":", "## apply mask to edges to aseqs and acatg", "#aseqs = io5[\"seqs\"][hslice[0]:hslice[1], :, :].view(np.uint8)", "## need to read in seqs with upper b/c lowercase allele info", "aseqs", "=", "np", ".", "char", ".", "upper", "(", "io5", "[", "\"seqs\"", "]", "[", "hslice", "[", "0", "]", ":", "hslice", "[", "1", "]", ",", ":", ",", ":", "]", ")", ".", "view", "(", "np", ".", "uint8", ")", "aseqs", "=", "aseqs", "[", "keepmask", ",", ":", "]", "aseqs", "=", "aseqs", "[", ":", ",", "sidx", ",", ":", "]", "acatg", "=", "io5", "[", "\"catgs\"", "]", "[", "hslice", "[", "0", "]", ":", "hslice", "[", "1", "]", ",", ":", ",", ":", ",", ":", "]", "acatg", "=", "acatg", "[", "keepmask", ",", ":", "]", "acatg", "=", "acatg", "[", ":", ",", "sidx", ",", ":", ",", ":", "]", "achrom", "=", "io5", "[", "\"chroms\"", "]", "[", "hslice", "[", "0", "]", ":", "hslice", "[", "1", "]", "]", "achrom", "=", "achrom", "[", "keepmask", ",", ":", "]", "LOGGER", ".", "info", "(", "'acatg.shape %s'", ",", "acatg", ".", "shape", ")", "## to save memory some columns are stored in diff dtypes until printing", "if", "not", "full", ":", "with", "h5py", ".", "File", "(", "data", ".", "database", ",", "'r'", ")", "as", "co5", ":", "snps", "=", "co5", "[", "\"snps\"", "]", "[", "hslice", "[", "0", "]", ":", "hslice", "[", "1", "]", ",", ":", "]", "snps", "=", "snps", "[", "keepmask", ",", ":", "]", "snps", "=", "snps", ".", "sum", "(", "axis", "=", "2", ")", "snpidxs", "=", "snps", ">", "0", "maxsnplen", "=", "snps", ".", "sum", "(", ")", "## vcf info to fill, this is bigger than the actual array", "nrows", "=", "maxsnplen", "cols0", "=", "np", ".", "zeros", "(", "nrows", ",", "dtype", "=", "np", ".", "int64", ")", "#h5py.special_dtype(vlen=bytes))", "cols1", "=", "np", ".", "zeros", "(", "nrows", ",", "dtype", "=", "np", ".", "uint32", ")", "cols34", "=", "np", ".", "zeros", "(", "(", "nrows", ",", "2", ")", ",", "dtype", "=", "\"S5\"", ")", "cols7", "=", "np", ".", "zeros", "(", "(", "nrows", ",", "1", ")", ",", "dtype", "=", "\"S20\"", ")", "## when nsamples is high this blows up memory (e.g., dim=(5M x 500))", "## so we'll instead create a list of arrays with 10 samples at a time.", "## maybe later replace this with a h5 array", "tmph", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "outfiles", ",", "\".tmp.{}.h5\"", ".", "format", "(", "hslice", 
"[", "0", "]", ")", ")", "htmp", "=", "h5py", ".", "File", "(", "tmph", ",", "'w'", ")", "htmp", ".", "create_dataset", "(", "\"vcf\"", ",", "shape", "=", "(", "nrows", ",", "sum", "(", "sidx", ")", ")", ",", "dtype", "=", "\"S24\"", ")", "## which loci passed all filters", "init", "=", "0", "## write loci that passed after trimming edges, then write snp string", "locindex", "=", "np", ".", "where", "(", "keepmask", ")", "[", "0", "]", "for", "iloc", "in", "xrange", "(", "aseqs", ".", "shape", "[", "0", "]", ")", ":", "edg", "=", "aedge", "[", "iloc", "]", "## grab all seqs between edges", "if", "not", "'pair'", "in", "data", ".", "paramsdict", "[", "\"datatype\"", "]", ":", "seq", "=", "aseqs", "[", "iloc", ",", ":", ",", "edg", "[", "0", "]", ":", "edg", "[", "1", "]", "+", "1", "]", "catg", "=", "acatg", "[", "iloc", ",", ":", ",", "edg", "[", "0", "]", ":", "edg", "[", "1", "]", "+", "1", "]", "if", "not", "full", ":", "snpidx", "=", "snpidxs", "[", "iloc", ",", "edg", "[", "0", "]", ":", "edg", "[", "1", "]", "+", "1", "]", "seq", "=", "seq", "[", ":", ",", "snpidx", "]", "catg", "=", "catg", "[", ":", ",", "snpidx", "]", "else", ":", "seq", "=", "np", ".", "hstack", "(", "[", "aseqs", "[", "iloc", ",", ":", ",", "edg", "[", "0", "]", ":", "edg", "[", "1", "]", "+", "1", "]", ",", "aseqs", "[", "iloc", ",", ":", ",", "edg", "[", "2", "]", ":", "edg", "[", "3", "]", "+", "1", "]", "]", ")", "catg", "=", "np", ".", "hstack", "(", "[", "acatg", "[", "iloc", ",", ":", ",", "edg", "[", "0", "]", ":", "edg", "[", "1", "]", "+", "1", "]", ",", "acatg", "[", "iloc", ",", ":", ",", "edg", "[", "2", "]", ":", "edg", "[", "3", "]", "+", "1", "]", "]", ")", "if", "not", "full", ":", "snpidx", "=", "np", ".", "hstack", "(", "[", "snpidxs", "[", "iloc", ",", "edg", "[", "0", "]", ":", "edg", "[", "1", "]", "+", "1", "]", ",", "snpidxs", "[", "iloc", ",", "edg", "[", "2", "]", ":", "edg", "[", "3", "]", "+", "1", "]", "]", ")", "seq", "=", "seq", "[", ":", ",", "snpidx", "]", "catg", "=", "catg", "[", ":", ",", "snpidx", "]", "## empty arrs to fill", "alleles", "=", "np", ".", "zeros", "(", "(", "nrows", ",", "4", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "genos", "=", "np", ".", "zeros", "(", "(", "seq", ".", "shape", "[", "1", "]", ",", "sum", "(", "sidx", ")", ")", ",", "dtype", "=", "\"S4\"", ")", "genos", "[", ":", "]", "=", "\"./.:\"", "## ---- build string array ----", "pos", "=", "0", "## If any < 0 this indicates an anonymous locus in denovo+ref assembly", "if", "achrom", "[", "iloc", "]", "[", "0", "]", ">", "0", ":", "pos", "=", "achrom", "[", "iloc", "]", "[", "1", "]", "cols0", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", "]", "=", "achrom", "[", "iloc", "]", "[", "0", "]", "cols1", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", "]", "=", "pos", "+", "np", ".", "where", "(", "snpidx", ")", "[", "0", "]", "+", "1", "else", ":", "if", "full", ":", "cols1", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", "]", "=", "pos", "+", "np", ".", "arange", "(", "seq", ".", "shape", "[", "1", "]", ")", "+", "1", "else", ":", "cols1", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", "]", "=", "pos", "+", "np", ".", "where", "(", "snpidx", ")", "[", "0", "]", "+", "1", "cols0", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", "]", "=", "(", "chunk", "+", "locindex", "[", "iloc", "]", "+", "1", ")", "*", "-", "1", "## fill reference base", "alleles", "=", "reftrick", "(", "seq", ",", "GETCONS", ")", 
"## get the info string column", "tmp0", "=", "np", ".", "sum", "(", "catg", ",", "axis", "=", "2", ")", "tmp1", "=", "tmp0", "!=", "0", "tmp2", "=", "tmp1", ".", "sum", "(", "axis", "=", "1", ")", ">", "0", "nsamp", "=", "np", ".", "sum", "(", "tmp1", ",", "axis", "=", "0", ")", "depth", "=", "np", ".", "sum", "(", "tmp0", ",", "axis", "=", "0", ")", "list7", "=", "[", "[", "\"NS={};DP={}\"", ".", "format", "(", "i", ",", "j", ")", "]", "for", "i", ",", "j", "in", "zip", "(", "nsamp", ",", "depth", ")", "]", "if", "list7", ":", "cols7", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", "]", "=", "list7", "## default fill cons sites where no variants", "genos", "[", "tmp1", ".", "T", "]", "=", "\"0/0:\"", "## fill cons genotypes for sites with alt alleles for taxa in order", "mask", "=", "alleles", "[", ":", ",", "1", "]", "==", "46", "mask", "+=", "alleles", "[", ":", ",", "1", "]", "==", "45", "obs", "=", "alleles", "[", "~", "mask", ",", ":", "]", "alts", "=", "seq", "[", ":", ",", "~", "mask", "]", "who", "=", "np", ".", "where", "(", "mask", "==", "False", ")", "[", "0", "]", "## fill variable sites", "for", "site", "in", "xrange", "(", "alts", ".", "shape", "[", "1", "]", ")", ":", "bases", "=", "alts", "[", ":", ",", "site", "]", "#LOGGER.info(\"bases %s\", bases)", "ohere", "=", "obs", "[", "site", "]", "[", "obs", "[", "site", "]", "!=", "0", "]", "#LOGGER.info(\"ohere %s\", ohere)", "alls", "=", "np", ".", "array", "(", "[", "DCONS", "[", "i", "]", "for", "i", "in", "bases", "]", ",", "dtype", "=", "np", ".", "uint32", ")", "#LOGGER.info(\"all %s\", alls)", "for", "jdx", "in", "xrange", "(", "ohere", ".", "shape", "[", "0", "]", ")", ":", "alls", "[", "alls", "==", "ohere", "[", "jdx", "]", "]", "=", "jdx", "#LOGGER.info(\"all2 %s\", alls)", "## fill into array", "for", "cidx", "in", "xrange", "(", "catg", ".", "shape", "[", "0", "]", ")", ":", "if", "tmp2", "[", "cidx", "]", ":", "if", "alls", "[", "cidx", "]", "[", "0", "]", "<", "5", ":", "genos", "[", "who", "[", "site", "]", ",", "cidx", "]", "=", "\"/\"", ".", "join", "(", "alls", "[", "cidx", "]", ".", "astype", "(", "\"S1\"", ")", ".", "tolist", "(", ")", ")", "+", "\":\"", "else", ":", "genos", "[", "who", "[", "site", "]", ",", "cidx", "]", "=", "\"./.:\"", "#LOGGER.info(\"genos filled: %s %s %s\", who[site], cidx, genos)", "## build geno+depth strings", "## for each taxon enter 4 catg values", "fulltmp", "=", "np", ".", "zeros", "(", "(", "seq", ".", "shape", "[", "1", "]", ",", "catg", ".", "shape", "[", "0", "]", ")", ",", "dtype", "=", "\"S24\"", ")", "for", "cidx", "in", "xrange", "(", "catg", ".", "shape", "[", "0", "]", ")", ":", "## fill catgs from catgs", "tmp0", "=", "[", "str", "(", "i", ".", "sum", "(", ")", ")", "for", "i", "in", "catg", "[", "cidx", "]", "]", "tmp1", "=", "[", "\",\"", ".", "join", "(", "i", ")", "for", "i", "in", "catg", "[", "cidx", "]", ".", "astype", "(", "\"S4\"", ")", ".", "tolist", "(", ")", "]", "tmp2", "=", "[", "\"\"", ".", "join", "(", "i", "+", "j", "+", "\":\"", "+", "k", ")", "for", "i", ",", "j", ",", "k", "in", "zip", "(", "genos", "[", ":", ",", "cidx", "]", ",", "tmp0", ",", "tmp1", ")", "]", "## fill tmp allcidx", "fulltmp", "[", ":", ",", "cidx", "]", "=", "tmp2", "## write to h5 for this locus", "htmp", "[", "\"vcf\"", "]", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", ",", ":", "]", "=", "fulltmp", "cols34", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", ",", "0", "]", "=", "alleles", "[", ":", ",", "0", "]", 
".", "view", "(", "\"S1\"", ")", "cols34", "[", "init", ":", "init", "+", "seq", ".", "shape", "[", "1", "]", ",", "1", "]", "=", "[", "\",\"", ".", "join", "(", "[", "j", "for", "j", "in", "i", "if", "j", "]", ")", "for", "i", "in", "alleles", "[", ":", ",", "1", ":", "]", ".", "view", "(", "\"S1\"", ")", ".", "tolist", "(", ")", "]", "## advance counter", "init", "+=", "seq", ".", "shape", "[", "1", "]", "## trim off empty rows if they exist", "withdat", "=", "cols0", "!=", "0", "tot", "=", "withdat", ".", "sum", "(", ")", "## get scaffold names", "faidict", "=", "{", "}", "if", "(", "data", ".", "paramsdict", "[", "\"assembly_method\"", "]", "in", "[", "\"reference\"", ",", "\"denovo+reference\"", "]", ")", "and", "(", "os", ".", "path", ".", "exists", "(", "data", ".", "paramsdict", "[", "\"reference_sequence\"", "]", ")", ")", ":", "fai", "=", "pd", ".", "read_csv", "(", "data", ".", "paramsdict", "[", "\"reference_sequence\"", "]", "+", "\".fai\"", ",", "names", "=", "[", "'scaffold'", ",", "'size'", ",", "'sumsize'", ",", "'a'", ",", "'b'", "]", ",", "sep", "=", "\"\\t\"", ")", "faidict", "=", "{", "i", "+", "1", ":", "j", "for", "i", ",", "j", "in", "enumerate", "(", "fai", ".", "scaffold", ")", "}", "try", ":", "## This is hax, but it's the only way it will work. The faidict uses positive numbers", "## for reference sequence mapped loci for the CHROM/POS info, and it uses negative", "## numbers for anonymous loci. Both are 1 indexed, which is where that last `+ 2` comes from.", "faidict", ".", "update", "(", "{", "-", "i", ":", "\"locus_{}\"", ".", "format", "(", "i", "-", "1", ")", "for", "i", "in", "xrange", "(", "chunk", "+", "1", ",", "chunk", "+", "optim", "+", "2", ")", "}", ")", "chroms", "=", "[", "faidict", "[", "i", "]", "for", "i", "in", "cols0", "]", "except", "Exception", "as", "inst", ":", "LOGGER", ".", "error", "(", "\"Invalid chromosome dictionary indexwat: {}\"", ".", "format", "(", "inst", ")", ")", "LOGGER", ".", "debug", "(", "\"faidict {}\"", ".", "format", "(", "[", "str", "(", "k", ")", "+", "\"/\"", "+", "str", "(", "v", ")", "for", "k", ",", "v", "in", "faidict", ".", "items", "(", ")", "if", "\"locus\"", "in", "v", "]", ")", ")", "LOGGER", ".", "debug", "(", "\"chroms {}\"", ".", "format", "(", "[", "x", "for", "x", "in", "cols0", "if", "x", "<", "0", "]", ")", ")", "raise", "cols0", "=", "np", ".", "array", "(", "chroms", ")", "#else:", "# cols0 = np.array([\"locus_{}\".format(i) for i in cols0-1])", "## Only write if there is some data that passed filtering", "if", "tot", ":", "LOGGER", ".", "debug", "(", "\"Writing data to vcf\"", ")", "if", "not", "full", ":", "writer", "=", "open", "(", "data", ".", "outfiles", ".", "vcf", "+", "\".{}\"", ".", "format", "(", "chunk", ")", ",", "'w'", ")", "else", ":", "writer", "=", "gzip", ".", "open", "(", "data", ".", "outfiles", ".", "vcf", "+", "\".{}\"", ".", "format", "(", "chunk", ")", ",", "'w'", ")", "try", ":", "## write in iterations b/c it can be freakin huge.", "## for cols0 and cols1 the 'newaxis' slice and the transpose", "## are for turning the 1d arrays into column vectors.", "np", ".", "savetxt", "(", "writer", ",", "np", ".", "concatenate", "(", "(", "cols0", "[", ":", "tot", "]", "[", "np", ".", "newaxis", "]", ".", "T", ",", "cols1", "[", ":", "tot", "]", "[", "np", ".", "newaxis", "]", ".", "T", ",", "np", ".", "array", "(", "[", "[", "\".\"", "]", "]", "*", "tot", ",", "dtype", "=", "\"S1\"", ")", ",", "cols34", "[", ":", "tot", ",", ":", "]", ",", "np", ".", "array", "(", "[", "[", "\"13\"", 
",", "\"PASS\"", "]", "]", "*", "tot", ",", "dtype", "=", "\"S4\"", ")", ",", "cols7", "[", ":", "tot", ",", ":", "]", ",", "np", ".", "array", "(", "[", "[", "\"GT:DP:CATG\"", "]", "]", "*", "tot", ",", "dtype", "=", "\"S10\"", ")", ",", "htmp", "[", "\"vcf\"", "]", "[", ":", "tot", ",", ":", "]", ",", ")", ",", "axis", "=", "1", ")", ",", "delimiter", "=", "\"\\t\"", ",", "fmt", "=", "\"%s\"", ")", "except", "Exception", "as", "inst", ":", "LOGGER", ".", "error", "(", "\"Error building vcf file - \"", ".", "format", "(", "inst", ")", ")", "raise", "writer", ".", "close", "(", ")", "## close and remove tmp h5", "htmp", ".", "close", "(", ")", "os", ".", "remove", "(", "tmph", ")" ]
Function called within make_vcf to run chunks on separate engines.
[ "Function", "called", "within", "make_vcf", "to", "run", "chunks", "on", "separate", "engines", "." ]
python
valid
41.268722
ranaroussi/ezibpy
ezibpy/ezibpy.py
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L1931-L1941
def cancelHistoricalData(self, contracts=None):
        """ cancel historical data stream """
        if contracts == None:
            contracts = list(self.contracts.values())
        elif not isinstance(contracts, list):
            contracts = [contracts]

        for contract in contracts:
            # tickerId = self.tickerId(contract.m_symbol)
            tickerId = self.tickerId(self.contractString(contract))
            self.ibConn.cancelHistoricalData(tickerId=tickerId)
[ "def", "cancelHistoricalData", "(", "self", ",", "contracts", "=", "None", ")", ":", "if", "contracts", "==", "None", ":", "contracts", "=", "list", "(", "self", ".", "contracts", ".", "values", "(", ")", ")", "elif", "not", "isinstance", "(", "contracts", ",", "list", ")", ":", "contracts", "=", "[", "contracts", "]", "for", "contract", "in", "contracts", ":", "# tickerId = self.tickerId(contract.m_symbol)", "tickerId", "=", "self", ".", "tickerId", "(", "self", ".", "contractString", "(", "contract", ")", ")", "self", ".", "ibConn", ".", "cancelHistoricalData", "(", "tickerId", "=", "tickerId", ")" ]
cancel historical data stream
[ "cancel", "historical", "data", "stream" ]
python
train
43.181818
oceanprotocol/squid-py
squid_py/brizo/brizo.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/brizo/brizo.py#L204-L218
def write_file(response, destination_folder, file_name): """ Write the response content in a file in the destination folder. :param response: Response :param destination_folder: Destination folder, string :param file_name: File name, string :return: None """ if response.status_code == 200: with open(os.path.join(destination_folder, file_name), 'wb') as f: for chunk in response.iter_content(chunk_size=None): f.write(chunk) logger.info(f'Saved downloaded file in {f.name}') else: logger.warning(f'consume failed: {response.reason}')
[ "def", "write_file", "(", "response", ",", "destination_folder", ",", "file_name", ")", ":", "if", "response", ".", "status_code", "==", "200", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "destination_folder", ",", "file_name", ")", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "None", ")", ":", "f", ".", "write", "(", "chunk", ")", "logger", ".", "info", "(", "f'Saved downloaded file in {f.name}'", ")", "else", ":", "logger", ".", "warning", "(", "f'consume failed: {response.reason}'", ")" ]
Write the response content in a file in the destination folder. :param response: Response :param destination_folder: Destination folder, string :param file_name: File name, string :return: None
[ "Write", "the", "response", "content", "in", "a", "file", "in", "the", "destination", "folder", ".", ":", "param", "response", ":", "Response", ":", "param", "destination_folder", ":", "Destination", "folder", "string", ":", "param", "file_name", ":", "File", "name", "string", ":", "return", ":", "None" ]
python
train
44.266667
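A short driving sketch for write_file above; it assumes the function is reachable as a static method on Brizo, as the record's module path suggests, and the URL and folder are placeholders. On HTTP 200 the body is streamed to disk; any other status is only logged as a warning, so callers should not rely on a return value.

import requests
from squid_py.brizo.brizo import Brizo  # module path taken from the record; static access assumed

response = requests.get("https://example.com/dataset.csv", stream=True)  # placeholder URL
# Streams the body to ./downloads/dataset.csv and logs the outcome; the
# function itself returns None.
Brizo.write_file(response, "./downloads", "dataset.csv")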
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAMarket/QAShipaneBroker.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAMarket/QAShipaneBroker.py#L297-L372
def query_orders(self, accounts, status='filled'): """查询订单 Arguments: accounts {[type]} -- [description] Keyword Arguments: status {str} -- 'open' 待成交 'filled' 成交 (default: {'filled'}) Returns: [type] -- [description] """ try: data = self.call("orders", {'client': accounts, 'status': status}) if data is not None: orders = data.get('dataTable', False) order_headers = orders['columns'] if ('成交状态' in order_headers or '状态说明' in order_headers) and ('备注' in order_headers): order_headers[order_headers.index('备注')] = '废弃' order_headers = [cn_en_compare[item] for item in order_headers] order_all = pd.DataFrame( orders['rows'], columns=order_headers ).assign(account_cookie=accounts) order_all.towards = order_all.towards.apply( lambda x: trade_towards_cn_en[x] ) if 'order_time' in order_headers: # 这是order_status order_all['status'] = order_all.status.apply( lambda x: order_status_cn_en[x] ) if 'order_date' not in order_headers: order_all.order_time = order_all.order_time.apply( lambda x: QA_util_get_order_datetime( dt='{} {}'.format(datetime.date.today(), x) ) ) else: order_all = order_all.assign( order_time=order_all.order_date .apply(QA_util_date_int2str) + ' ' + order_all.order_time ) if 'trade_time' in order_headers: order_all.trade_time = order_all.trade_time.apply( lambda x: '{} {}'.format(datetime.date.today(), x) ) if status is 'filled': return order_all.loc[:, self.dealstatus_headers].set_index( ['account_cookie', 'realorder_id'] ).sort_index() else: return order_all.loc[:, self.orderstatus_headers].set_index( ['account_cookie', 'realorder_id'] ).sort_index() else: print('response is None') return False except Exception as e: print(e) return False
[ "def", "query_orders", "(", "self", ",", "accounts", ",", "status", "=", "'filled'", ")", ":", "try", ":", "data", "=", "self", ".", "call", "(", "\"orders\"", ",", "{", "'client'", ":", "accounts", ",", "'status'", ":", "status", "}", ")", "if", "data", "is", "not", "None", ":", "orders", "=", "data", ".", "get", "(", "'dataTable'", ",", "False", ")", "order_headers", "=", "orders", "[", "'columns'", "]", "if", "(", "'成交状态'", "in", "order_headers", "or", "'状态说明'", "in", "order_headers", ")", "and", "(", "'备注'", "in", "order_headers", ")", ":", "order_headers", "[", "order_headers", ".", "index", "(", "'备注'", ")", "]", "=", "'废弃'", "order_headers", "=", "[", "cn_en_compare", "[", "item", "]", "for", "item", "in", "order_headers", "]", "order_all", "=", "pd", ".", "DataFrame", "(", "orders", "[", "'rows'", "]", ",", "columns", "=", "order_headers", ")", ".", "assign", "(", "account_cookie", "=", "accounts", ")", "order_all", ".", "towards", "=", "order_all", ".", "towards", ".", "apply", "(", "lambda", "x", ":", "trade_towards_cn_en", "[", "x", "]", ")", "if", "'order_time'", "in", "order_headers", ":", "# 这是order_status", "order_all", "[", "'status'", "]", "=", "order_all", ".", "status", ".", "apply", "(", "lambda", "x", ":", "order_status_cn_en", "[", "x", "]", ")", "if", "'order_date'", "not", "in", "order_headers", ":", "order_all", ".", "order_time", "=", "order_all", ".", "order_time", ".", "apply", "(", "lambda", "x", ":", "QA_util_get_order_datetime", "(", "dt", "=", "'{} {}'", ".", "format", "(", "datetime", ".", "date", ".", "today", "(", ")", ",", "x", ")", ")", ")", "else", ":", "order_all", "=", "order_all", ".", "assign", "(", "order_time", "=", "order_all", ".", "order_date", ".", "apply", "(", "QA_util_date_int2str", ")", "+", "' '", "+", "order_all", ".", "order_time", ")", "if", "'trade_time'", "in", "order_headers", ":", "order_all", ".", "trade_time", "=", "order_all", ".", "trade_time", ".", "apply", "(", "lambda", "x", ":", "'{} {}'", ".", "format", "(", "datetime", ".", "date", ".", "today", "(", ")", ",", "x", ")", ")", "if", "status", "is", "'filled'", ":", "return", "order_all", ".", "loc", "[", ":", ",", "self", ".", "dealstatus_headers", "]", ".", "set_index", "(", "[", "'account_cookie'", ",", "'realorder_id'", "]", ")", ".", "sort_index", "(", ")", "else", ":", "return", "order_all", ".", "loc", "[", ":", ",", "self", ".", "orderstatus_headers", "]", ".", "set_index", "(", "[", "'account_cookie'", ",", "'realorder_id'", "]", ")", ".", "sort_index", "(", ")", "else", ":", "print", "(", "'response is None'", ")", "return", "False", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "return", "False" ]
Query orders. Arguments: accounts {[type]} -- [description] Keyword Arguments: status {str} -- 'open' pending (awaiting fill) 'filled' filled (default: {'filled'}) Returns: [type] -- [description]
[ "Query", "orders" ]
python
train
39.302632
tox-dev/tox-travis
src/tox_travis/hooks.py
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/hooks.py#L31-L59
def tox_configure(config): """Check for the presence of the added options.""" if 'TRAVIS' not in os.environ: return ini = config._cfg # envlist if 'TOXENV' not in os.environ and not config.option.env: envlist = detect_envlist(ini) undeclared = set(envlist) - set(config.envconfigs) if undeclared: print('Matching undeclared envs is deprecated. Be sure all the ' 'envs that Tox should run are declared in the tox config.', file=sys.stderr) autogen_envconfigs(config, undeclared) config.envlist = envlist # Override ignore_outcomes if override_ignore_outcome(ini): for envconfig in config.envconfigs.values(): envconfig.ignore_outcome = False # after if config.option.travis_after: print('The after all feature has been deprecated. Check out Travis\' ' 'build stages, which are a better solution. ' 'See https://tox-travis.readthedocs.io/en/stable/after.html ' 'for more details.', file=sys.stderr)
[ "def", "tox_configure", "(", "config", ")", ":", "if", "'TRAVIS'", "not", "in", "os", ".", "environ", ":", "return", "ini", "=", "config", ".", "_cfg", "# envlist", "if", "'TOXENV'", "not", "in", "os", ".", "environ", "and", "not", "config", ".", "option", ".", "env", ":", "envlist", "=", "detect_envlist", "(", "ini", ")", "undeclared", "=", "set", "(", "envlist", ")", "-", "set", "(", "config", ".", "envconfigs", ")", "if", "undeclared", ":", "print", "(", "'Matching undeclared envs is deprecated. Be sure all the '", "'envs that Tox should run are declared in the tox config.'", ",", "file", "=", "sys", ".", "stderr", ")", "autogen_envconfigs", "(", "config", ",", "undeclared", ")", "config", ".", "envlist", "=", "envlist", "# Override ignore_outcomes", "if", "override_ignore_outcome", "(", "ini", ")", ":", "for", "envconfig", "in", "config", ".", "envconfigs", ".", "values", "(", ")", ":", "envconfig", ".", "ignore_outcome", "=", "False", "# after", "if", "config", ".", "option", ".", "travis_after", ":", "print", "(", "'The after all feature has been deprecated. Check out Travis\\' '", "'build stages, which are a better solution. '", "'See https://tox-travis.readthedocs.io/en/stable/after.html '", "'for more details.'", ",", "file", "=", "sys", ".", "stderr", ")" ]
Check for the presence of the added options.
[ "Check", "for", "the", "presence", "of", "the", "added", "options", "." ]
python
train
37.172414
mojaie/chorus
chorus/patty.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/patty.py#L7-L50
def assign_type(mol, force_recalc=False): """ PATTY [Bush et al. J. Inf. Comput. Sci 33 (1993) 756-762] TODO: not yet implemented 1:cation 2:anion 3:donor 4:acceptor 5:polar 6:hydrophobe 7:others """ if "PATTY" in mol.descriptors and not force_recalc: return mol.require("Phys_charge") for i, atom in mol.atoms_iter(): # default is 7 (others) nbrcnt = mol.neighbor_count(i) if atom.charge > 0 or atom.charge_phys > 0 or \ atom.charge_conj > 0 and not atom.n_oxide: atom.type = 1 # cation elif atom.charge < 0 or atom.charge_phys < 0 or \ atom.charge_conj < 0 and not atom.n_oxide: atom.type = 2 # anion elif atom.symbol == "N": if nbrcnt in (1, 2): if atom.pi == 2: atom.type = 3 # donor elif atom.pi == 1: atom.type = 4 # acceptor elif atom.symbol == "O": if nbrcnt == 1 and not atom.pi: atom.type = 5 # polar else: atom.type = 4 # acceptor elif atom.symbol in ("C", "Si", "S", "Se", "P", "As"): ewg = False for n, bond in mol.neighbors(i).items(): natom = mol.atom(n) if natom.symbol in ("N", "O", "S") and atom.pi \ and not (natom.pi == 2 and mol.neighbor_count(n) == 3): # the sp2 adjacent to neg (but not conj tert amine) is 7 ewg = True break if not ewg: atom.type = 6 # hydrophobes elif atom.symbol in ("F", "Cl", "Br", "I") and nbrcnt == 1: atom.type = 6 # typical halogens are hydrophobic mol.descriptors.add("PATTY")
[ "def", "assign_type", "(", "mol", ",", "force_recalc", "=", "False", ")", ":", "if", "\"PATTY\"", "in", "mol", ".", "descriptors", "and", "not", "force_recalc", ":", "return", "mol", ".", "require", "(", "\"Phys_charge\"", ")", "for", "i", ",", "atom", "in", "mol", ".", "atoms_iter", "(", ")", ":", "# default is 7 (others)", "nbrcnt", "=", "mol", ".", "neighbor_count", "(", "i", ")", "if", "atom", ".", "charge", ">", "0", "or", "atom", ".", "charge_phys", ">", "0", "or", "atom", ".", "charge_conj", ">", "0", "and", "not", "atom", ".", "n_oxide", ":", "atom", ".", "type", "=", "1", "# cation", "elif", "atom", ".", "charge", "<", "0", "or", "atom", ".", "charge_phys", "<", "0", "or", "atom", ".", "charge_conj", "<", "0", "and", "not", "atom", ".", "n_oxide", ":", "atom", ".", "type", "=", "2", "# anion", "elif", "atom", ".", "symbol", "==", "\"N\"", ":", "if", "nbrcnt", "in", "(", "1", ",", "2", ")", ":", "if", "atom", ".", "pi", "==", "2", ":", "atom", ".", "type", "=", "3", "# donor", "elif", "atom", ".", "pi", "==", "1", ":", "atom", ".", "type", "=", "4", "# acceptor", "elif", "atom", ".", "symbol", "==", "\"O\"", ":", "if", "nbrcnt", "==", "1", "and", "not", "atom", ".", "pi", ":", "atom", ".", "type", "=", "5", "# polar", "else", ":", "atom", ".", "type", "=", "4", "# acceptor", "elif", "atom", ".", "symbol", "in", "(", "\"C\"", ",", "\"Si\"", ",", "\"S\"", ",", "\"Se\"", ",", "\"P\"", ",", "\"As\"", ")", ":", "ewg", "=", "False", "for", "n", ",", "bond", "in", "mol", ".", "neighbors", "(", "i", ")", ".", "items", "(", ")", ":", "natom", "=", "mol", ".", "atom", "(", "n", ")", "if", "natom", ".", "symbol", "in", "(", "\"N\"", ",", "\"O\"", ",", "\"S\"", ")", "and", "atom", ".", "pi", "and", "not", "(", "natom", ".", "pi", "==", "2", "and", "mol", ".", "neighbor_count", "(", "n", ")", "==", "3", ")", ":", "# the sp2 adjacent to neg (but not conj tert amine) is 7", "ewg", "=", "True", "break", "if", "not", "ewg", ":", "atom", ".", "type", "=", "6", "# hydrophobes", "elif", "atom", ".", "symbol", "in", "(", "\"F\"", ",", "\"Cl\"", ",", "\"Br\"", ",", "\"I\"", ")", "and", "nbrcnt", "==", "1", ":", "atom", ".", "type", "=", "6", "# typical halogens are hydrophobic", "mol", ".", "descriptors", ".", "add", "(", "\"PATTY\"", ")" ]
PATTY [Bush et al. J. Inf. Comput. Sci 33 (1993) 756-762] TODO: not yet implemented 1:cation 2:anion 3:donor 4:acceptor 5:polar 6:hydrophobe 7:others
[ "PATTY", "[", "Bush", "et", "al", ".", "J", ".", "Inf", ".", "Comput", ".", "Sci", "33", "(", "1993", ")", "756", "-", "762", "]", "TODO", ":", "not", "yet", "implemented" ]
python
train
40.363636
pyviz/holoviews
holoviews/plotting/mpl/chart.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/mpl/chart.py#L422-L428
def _process_axsettings(self, hist, lims, ticks): """ Get axis settings options including ticks, x- and y-labels and limits. """ axis_settings = dict(zip(self.axis_settings, [None, None, (None if self.overlaid else ticks)])) return axis_settings
[ "def", "_process_axsettings", "(", "self", ",", "hist", ",", "lims", ",", "ticks", ")", ":", "axis_settings", "=", "dict", "(", "zip", "(", "self", ".", "axis_settings", ",", "[", "None", ",", "None", ",", "(", "None", "if", "self", ".", "overlaid", "else", "ticks", ")", "]", ")", ")", "return", "axis_settings" ]
Get axis settings options including ticks, x- and y-labels and limits.
[ "Get", "axis", "settings", "options", "including", "ticks", "x", "-", "and", "y", "-", "labels", "and", "limits", "." ]
python
train
41
eaton-lab/toytree
toytree/newick.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/newick.py#L362-L401
def write_newick(rootnode, features=None, format=1, format_root_node=True, is_leaf_fn=None, dist_formatter=None, support_formatter=None, name_formatter=None): """ Iteratively export a tree structure and returns its NHX representation. """ newick = [] leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children) for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn): if postorder: newick.append(")") if node.up is not None or format_root_node: newick.append(format_node(node, "internal", format, dist_formatter=dist_formatter, support_formatter=support_formatter, name_formatter=name_formatter)) newick.append(_get_features_string(node, features)) else: if node is not rootnode and node != node.up.children[0]: newick.append(",") if leaf(node): safe_name = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \ str(getattr(node, "name"))) newick.append(format_node(node, "leaf", format, dist_formatter=dist_formatter, support_formatter=support_formatter, name_formatter=name_formatter)) newick.append(_get_features_string(node, features)) else: newick.append("(") newick.append(";") return ''.join(newick)
[ "def", "write_newick", "(", "rootnode", ",", "features", "=", "None", ",", "format", "=", "1", ",", "format_root_node", "=", "True", ",", "is_leaf_fn", "=", "None", ",", "dist_formatter", "=", "None", ",", "support_formatter", "=", "None", ",", "name_formatter", "=", "None", ")", ":", "newick", "=", "[", "]", "leaf", "=", "is_leaf_fn", "if", "is_leaf_fn", "else", "lambda", "n", ":", "not", "bool", "(", "n", ".", "children", ")", "for", "postorder", ",", "node", "in", "rootnode", ".", "iter_prepostorder", "(", "is_leaf_fn", "=", "is_leaf_fn", ")", ":", "if", "postorder", ":", "newick", ".", "append", "(", "\")\"", ")", "if", "node", ".", "up", "is", "not", "None", "or", "format_root_node", ":", "newick", ".", "append", "(", "format_node", "(", "node", ",", "\"internal\"", ",", "format", ",", "dist_formatter", "=", "dist_formatter", ",", "support_formatter", "=", "support_formatter", ",", "name_formatter", "=", "name_formatter", ")", ")", "newick", ".", "append", "(", "_get_features_string", "(", "node", ",", "features", ")", ")", "else", ":", "if", "node", "is", "not", "rootnode", "and", "node", "!=", "node", ".", "up", ".", "children", "[", "0", "]", ":", "newick", ".", "append", "(", "\",\"", ")", "if", "leaf", "(", "node", ")", ":", "safe_name", "=", "re", ".", "sub", "(", "\"[\"", "+", "_ILEGAL_NEWICK_CHARS", "+", "\"]\"", ",", "\"_\"", ",", "str", "(", "getattr", "(", "node", ",", "\"name\"", ")", ")", ")", "newick", ".", "append", "(", "format_node", "(", "node", ",", "\"leaf\"", ",", "format", ",", "dist_formatter", "=", "dist_formatter", ",", "support_formatter", "=", "support_formatter", ",", "name_formatter", "=", "name_formatter", ")", ")", "newick", ".", "append", "(", "_get_features_string", "(", "node", ",", "features", ")", ")", "else", ":", "newick", ".", "append", "(", "\"(\"", ")", "newick", ".", "append", "(", "\";\"", ")", "return", "''", ".", "join", "(", "newick", ")" ]
Iteratively export a tree structure and returns its NHX representation.
[ "Iteratively", "export", "a", "tree", "structure", "and", "returns", "its", "NHX", "representation", "." ]
python
train
39.5
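A usage sketch for write_newick; toytree.tree() and the .treenode attribute are assumed from recent toytree versions, and the input tree is a toy example.

import toytree
from toytree.newick import write_newick  # module path from the record above

tre = toytree.tree("((a:1,b:1):1,c:2);")  # parse a small newick string (API assumed)
# format=1 writes names with branch lengths; passing features=["support"]
# would append NHX-style "[&&NHX:support=...]" blocks for that attribute.
nwk = write_newick(tre.treenode, format=1)
print(nwk)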
itamarst/eliot
eliot/_action.py
https://github.com/itamarst/eliot/blob/c03c96520c5492fadfc438b4b0f6336e2785ba2d/eliot/_action.py#L643-L649
def children(self): """ The list of child messages and actions sorted by task level, excluding the start and end messages. """ return pvector( sorted(self._children.values(), key=lambda m: m.task_level))
[ "def", "children", "(", "self", ")", ":", "return", "pvector", "(", "sorted", "(", "self", ".", "_children", ".", "values", "(", ")", ",", "key", "=", "lambda", "m", ":", "m", ".", "task_level", ")", ")" ]
The list of child messages and actions sorted by task level, excluding the start and end messages.
[ "The", "list", "of", "child", "messages", "and", "actions", "sorted", "by", "task", "level", "excluding", "the", "start", "and", "end", "messages", "." ]
python
train
35.571429
urinieto/msaf
msaf/algorithms/fmc2d/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/segmenter.py#L49-L93
def feat_segments_to_2dfmc_max(feat_segments, offset=4): """From a list of feature segments, return a list of 2D-Fourier Magnitude Coefs using the maximum segment size as main size and zero pad the rest. Parameters ---------- feat_segments: list List of segments, one for each boundary interval. offset: int >= 0 Number of frames to ignore from beginning and end of each segment. Returns ------- fmcs: np.ndarray Tensor containing the 2D-FMC matrices, one matrix per segment. """ if len(feat_segments) == 0: return [] # Get maximum segment size max_len = max([feat_segment.shape[0] for feat_segment in feat_segments]) fmcs = [] for feat_segment in feat_segments: # Zero pad if needed X = np.zeros((max_len, feat_segment.shape[1])) # Remove a set of frames in the beginning an end of the segment if feat_segment.shape[0] <= offset or offset == 0: X[:feat_segment.shape[0], :] = feat_segment else: X[:feat_segment.shape[0] - offset, :] = \ feat_segment[offset // 2:-offset // 2, :] # Compute the 2D-FMC try: fmcs.append(utils2d.compute_ffmc2d(X)) except: logging.warning("Couldn't compute the 2D Fourier Transform") fmcs.append(np.zeros((X.shape[0] * X.shape[1]) // 2 + 1)) # Normalize # fmcs[-1] = fmcs[-1] / float(fmcs[-1].max()) return np.asarray(fmcs)
[ "def", "feat_segments_to_2dfmc_max", "(", "feat_segments", ",", "offset", "=", "4", ")", ":", "if", "len", "(", "feat_segments", ")", "==", "0", ":", "return", "[", "]", "# Get maximum segment size", "max_len", "=", "max", "(", "[", "feat_segment", ".", "shape", "[", "0", "]", "for", "feat_segment", "in", "feat_segments", "]", ")", "fmcs", "=", "[", "]", "for", "feat_segment", "in", "feat_segments", ":", "# Zero pad if needed", "X", "=", "np", ".", "zeros", "(", "(", "max_len", ",", "feat_segment", ".", "shape", "[", "1", "]", ")", ")", "# Remove a set of frames in the beginning an end of the segment", "if", "feat_segment", ".", "shape", "[", "0", "]", "<=", "offset", "or", "offset", "==", "0", ":", "X", "[", ":", "feat_segment", ".", "shape", "[", "0", "]", ",", ":", "]", "=", "feat_segment", "else", ":", "X", "[", ":", "feat_segment", ".", "shape", "[", "0", "]", "-", "offset", ",", ":", "]", "=", "feat_segment", "[", "offset", "//", "2", ":", "-", "offset", "//", "2", ",", ":", "]", "# Compute the 2D-FMC", "try", ":", "fmcs", ".", "append", "(", "utils2d", ".", "compute_ffmc2d", "(", "X", ")", ")", "except", ":", "logging", ".", "warning", "(", "\"Couldn't compute the 2D Fourier Transform\"", ")", "fmcs", ".", "append", "(", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "0", "]", "*", "X", ".", "shape", "[", "1", "]", ")", "//", "2", "+", "1", ")", ")", "# Normalize", "# fmcs[-1] = fmcs[-1] / float(fmcs[-1].max())", "return", "np", ".", "asarray", "(", "fmcs", ")" ]
From a list of feature segments, return a list of 2D-Fourier Magnitude Coefs using the maximum segment size as main size and zero pad the rest. Parameters ---------- feat_segments: list List of segments, one for each boundary interval. offset: int >= 0 Number of frames to ignore from beginning and end of each segment. Returns ------- fmcs: np.ndarray Tensor containing the 2D-FMC matrices, one matrix per segment.
[ "From", "a", "list", "of", "feature", "segments", "return", "a", "list", "of", "2D", "-", "Fourier", "Magnitude", "Coefs", "using", "the", "maximum", "segment", "size", "as", "main", "size", "and", "zero", "pad", "the", "rest", "." ]
python
test
32.644444
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1909-L1919
def atanh(x, context=None): """ Return the inverse hyperbolic tangent of x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_atanh, (BigFloat._implicit_convert(x),), context, )
[ "def", "atanh", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_atanh", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ",", ")" ]
Return the inverse hyperbolic tangent of x.
[ "Return", "the", "inverse", "hyperbolic", "tangent", "of", "x", "." ]
python
train
21.545455
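atanh above follows the library's uniform pattern of delegating to MPFR in the current context; a quick sketch of calling it, using bigfloat's precision context manager:

from bigfloat import atanh, precision

# atanh is the inverse of tanh: it is 0 at 0 and diverges toward
# +/-infinity as x approaches +/-1 from inside the interval.
print(atanh(0.5))            # rounded in the default (53-bit) context
with precision(100):
    print(atanh(0.5))        # same value, rounded to 100 bits of precision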
google/grr
grr/server/grr_response_server/gui/http_api.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/http_api.py#L119-L169
def _GetArgsFromRequest(self, request, method_metadata, route_args): """Builds args struct out of HTTP request.""" format_mode = GetRequestFormatMode(request, method_metadata) if request.method in ["GET", "HEAD"]: if method_metadata.args_type: unprocessed_request = request.args if hasattr(unprocessed_request, "dict"): unprocessed_request = unprocessed_request.dict() args = method_metadata.args_type() for type_info in args.type_infos: if type_info.name in route_args: self._SetField(args, type_info, route_args[type_info.name]) elif type_info.name in unprocessed_request: self._SetField(args, type_info, unprocessed_request[type_info.name]) else: args = None elif request.method in ["POST", "DELETE", "PATCH"]: try: args = method_metadata.args_type() for type_info in args.type_infos: if type_info.name in route_args: self._SetField(args, type_info, route_args[type_info.name]) if request.content_type and request.content_type.startswith( "multipart/form-data;"): payload = json.Parse(request.form["_params_"].decode("utf-8")) args.FromDict(payload) for name, fd in iteritems(request.files): args.Set(name, fd.read()) elif format_mode == JsonMode.PROTO3_JSON_MODE: # NOTE: Arguments rdfvalue has to be a protobuf-based RDFValue. args_proto = args.protobuf() json_format.Parse(request.get_data(as_text=True) or "{}", args_proto) args.ParseFromString(args_proto.SerializeToString()) else: json_data = request.get_data(as_text=True) or "{}" payload = json.Parse(json_data) if payload: args.FromDict(payload) except Exception as e: # pylint: disable=broad-except logging.exception("Error while parsing POST request %s (%s): %s", request.path, request.method, e) raise PostRequestParsingError(e) else: raise UnsupportedHttpMethod("Unsupported method: %s." % request.method) return args
[ "def", "_GetArgsFromRequest", "(", "self", ",", "request", ",", "method_metadata", ",", "route_args", ")", ":", "format_mode", "=", "GetRequestFormatMode", "(", "request", ",", "method_metadata", ")", "if", "request", ".", "method", "in", "[", "\"GET\"", ",", "\"HEAD\"", "]", ":", "if", "method_metadata", ".", "args_type", ":", "unprocessed_request", "=", "request", ".", "args", "if", "hasattr", "(", "unprocessed_request", ",", "\"dict\"", ")", ":", "unprocessed_request", "=", "unprocessed_request", ".", "dict", "(", ")", "args", "=", "method_metadata", ".", "args_type", "(", ")", "for", "type_info", "in", "args", ".", "type_infos", ":", "if", "type_info", ".", "name", "in", "route_args", ":", "self", ".", "_SetField", "(", "args", ",", "type_info", ",", "route_args", "[", "type_info", ".", "name", "]", ")", "elif", "type_info", ".", "name", "in", "unprocessed_request", ":", "self", ".", "_SetField", "(", "args", ",", "type_info", ",", "unprocessed_request", "[", "type_info", ".", "name", "]", ")", "else", ":", "args", "=", "None", "elif", "request", ".", "method", "in", "[", "\"POST\"", ",", "\"DELETE\"", ",", "\"PATCH\"", "]", ":", "try", ":", "args", "=", "method_metadata", ".", "args_type", "(", ")", "for", "type_info", "in", "args", ".", "type_infos", ":", "if", "type_info", ".", "name", "in", "route_args", ":", "self", ".", "_SetField", "(", "args", ",", "type_info", ",", "route_args", "[", "type_info", ".", "name", "]", ")", "if", "request", ".", "content_type", "and", "request", ".", "content_type", ".", "startswith", "(", "\"multipart/form-data;\"", ")", ":", "payload", "=", "json", ".", "Parse", "(", "request", ".", "form", "[", "\"_params_\"", "]", ".", "decode", "(", "\"utf-8\"", ")", ")", "args", ".", "FromDict", "(", "payload", ")", "for", "name", ",", "fd", "in", "iteritems", "(", "request", ".", "files", ")", ":", "args", ".", "Set", "(", "name", ",", "fd", ".", "read", "(", ")", ")", "elif", "format_mode", "==", "JsonMode", ".", "PROTO3_JSON_MODE", ":", "# NOTE: Arguments rdfvalue has to be a protobuf-based RDFValue.", "args_proto", "=", "args", ".", "protobuf", "(", ")", "json_format", ".", "Parse", "(", "request", ".", "get_data", "(", "as_text", "=", "True", ")", "or", "\"{}\"", ",", "args_proto", ")", "args", ".", "ParseFromString", "(", "args_proto", ".", "SerializeToString", "(", ")", ")", "else", ":", "json_data", "=", "request", ".", "get_data", "(", "as_text", "=", "True", ")", "or", "\"{}\"", "payload", "=", "json", ".", "Parse", "(", "json_data", ")", "if", "payload", ":", "args", ".", "FromDict", "(", "payload", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "logging", ".", "exception", "(", "\"Error while parsing POST request %s (%s): %s\"", ",", "request", ".", "path", ",", "request", ".", "method", ",", "e", ")", "raise", "PostRequestParsingError", "(", "e", ")", "else", ":", "raise", "UnsupportedHttpMethod", "(", "\"Unsupported method: %s.\"", "%", "request", ".", "method", ")", "return", "args" ]
Builds args struct out of HTTP request.
[ "Builds", "args", "struct", "out", "of", "HTTP", "request", "." ]
python
train
41.72549
thombashi/tcconfig
tcconfig/traffic_control.py
https://github.com/thombashi/tcconfig/blob/9612dcd6ac9c072e7aa9eb702a225c559936bad3/tcconfig/traffic_control.py#L213-L226
def get_tc_device(self): """ Return the device name associated with the network communication direction. """ if self.direction == TrafficDirection.OUTGOING: return self.device if self.direction == TrafficDirection.INCOMING: return self.ifb_device raise ParameterError( "unknown direction", expected=TrafficDirection.LIST, value=self.direction )
[ "def", "get_tc_device", "(", "self", ")", ":", "if", "self", ".", "direction", "==", "TrafficDirection", ".", "OUTGOING", ":", "return", "self", ".", "device", "if", "self", ".", "direction", "==", "TrafficDirection", ".", "INCOMING", ":", "return", "self", ".", "ifb_device", "raise", "ParameterError", "(", "\"unknown direction\"", ",", "expected", "=", "TrafficDirection", ".", "LIST", ",", "value", "=", "self", ".", "direction", ")" ]
Return the device name associated with the network communication direction.
[ "Return", "the", "device", "name", "associated", "with", "the", "network", "communication", "direction", "." ]
python
train
30
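The method encodes a simple mapping: outgoing shaping uses the real interface, incoming shaping uses the ifb redirection device. A minimal stub makes this concrete without building a full TrafficControl object (whose constructor options are not shown in the record); it assumes the TrafficDirection constants are the strings "outgoing" and "incoming".

from tcconfig.traffic_control import TrafficControl  # class from the record above

class _Stub:                 # duck-typed stand-in carrying only the attributes used
    direction = "outgoing"   # assumed value of TrafficDirection.OUTGOING
    device = "eth0"          # physical interface
    ifb_device = "ifb0"      # kernel ifb device used for ingress shaping

print(TrafficControl.get_tc_device(_Stub()))   # -> "eth0"
_Stub.direction = "incoming"                   # assumed value of TrafficDirection.INCOMING
print(TrafficControl.get_tc_device(_Stub()))   # -> "ifb0"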
iotile/coretools
transport_plugins/jlink/iotile_transport_jlink/jlink.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/jlink/iotile_transport_jlink/jlink.py#L179-L207
def probe_async(self, callback): """Send advertisements for all connected devices. Args: callback (callable): A callback for when the probe operation has completed. callback should have signature callback(adapter_id, success, failure_reason) where: success: bool failure_reason: None if success is True, otherwise a reason for why we could not probe """ def _on_finished(_name, control_info, exception): if exception is not None: callback(self.id, False, str(exception)) return self._control_info = control_info try: info = { 'connection_string': "direct", 'uuid': control_info.uuid, 'signal_strength': 100 } self._trigger_callback('on_scan', self.id, info, self.ExpirationTime) finally: callback(self.id, True, None) self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size)
[ "def", "probe_async", "(", "self", ",", "callback", ")", ":", "def", "_on_finished", "(", "_name", ",", "control_info", ",", "exception", ")", ":", "if", "exception", "is", "not", "None", ":", "callback", "(", "self", ".", "id", ",", "False", ",", "str", "(", "exception", ")", ")", "return", "self", ".", "_control_info", "=", "control_info", "try", ":", "info", "=", "{", "'connection_string'", ":", "\"direct\"", ",", "'uuid'", ":", "control_info", ".", "uuid", ",", "'signal_strength'", ":", "100", "}", "self", ".", "_trigger_callback", "(", "'on_scan'", ",", "self", ".", "id", ",", "info", ",", "self", ".", "ExpirationTime", ")", "finally", ":", "callback", "(", "self", ".", "id", ",", "True", ",", "None", ")", "self", ".", "_control_thread", ".", "command", "(", "JLinkControlThread", ".", "FIND_CONTROL", ",", "_on_finished", ",", "self", ".", "_device_info", ".", "ram_start", ",", "self", ".", "_device_info", ".", "ram_size", ")" ]
Send advertisements for all connected devices. Args: callback (callable): A callback for when the probe operation has completed. callback should have signature callback(adapter_id, success, failure_reason) where: success: bool failure_reason: None if success is True, otherwise a reason for why we could not probe
[ "Send", "advertisements", "for", "all", "connected", "devices", "." ]
python
train
39.310345
f3at/feat
src/feat/database/emu.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/database/emu.py#L310-L319
def load_fixture(self, body, attachment_bodies={}): ''' Loads the document into the database from json string. Fakes the attachments if necessary.''' doc = json.loads(body) self._documents[doc['_id']] = doc self._attachments[doc['_id']] = dict() for name in doc.get('_attachments', list()): attachment_body = attachment_bodies.get(name, 'stub') self._attachments[doc['_id']][name] = attachment_body
[ "def", "load_fixture", "(", "self", ",", "body", ",", "attachment_bodies", "=", "{", "}", ")", ":", "doc", "=", "json", ".", "loads", "(", "body", ")", "self", ".", "_documents", "[", "doc", "[", "'_id'", "]", "]", "=", "doc", "self", ".", "_attachments", "[", "doc", "[", "'_id'", "]", "]", "=", "dict", "(", ")", "for", "name", "in", "doc", ".", "get", "(", "'_attachments'", ",", "list", "(", ")", ")", ":", "attachment_body", "=", "attachment_bodies", ".", "get", "(", "name", ",", "'stub'", ")", "self", ".", "_attachments", "[", "doc", "[", "'_id'", "]", "]", "[", "name", "]", "=", "attachment_body" ]
Loads the document into the database from json string. Fakes the attachments if necessary.
[ "Loads", "the", "document", "into", "the", "database", "from", "json", "string", ".", "Fakes", "the", "attachments", "if", "necessary", "." ]
python
train
46.9
automl/HpBandSter
hpbandster/examples/commons.py
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/examples/commons.py#L22-L46
def compute(self, config, budget, **kwargs): """ Simple example for a compute function The loss is just the config + some noise (that decreases with the budget) For dramatization, the function can sleep for a given interval to emphasize the speed ups achievable with parallel workers. Args: config: dictionary containing the sampled configurations by the optimizer budget: (float) amount of time/epochs/etc. the model can use to train Returns: dictionary with mandatory fields: 'loss' (scalar) 'info' (dict) """ res = numpy.clip(config['x'] + numpy.random.randn()/budget, config['x']/2, 1.5*config['x']) time.sleep(self.sleep_interval) return({ 'loss': float(res), # this is a mandatory field to run hyperband 'info': res # can be used for any user-defined information - also mandatory })
[ "def", "compute", "(", "self", ",", "config", ",", "budget", ",", "*", "*", "kwargs", ")", ":", "res", "=", "numpy", ".", "clip", "(", "config", "[", "'x'", "]", "+", "numpy", ".", "random", ".", "randn", "(", ")", "/", "budget", ",", "config", "[", "'x'", "]", "/", "2", ",", "1.5", "*", "config", "[", "'x'", "]", ")", "time", ".", "sleep", "(", "self", ".", "sleep_interval", ")", "return", "(", "{", "'loss'", ":", "float", "(", "res", ")", ",", "# this is a mandatory field to run hyperband", "'info'", ":", "res", "# can be used for any user-defined information - also mandatory", "}", ")" ]
Simple example for a compute function The loss is just the config + some noise (that decreases with the budget) For dramatization, the function can sleep for a given interval to emphasize the speed ups achievable with parallel workers. Args: config: dictionary containing the sampled configurations by the optimizer budget: (float) amount of time/epochs/etc. the model can use to train Returns: dictionary with mandatory fields: 'loss' (scalar) 'info' (dict)
[ "Simple", "example", "for", "a", "compute", "function", "The", "loss", "is", "just", "the", "config", "+", "some", "noise", "(", "that", "decreases", "with", "the", "budget", ")" ]
python
train
39.6
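A sketch of exercising compute directly. MyWorker is assumed to be the class hosting it in hpbandster/examples/commons.py (the record names only the module), and the constructor arguments follow hpbandster's Worker base class; treat both as assumptions.

from hpbandster.examples.commons import MyWorker  # class name assumed from the examples module

w = MyWorker(sleep_interval=0.0, run_id="demo")   # ctor arguments assumed from the Worker base
# The noise term numpy.random.randn()/budget shrinks as the budget grows,
# so the loss tightens around config['x'], clipped to [x/2, 1.5*x].
print(w.compute(config={"x": 0.5}, budget=1))     # noisy
print(w.compute(config={"x": 0.5}, budget=100))   # close to 0.5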
agile-geoscience/welly
welly/project.py
https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/project.py#L211-L217
def count_mnemonic(self, mnemonic, uwis=uwis, alias=None): """ Counts the wells that have a given curve, given the mnemonic and an alias dict. """ all_mnemonics = self.get_mnemonics([mnemonic], uwis=uwis, alias=alias) return len(list(filter(None, utils.flatten_list(all_mnemonics))))
[ "def", "count_mnemonic", "(", "self", ",", "mnemonic", ",", "uwis", "=", "uwis", ",", "alias", "=", "None", ")", ":", "all_mnemonics", "=", "self", ".", "get_mnemonics", "(", "[", "mnemonic", "]", ",", "uwis", "=", "uwis", ",", "alias", "=", "alias", ")", "return", "len", "(", "list", "(", "filter", "(", "None", ",", "utils", ".", "flatten_list", "(", "all_mnemonics", ")", ")", ")", ")" ]
Counts the wells that have a given curve, given the mnemonic and an alias dict.
[ "Counts", "the", "wells", "that", "have", "a", "given", "curve", "given", "the", "mnemonic", "and", "an", "alias", "dict", "." ]
python
train
46.428571
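A hedged usage sketch; Project.from_las is welly's documented loader, while the glob and alias values here are placeholders.

from welly import Project

project = Project.from_las("data/*.las")        # placeholder glob of LAS files
alias = {"GR": ["GR", "GRC", "SGR"]}            # one mnemonic, several vendor spellings
# Counts wells carrying any curve that resolves to "GR" under the alias dict.
print(project.count_mnemonic("GR", alias=alias))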
aio-libs/aiohttp
aiohttp/http_websocket.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/http_websocket.py#L85-L91
def json(self, *, # type: ignore loads: Callable[[Any], Any]=json.loads) -> None: """Return parsed JSON data. .. versionadded:: 0.22 """ return loads(self.data)
[ "def", "json", "(", "self", ",", "*", ",", "# type: ignore", "loads", ":", "Callable", "[", "[", "Any", "]", ",", "Any", "]", "=", "json", ".", "loads", ")", "->", "None", ":", "return", "loads", "(", "self", ".", "data", ")" ]
Return parsed JSON data. .. versionadded:: 0.22
[ "Return", "parsed", "JSON", "data", "." ]
python
train
28.714286
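msg.json() simply applies a JSON loader (json.loads unless overridden) to the message payload; a minimal websocket client sketch, with a placeholder endpoint:

import asyncio
from aiohttp import ClientSession

async def read_one(url):
    async with ClientSession() as session:
        async with session.ws_connect(url) as ws:
            msg = await ws.receive()   # a WSMessage; .data holds the raw text
            return msg.json()          # parses .data with json.loads by default

# asyncio.run(read_one("ws://localhost:8080/ws"))  # placeholder endpoint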
djgagne/hagelslag
hagelslag/processing/ObjectMatcher.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/ObjectMatcher.py#L371-L388
def area_difference(item_a, time_a, item_b, time_b, max_value): """ RMS Difference in object areas. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """ size_a = item_a.size(time_a) size_b = item_b.size(time_b) diff = np.sqrt((size_a - size_b) ** 2) return np.minimum(diff, max_value) / float(max_value)
[ "def", "area_difference", "(", "item_a", ",", "time_a", ",", "item_b", ",", "time_b", ",", "max_value", ")", ":", "size_a", "=", "item_a", ".", "size", "(", "time_a", ")", "size_b", "=", "item_b", ".", "size", "(", "time_b", ")", "diff", "=", "np", ".", "sqrt", "(", "(", "size_a", "-", "size_b", ")", "**", "2", ")", "return", "np", ".", "minimum", "(", "diff", ",", "max_value", ")", "/", "float", "(", "max_value", ")" ]
RMS Difference in object areas. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
[ "RMS", "Difference", "in", "object", "areas", "." ]
python
train
34.944444
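A worked example for area_difference with stand-in objects; only the size(time) interface of STObject is exercised, so a tiny stub suffices.

from hagelslag.processing.ObjectMatcher import area_difference

class _Obj:                       # minimal stand-in exposing STObject.size(time)
    def __init__(self, sizes):
        self.sizes = sizes
    def size(self, t):
        return self.sizes[t]

a, b = _Obj({0: 40}), _Obj({0: 25})
# sqrt((40 - 25)**2) = 15; capped at max_value=20 and scaled: 15/20 = 0.75
print(area_difference(a, 0, b, 0, max_value=20))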
honzajavorek/tipi
tipi/html.py
https://github.com/honzajavorek/tipi/blob/cbe51192725608b6fba1244a48610ae231b13e08/tipi/html.py#L39-L49
def parent_tags(self): """Provides tags of all parent HTML elements.""" tags = set() for addr in self._addresses: if addr.attr == 'text': tags.add(addr.element.tag) tags.update(el.tag for el in addr.element.iterancestors()) tags.discard(HTMLFragment._root_tag) return frozenset(tags)
[ "def", "parent_tags", "(", "self", ")", ":", "tags", "=", "set", "(", ")", "for", "addr", "in", "self", ".", "_addresses", ":", "if", "addr", ".", "attr", "==", "'text'", ":", "tags", ".", "add", "(", "addr", ".", "element", ".", "tag", ")", "tags", ".", "update", "(", "el", ".", "tag", "for", "el", "in", "addr", ".", "element", ".", "iterancestors", "(", ")", ")", "tags", ".", "discard", "(", "HTMLFragment", ".", "_root_tag", ")", "return", "frozenset", "(", "tags", ")" ]
Provides tags of all parent HTML elements.
[ "Provides", "tags", "of", "all", "parent", "HTML", "elements", "." ]
python
train
32.272727
rmed/pyemtmad
pyemtmad/wrapper.py
https://github.com/rmed/pyemtmad/blob/c21c42d0c7b50035dfed29540d7e64ab67833728/pyemtmad/wrapper.py#L95-L111
def initialize(self, emt_id, emt_pass): """Manual initialization of the interface attributes. This is useful when the interface must be declared but initialized later on with parsed configuration values. Args: emt_id (str): ID given by the server upon registration emt_pass (str): Token given by the server upon registration """ self._emt_id = emt_id self._emt_pass = emt_pass # Initialize modules self.bus = BusApi(self) self.geo = GeoApi(self) self.parking = ParkingApi(self)
[ "def", "initialize", "(", "self", ",", "emt_id", ",", "emt_pass", ")", ":", "self", ".", "_emt_id", "=", "emt_id", "self", ".", "_emt_pass", "=", "emt_pass", "# Initialize modules", "self", ".", "bus", "=", "BusApi", "(", "self", ")", "self", ".", "geo", "=", "GeoApi", "(", "self", ")", "self", ".", "parking", "=", "ParkingApi", "(", "self", ")" ]
Manual initialization of the interface attributes. This is useful when the interface must be declared but initialized later on with parsed configuration values. Args: emt_id (str): ID given by the server upon registration emt_pass (str): Token given by the server upon registration
[ "Manual", "initialization", "of", "the", "interface", "attributes", "." ]
python
train
33.823529
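A short sketch of the declare-now, configure-later pattern the docstring describes; the Wrapper class name is assumed from the record's module path and the credentials are placeholders.

from pyemtmad.wrapper import Wrapper  # class name assumed from the record's module path

wrapper = Wrapper()                           # declared before credentials are known
emt_id, emt_pass = "MY_ID", "MY_TOKEN"        # e.g. parsed from a config file (placeholders)
wrapper.initialize(emt_id, emt_pass)
# After initialize(), the per-service endpoints are ready to use:
# wrapper.bus, wrapper.geo and wrapper.parking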
rigetti/pyquil
pyquil/api/_quantum_computer.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_quantum_computer.py#L488-L510
def _get_qvm_based_on_real_device(name: str, device: Device, noisy: bool, connection: ForestConnection = None, qvm_type: str = 'qvm'): """ A QVM based on a real device. This is the most realistic QVM. :param name: The full name of this QVM :param device: The device from :py:func:`get_lattice`. :param noisy: Whether to construct a noisy quantum computer by using the device's associated noise model. :param connection: An optional :py:class:`ForestConnection` object. If not specified, the default values for URL endpoints will be used. :return: A pre-configured QuantumComputer based on the named device. """ if noisy: noise_model = device.noise_model else: noise_model = None return _get_qvm_qc(name=name, connection=connection, device=device, noise_model=noise_model, requires_executable=True, qvm_type=qvm_type)
[ "def", "_get_qvm_based_on_real_device", "(", "name", ":", "str", ",", "device", ":", "Device", ",", "noisy", ":", "bool", ",", "connection", ":", "ForestConnection", "=", "None", ",", "qvm_type", ":", "str", "=", "'qvm'", ")", ":", "if", "noisy", ":", "noise_model", "=", "device", ".", "noise_model", "else", ":", "noise_model", "=", "None", "return", "_get_qvm_qc", "(", "name", "=", "name", ",", "connection", "=", "connection", ",", "device", "=", "device", ",", "noise_model", "=", "noise_model", ",", "requires_executable", "=", "True", ",", "qvm_type", "=", "qvm_type", ")" ]
A QVM based on a real device. This is the most realistic QVM. :param name: The full name of this QVM :param device: The device from :py:func:`get_lattice`. :param noisy: Whether to construct a noisy quantum computer by using the device's associated noise model. :param connection: An optional :py:class:`ForestConnection` object. If not specified, the default values for URL endpoints will be used. :return: A pre-configured QuantumComputer based on the named device.
[ "A", "QVM", "based", "on", "a", "real", "device", "." ]
python
train
43.347826
Alignak-monitoring/alignak
alignak/daterange.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L586-L632
def get_next_invalid_time_from_t(self, timestamp): """Get next invalid time for time range :param timestamp: time we compute from :type timestamp: int :return: timestamp of the next invalid time (LOCAL TIME) :rtype: int """ if not self.is_time_valid(timestamp): return timestamp # First we search for the day of time range t_day = self.get_next_invalid_day(timestamp) # We search for the min of all tr.start > sec_from_morning # if it's the next day, use a start of the day search for timerange if timestamp < t_day: sec_from_morning = self.get_next_future_timerange_invalid(t_day) else: # it is in this day, so look from t (can be in the evening or so) sec_from_morning = self.get_next_future_timerange_invalid(timestamp) # tr can't be valid, or it will be return at the beginning # sec_from_morning = self.get_next_future_timerange_invalid(t) # Ok we've got a next invalid day and a invalid possibility in # timerange, so the next invalid is this day+sec_from_morning if t_day is not None and sec_from_morning is not None: return t_day + sec_from_morning + 1 # We've got a day but no sec_from_morning: the timerange is full (0->24h) # so the next invalid is this day at the day_start if t_day is not None and sec_from_morning is None: return t_day # Then we search for the next day of t # The sec will be the min of the day timestamp = get_day(timestamp) + 86400 t_day2 = self.get_next_invalid_day(timestamp) sec_from_morning = self.get_next_future_timerange_invalid(t_day2) if t_day2 is not None and sec_from_morning is not None: return t_day2 + sec_from_morning + 1 if t_day2 is not None and sec_from_morning is None: return t_day2 # I did not found any valid time return None
[ "def", "get_next_invalid_time_from_t", "(", "self", ",", "timestamp", ")", ":", "if", "not", "self", ".", "is_time_valid", "(", "timestamp", ")", ":", "return", "timestamp", "# First we search for the day of time range", "t_day", "=", "self", ".", "get_next_invalid_day", "(", "timestamp", ")", "# We search for the min of all tr.start > sec_from_morning", "# if it's the next day, use a start of the day search for timerange", "if", "timestamp", "<", "t_day", ":", "sec_from_morning", "=", "self", ".", "get_next_future_timerange_invalid", "(", "t_day", ")", "else", ":", "# it is in this day, so look from t (can be in the evening or so)", "sec_from_morning", "=", "self", ".", "get_next_future_timerange_invalid", "(", "timestamp", ")", "# tr can't be valid, or it will be return at the beginning", "# sec_from_morning = self.get_next_future_timerange_invalid(t)", "# Ok we've got a next invalid day and a invalid possibility in", "# timerange, so the next invalid is this day+sec_from_morning", "if", "t_day", "is", "not", "None", "and", "sec_from_morning", "is", "not", "None", ":", "return", "t_day", "+", "sec_from_morning", "+", "1", "# We've got a day but no sec_from_morning: the timerange is full (0->24h)", "# so the next invalid is this day at the day_start", "if", "t_day", "is", "not", "None", "and", "sec_from_morning", "is", "None", ":", "return", "t_day", "# Then we search for the next day of t", "# The sec will be the min of the day", "timestamp", "=", "get_day", "(", "timestamp", ")", "+", "86400", "t_day2", "=", "self", ".", "get_next_invalid_day", "(", "timestamp", ")", "sec_from_morning", "=", "self", ".", "get_next_future_timerange_invalid", "(", "t_day2", ")", "if", "t_day2", "is", "not", "None", "and", "sec_from_morning", "is", "not", "None", ":", "return", "t_day2", "+", "sec_from_morning", "+", "1", "if", "t_day2", "is", "not", "None", "and", "sec_from_morning", "is", "None", ":", "return", "t_day2", "# I did not found any valid time", "return", "None" ]
Get next invalid time for time range :param timestamp: time we compute from :type timestamp: int :return: timestamp of the next invalid time (LOCAL TIME) :rtype: int
[ "Get", "next", "invalid", "time", "for", "time", "range" ]
python
train
41.87234
awslabs/sockeye
sockeye/data_io.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/data_io.py#L1196-L1211
def create_sequence_readers(sources: List[str], target: str, vocab_sources: List[vocab.Vocab], vocab_target: vocab.Vocab) -> Tuple[List[SequenceReader], SequenceReader]: """ Create source readers with EOS and target readers with BOS. :param sources: The file names of source data and factors. :param target: The file name of the target data. :param vocab_sources: The source vocabularies. :param vocab_target: The target vocabularies. :return: The source sequence readers and the target reader. """ source_sequence_readers = [SequenceReader(source, vocab, add_eos=True) for source, vocab in zip(sources, vocab_sources)] target_sequence_reader = SequenceReader(target, vocab_target, add_bos=True) return source_sequence_readers, target_sequence_reader
[ "def", "create_sequence_readers", "(", "sources", ":", "List", "[", "str", "]", ",", "target", ":", "str", ",", "vocab_sources", ":", "List", "[", "vocab", ".", "Vocab", "]", ",", "vocab_target", ":", "vocab", ".", "Vocab", ")", "->", "Tuple", "[", "List", "[", "SequenceReader", "]", ",", "SequenceReader", "]", ":", "source_sequence_readers", "=", "[", "SequenceReader", "(", "source", ",", "vocab", ",", "add_eos", "=", "True", ")", "for", "source", ",", "vocab", "in", "zip", "(", "sources", ",", "vocab_sources", ")", "]", "target_sequence_reader", "=", "SequenceReader", "(", "target", ",", "vocab_target", ",", "add_bos", "=", "True", ")", "return", "source_sequence_readers", ",", "target_sequence_reader" ]
Create source readers with EOS and target readers with BOS. :param sources: The file names of source data and factors. :param target: The file name of the target data. :param vocab_sources: The source vocabularies. :param vocab_target: The target vocabularies. :return: The source sequence readers and the target reader.
[ "Create", "source", "readers", "with", "EOS", "and", "target", "readers", "with", "BOS", "." ]
python
train
54.1875
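A hedged sketch of wiring up the readers; vocab.vocab_from_json is sockeye's loader for serialized vocabularies, and all file paths here are placeholders.

from sockeye import vocab
from sockeye.data_io import create_sequence_readers

vocab_src = vocab.vocab_from_json("model/vocab.src.0.json")  # placeholder paths
vocab_trg = vocab.vocab_from_json("model/vocab.trg.0.json")
# One SequenceReader per source/factor file (EOS appended) and one for the
# target (BOS prepended), zipped against their matching vocabularies.
src_readers, trg_reader = create_sequence_readers(
    ["train.src"], "train.trg", [vocab_src], vocab_trg)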
rodluger/pysyzygy
pysyzygy/transit.py
https://github.com/rodluger/pysyzygy/blob/d2b64251047cc0f0d0adeb6feab4054e7fce4b7a/pysyzygy/transit.py#L546-L569
def Free(self): ''' Frees the memory used by all of the dynamically allocated C arrays. ''' if self.arrays._calloc: _dbl_free(self.arrays._time) _dbl_free(self.arrays._flux) _dbl_free(self.arrays._bflx) _dbl_free(self.arrays._M) _dbl_free(self.arrays._E) _dbl_free(self.arrays._f) _dbl_free(self.arrays._r) _dbl_free(self.arrays._x) _dbl_free(self.arrays._y) _dbl_free(self.arrays._z) self.arrays._calloc = 0 if self.arrays._balloc: _dbl_free(self.arrays._b) self.arrays._balloc = 0 if self.arrays._ialloc: _dbl_free(self.arrays._iarr) self.arrays._ialloc = 0
[ "def", "Free", "(", "self", ")", ":", "if", "self", ".", "arrays", ".", "_calloc", ":", "_dbl_free", "(", "self", ".", "arrays", ".", "_time", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_flux", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_bflx", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_M", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_E", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_f", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_r", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_x", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_y", ")", "_dbl_free", "(", "self", ".", "arrays", ".", "_z", ")", "self", ".", "arrays", ".", "_calloc", "=", "0", "if", "self", ".", "arrays", ".", "_balloc", ":", "_dbl_free", "(", "self", ".", "arrays", ".", "_b", ")", "self", ".", "arrays", ".", "_balloc", "=", "0", "if", "self", ".", "arrays", ".", "_ialloc", ":", "_dbl_free", "(", "self", ".", "arrays", ".", "_iarr", ")", "self", ".", "arrays", ".", "_ialloc", "=", "0" ]
Frees the memory used by all of the dynamically allocated C arrays.
[ "Frees", "the", "memory", "used", "by", "all", "of", "the", "dynamically", "allocated", "C", "arrays", "." ]
python
test
27.416667
SmokinCaterpillar/pypet
pypet/parameter.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/parameter.py#L1279-L1314
def _load(self, load_dict): """Reconstructs the data and exploration array. Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'. If not calls :class:`~pypet.parameter.Parameter._load` of the parent class. If the parameter is explored, the exploration range of arrays is reconstructed as it was stored in :func:`~pypet.parameter.ArrayParameter._store`. """ if self.v_locked: raise pex.ParameterLockedException('Parameter `%s` is locked!' % self.v_full_name) try: self._data = load_dict['data' + ArrayParameter.IDENTIFIER] if 'explored_data' + ArrayParameter.IDENTIFIER in load_dict: explore_table = load_dict['explored_data' + ArrayParameter.IDENTIFIER] idx = explore_table['idx'] explore_list = [] # Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__' for name_idx in idx: arrayname = self._build_name(name_idx) explore_list.append(load_dict[arrayname]) self._explored_range = [x for x in explore_list] self._explored = True except KeyError: super(ArrayParameter, self)._load(load_dict) self._default = self._data self._locked = True
[ "def", "_load", "(", "self", ",", "load_dict", ")", ":", "if", "self", ".", "v_locked", ":", "raise", "pex", ".", "ParameterLockedException", "(", "'Parameter `%s` is locked!'", "%", "self", ".", "v_full_name", ")", "try", ":", "self", ".", "_data", "=", "load_dict", "[", "'data'", "+", "ArrayParameter", ".", "IDENTIFIER", "]", "if", "'explored_data'", "+", "ArrayParameter", ".", "IDENTIFIER", "in", "load_dict", ":", "explore_table", "=", "load_dict", "[", "'explored_data'", "+", "ArrayParameter", ".", "IDENTIFIER", "]", "idx", "=", "explore_table", "[", "'idx'", "]", "explore_list", "=", "[", "]", "# Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'", "for", "name_idx", "in", "idx", ":", "arrayname", "=", "self", ".", "_build_name", "(", "name_idx", ")", "explore_list", ".", "append", "(", "load_dict", "[", "arrayname", "]", ")", "self", ".", "_explored_range", "=", "[", "x", "for", "x", "in", "explore_list", "]", "self", ".", "_explored", "=", "True", "except", "KeyError", ":", "super", "(", "ArrayParameter", ",", "self", ")", ".", "_load", "(", "load_dict", ")", "self", ".", "_default", "=", "self", ".", "_data", "self", ".", "_locked", "=", "True" ]
Reconstructs the data and exploration array. Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'. If not calls :class:`~pypet.parameter.Parameter._load` of the parent class. If the parameter is explored, the exploration range of arrays is reconstructed as it was stored in :func:`~pypet.parameter.ArrayParameter._store`.
[ "Reconstructs", "the", "data", "and", "exploration", "array", "." ]
python
test
37.472222
CalebBell/thermo
thermo/solubility.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/solubility.py#L229-L280
def Tm_depression_eutectic(Tm, Hm, x=None, M=None, MW=None): r'''Returns the freezing point depression caused by a solute in a solvent. Can use either the mole fraction of the solute or its molality and the molecular weight of the solvent. Assumes ideal system behavior. .. math:: \Delta T_m = \frac{R T_m^2 x}{\Delta H_m} \Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m} Parameters ---------- Tm : float Melting temperature of the solute [K] Hm : float Heat of melting at the melting temperature of the solute [J/mol] x : float, optional Mole fraction of the solute [-] M : float, optional Molality [mol/kg] MW: float, optional Molecular weight of the solvent [g/mol] Returns ------- dTm : float Freezing point depression [K] Notes ----- MW is the molecular weight of the solvent. M is the molality of the solute. Examples -------- From [1]_, matching example. >>> Tm_depression_eutectic(353.35, 19110, .02) 1.0864594900639515 References ---------- .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation. Weinheim, Germany: Wiley-VCH, 2012. ''' if x: dTm = R*Tm**2*x/Hm elif M and MW: MW = MW/1000. #g/mol to kg/mol dTm = R*Tm**2*MW*M/Hm else: raise Exception('Either molality or mole fraction of the solute must be specified; MW of the solvent is required also if molality is provided') return dTm
[ "def", "Tm_depression_eutectic", "(", "Tm", ",", "Hm", ",", "x", "=", "None", ",", "M", "=", "None", ",", "MW", "=", "None", ")", ":", "if", "x", ":", "dTm", "=", "R", "*", "Tm", "**", "2", "*", "x", "/", "Hm", "elif", "M", "and", "MW", ":", "MW", "=", "MW", "/", "1000.", "#g/mol to kg/mol", "dTm", "=", "R", "*", "Tm", "**", "2", "*", "MW", "*", "M", "/", "Hm", "else", ":", "raise", "Exception", "(", "'Either molality or mole fraction of the solute must be specified; MW of the solvent is required also if molality is provided'", ")", "return", "dTm" ]
r'''Returns the freezing point depression caused by a solute in a solvent. Can use either the mole fraction of the solute or its molality and the molecular weight of the solvent. Assumes ideal system behavior. .. math:: \Delta T_m = \frac{R T_m^2 x}{\Delta H_m} \Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m} Parameters ---------- Tm : float Melting temperature of the solute [K] Hm : float Heat of melting at the melting temperature of the solute [J/mol] x : float, optional Mole fraction of the solute [-] M : float, optional Molality [mol/kg] MW: float, optional Molecular weight of the solvent [g/mol] Returns ------- dTm : float Freezing point depression [K] Notes ----- MW is the molecular weight of the solvent. M is the molality of the solute. Examples -------- From [1]_, matching example. >>> Tm_depression_eutectic(353.35, 19110, .02) 1.0864594900639515 References ---------- .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation. Weinheim, Germany: Wiley-VCH, 2012.
[ "r", "Returns", "the", "freezing", "point", "depression", "caused", "by", "a", "solute", "in", "a", "solvent", ".", "Can", "use", "either", "the", "mole", "fraction", "of", "the", "solute", "or", "its", "molality", "and", "the", "molecular", "weight", "of", "the", "solvent", ".", "Assumes", "ideal", "system", "behavior", "." ]
python
valid
28.846154
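A minimal usage sketch of the molality branch of `Tm_depression_eutectic` above; the solute molality and solvent molecular weight here are hypothetical values chosen only to exercise the `elif M and MW` path.
```python
# Hedged example: 0.1 mol/kg of the same solute in water (MW = 18.015 g/mol).
# Internally this computes R*Tm**2*(MW/1000.)*M/Hm, giving roughly 0.098 K.
dTm = Tm_depression_eutectic(Tm=353.35, Hm=19110, M=0.1, MW=18.015)
```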
bigchaindb/bigchaindb
bigchaindb/common/transaction.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/common/transaction.py#L92-L118
def to_dict(self): """Transforms the object to a Python dictionary. Note: If an Input hasn't been signed yet, this method returns a dictionary representation. Returns: dict: The Input as an alternative serialization format. """ try: fulfillment = self.fulfillment.serialize_uri() except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError): fulfillment = _fulfillment_to_details(self.fulfillment) try: # NOTE: `self.fulfills` can be `None` and that's fine fulfills = self.fulfills.to_dict() except AttributeError: fulfills = None input_ = { 'owners_before': self.owners_before, 'fulfills': fulfills, 'fulfillment': fulfillment, } return input_
[ "def", "to_dict", "(", "self", ")", ":", "try", ":", "fulfillment", "=", "self", ".", "fulfillment", ".", "serialize_uri", "(", ")", "except", "(", "TypeError", ",", "AttributeError", ",", "ASN1EncodeError", ",", "ASN1DecodeError", ")", ":", "fulfillment", "=", "_fulfillment_to_details", "(", "self", ".", "fulfillment", ")", "try", ":", "# NOTE: `self.fulfills` can be `None` and that's fine", "fulfills", "=", "self", ".", "fulfills", ".", "to_dict", "(", ")", "except", "AttributeError", ":", "fulfills", "=", "None", "input_", "=", "{", "'owners_before'", ":", "self", ".", "owners_before", ",", "'fulfills'", ":", "fulfills", ",", "'fulfillment'", ":", "fulfillment", ",", "}", "return", "input_" ]
Transforms the object to a Python dictionary. Note: If an Input hasn't been signed yet, this method returns a dictionary representation. Returns: dict: The Input as an alternative serialization format.
[ "Transforms", "the", "object", "to", "a", "Python", "dictionary", "." ]
python
train
32.222222
bwohlberg/sporco
sporco/prox/_lp.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/prox/_lp.py#L29-L64
def norm_l0(x, axis=None, eps=0.0): r"""Compute the :math:`\ell_0` "norm" (it is not really a norm) .. math:: \| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc} 0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0 \end{array} \right. where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`. Parameters ---------- x : array_like Input array :math:`\mathbf{x}` axis : `None` or int or tuple of ints, optional (default None) Axes of `x` over which to compute the :math:`\ell_0` "norm". If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct values are computed over the indices of the remaining axes of input array `x`. eps : float, optional (default 0.0) Absolute value threshold below which a number is considered to be zero. Returns ------- nl0 : float or ndarray Norm of `x`, or array of norms treating specified axes of `x` as a vector """ nl0 = np.sum(np.abs(x) > eps, axis=axis, keepdims=True) # If the result has a single element, convert it to a scalar if nl0.size == 1: nl0 = nl0.ravel()[0] return nl0
[ "def", "norm_l0", "(", "x", ",", "axis", "=", "None", ",", "eps", "=", "0.0", ")", ":", "nl0", "=", "np", ".", "sum", "(", "np", ".", "abs", "(", "x", ")", ">", "eps", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", "# If the result has a single element, convert it to a scalar", "if", "nl0", ".", "size", "==", "1", ":", "nl0", "=", "nl0", ".", "ravel", "(", ")", "[", "0", "]", "return", "nl0" ]
r"""Compute the :math:`\ell_0` "norm" (it is not really a norm) .. math:: \| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc} 0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0 \end{array} \right. where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`. Parameters ---------- x : array_like Input array :math:`\mathbf{x}` axis : `None` or int or tuple of ints, optional (default None) Axes of `x` over which to compute the :math:`\ell_0` "norm". If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct values are computed over the indices of the remaining axes of input array `x`. eps : float, optional (default 0.0) Absolute value threshold below which a number is considered to be zero. Returns ------- nl0 : float or ndarray Norm of `x`, or array of norms treating specified axes of `x` as a vector
[ "r", "Compute", "the", ":", "math", ":", "\\", "ell_0", "norm", "(", "it", "is", "not", "really", "a", "norm", ")" ]
python
train
33
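A short usage sketch of `norm_l0`, assuming only that `numpy` and the function above are importable; the array values are made up for illustration.
```python
import numpy as np

x = np.array([[0.0, 1e-9, 0.5],
              [2.0, 0.0, 0.0]])
norm_l0(x)            # 3: entries with |x| > 0 over the whole array
norm_l0(x, eps=1e-6)  # 2: the 1e-9 entry is now treated as zero
norm_l0(x, axis=1)    # per-row counts, shape (2, 1) because keepdims=True
```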
MasoniteFramework/masonite
databases/seeds/user_table_seeder.py
https://github.com/MasoniteFramework/masonite/blob/c9bcca8f59169934c2accd8cecb2b996bb5e1a0d/databases/seeds/user_table_seeder.py#L18-L24
def run(self): """ Run the database seeds. """ self.factory.register(User, self.users_factory) self.factory(User, 50).create()
[ "def", "run", "(", "self", ")", ":", "self", ".", "factory", ".", "register", "(", "User", ",", "self", ".", "users_factory", ")", "self", ".", "factory", "(", "User", ",", "50", ")", ".", "create", "(", ")" ]
Run the database seeds.
[ "Run", "the", "database", "seeds", "." ]
python
train
23
saltstack/salt
salt/modules/mac_timezone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_timezone.py#L31-L58
def _get_date_time_format(dt_string): ''' Function that detects the date/time format for the string passed. :param str dt_string: A date/time string :return: The format of the passed dt_string :rtype: str :raises: SaltInvocationError on Invalid Date/Time string ''' valid_formats = [ '%H:%M', '%H:%M:%S', '%m:%d:%y', '%m:%d:%Y', '%m/%d/%y', '%m/%d/%Y' ] for dt_format in valid_formats: try: datetime.strptime(dt_string, dt_format) return dt_format except ValueError: continue msg = 'Invalid Date/Time Format: {0}'.format(dt_string) raise SaltInvocationError(msg)
[ "def", "_get_date_time_format", "(", "dt_string", ")", ":", "valid_formats", "=", "[", "'%H:%M'", ",", "'%H:%M:%S'", ",", "'%m:%d:%y'", ",", "'%m:%d:%Y'", ",", "'%m/%d/%y'", ",", "'%m/%d/%Y'", "]", "for", "dt_format", "in", "valid_formats", ":", "try", ":", "datetime", ".", "strptime", "(", "dt_string", ",", "dt_format", ")", "return", "dt_format", "except", "ValueError", ":", "continue", "msg", "=", "'Invalid Date/Time Format: {0}'", ".", "format", "(", "dt_string", ")", "raise", "SaltInvocationError", "(", "msg", ")" ]
Function that detects the date/time format for the string passed. :param str dt_string: A date/time string :return: The format of the passed dt_string :rtype: str :raises: SaltInvocationError on Invalid Date/Time string
[ "Function", "that", "detects", "the", "date", "/", "time", "format", "for", "the", "string", "passed", "." ]
python
train
24.785714
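Illustrative calls against the format list above; the function is private to the module, so this is a sketch rather than a supported API.
```python
_get_date_time_format('13:45')       # returns '%H:%M'
_get_date_time_format('07/04/2021')  # returns '%m/%d/%Y'
_get_date_time_format('yesterday')   # raises SaltInvocationError
```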
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L1196-L1210
def ListChildren(self, urn, limit=None, age=NEWEST_TIME):
    """Lists a bunch of directories efficiently.

    Args:
      urn: Urn to list children.
      limit: Max number of children to list.
      age: The age of the items to retrieve. Should be one of ALL_TIMES,
        NEWEST_TIME or a range.

    Returns:
      RDFURNs instances of each child.
    """
    _, children_urns = list(
        self.MultiListChildren([urn], limit=limit, age=age))[0]
    return children_urns
[ "def", "ListChildren", "(", "self", ",", "urn", ",", "limit", "=", "None", ",", "age", "=", "NEWEST_TIME", ")", ":", "_", ",", "children_urns", "=", "list", "(", "self", ".", "MultiListChildren", "(", "[", "urn", "]", ",", "limit", "=", "limit", ",", "age", "=", "age", ")", ")", "[", "0", "]", "return", "children_urns" ]
Lists a bunch of directories efficiently.

Args:
  urn: Urn to list children.
  limit: Max number of children to list.
  age: The age of the items to retrieve. Should be one of ALL_TIMES,
    NEWEST_TIME or a range.

Returns:
  RDFURNs instances of each child.
[ "Lists", "bunch", "of", "directories", "efficiently", "." ]
python
train
30.866667
adamheins/r12
r12/arm.py
https://github.com/adamheins/r12/blob/ff78178332140930bf46a94a0b15ee082bb92491/r12/arm.py#L160-L165
def dump(self, raw=False): ''' Dump all output currently in the arm's output queue. ''' raw_out = self.ser.read(self.ser.in_waiting) if raw: return raw_out return raw_out.decode(OUTPUT_ENCODING)
[ "def", "dump", "(", "self", ",", "raw", "=", "False", ")", ":", "raw_out", "=", "self", ".", "ser", ".", "read", "(", "self", ".", "ser", ".", "in_waiting", ")", "if", "raw", ":", "return", "raw_out", "return", "raw_out", ".", "decode", "(", "OUTPUT_ENCODING", ")" ]
Dump all output currently in the arm's output queue.
[ "Dump", "all", "output", "currently", "in", "the", "arm", "s", "output", "queue", "." ]
python
train
38.833333
pythongssapi/python-gssapi
gssapi/creds.py
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L269-L301
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
                    accept_lifetime=True, usage=True):
    """Inspect these credentials for per-mechanism information

    This method inspects these credentials for per-mechanism information
    about them.

    Args:
        mech (OID): the mechanism for which to retrieve the information
        name (bool): get the name associated with the credentials
        init_lifetime (bool): get the remaining initiate lifetime for
            the credentials
        accept_lifetime (bool): get the remaining accept lifetime for
            the credentials
        usage (bool): get the usage for the credentials

    Returns:
        InquireCredByMechResult: the information about the credentials,
            with None used when the corresponding argument was False
    """

    res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
                                      accept_lifetime, usage)

    if res.name is not None:
        res_name = names.Name(res.name)
    else:
        res_name = None

    return tuples.InquireCredByMechResult(res_name, res.init_lifetime,
                                          res.accept_lifetime, res.usage)
[ "def", "inquire_by_mech", "(", "self", ",", "mech", ",", "name", "=", "True", ",", "init_lifetime", "=", "True", ",", "accept_lifetime", "=", "True", ",", "usage", "=", "True", ")", ":", "res", "=", "rcreds", ".", "inquire_cred_by_mech", "(", "self", ",", "mech", ",", "name", ",", "init_lifetime", ",", "accept_lifetime", ",", "usage", ")", "if", "res", ".", "name", "is", "not", "None", ":", "res_name", "=", "names", ".", "Name", "(", "res", ".", "name", ")", "else", ":", "res_name", "=", "None", "return", "tuples", ".", "InquireCredByMechResult", "(", "res_name", ",", "res", ".", "init_lifetime", ",", "res", ".", "accept_lifetime", ",", "res", ".", "usage", ")" ]
Inspect these credentials for per-mechanism information

This method inspects these credentials for per-mechanism information
about them.

Args:
    mech (OID): the mechanism for which to retrieve the information
    name (bool): get the name associated with the credentials
    init_lifetime (bool): get the remaining initiate lifetime for
        the credentials
    accept_lifetime (bool): get the remaining accept lifetime for
        the credentials
    usage (bool): get the usage for the credentials

Returns:
    InquireCredByMechResult: the information about the credentials,
        with None used when the corresponding argument was False
[ "Inspect", "these", "credentials", "for", "per", "-", "mechanism", "information" ]
python
train
41.606061
elastic/elasticsearch-py
elasticsearch/client/tasks.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/tasks.py#L48-L59
def get(self, task_id=None, params=None): """ Retrieve information for a particular task. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_ :arg task_id: Return the task with specified id (node_id:task_number) :arg wait_for_completion: Wait for the matching tasks to complete (default: false) :arg timeout: Maximum waiting time for `wait_for_completion` """ return self.transport.perform_request('GET', _make_path('_tasks', task_id), params=params)
[ "def", "get", "(", "self", ",", "task_id", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "'GET'", ",", "_make_path", "(", "'_tasks'", ",", "task_id", ")", ",", "params", "=", "params", ")" ]
Retrieve information for a particular task. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_ :arg task_id: Return the task with specified id (node_id:task_number) :arg wait_for_completion: Wait for the matching tasks to complete (default: false) :arg timeout: Maximum waiting time for `wait_for_completion`
[ "Retrieve", "information", "for", "a", "particular", "task", ".", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "tasks", ".", "html", ">", "_" ]
python
train
46.25
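A hedged usage sketch of the client method above; the cluster location and the `node_id:task_number` value are assumptions, and `wait_for_completion` is taken from the docstring.
```python
from elasticsearch import Elasticsearch

es = Elasticsearch()  # assumes a reachable cluster on localhost:9200
# "node1:12345" is a hypothetical node_id:task_number pair
info = es.tasks.get(task_id="node1:12345", wait_for_completion=False)
```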
pymoca/pymoca
src/pymoca/tree.py
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/tree.py#L702-L717
def fully_scope_function_calls(node: ast.Tree, expression: ast.Expression, function_set: OrderedDict) -> ast.Expression: """ Turns the function references in this expression into fully scoped references (e.g. relative to absolute). The component references of all referenced functions are put into the functions set. :param node: collection for performing symbol lookup etc. :param expression: original expression :param function_set: output of function component references :return: """ expression_copy = copy.deepcopy(expression) w = TreeWalker() w.walk(FunctionExpander(node, function_set), expression_copy) return expression_copy
[ "def", "fully_scope_function_calls", "(", "node", ":", "ast", ".", "Tree", ",", "expression", ":", "ast", ".", "Expression", ",", "function_set", ":", "OrderedDict", ")", "->", "ast", ".", "Expression", ":", "expression_copy", "=", "copy", ".", "deepcopy", "(", "expression", ")", "w", "=", "TreeWalker", "(", ")", "w", ".", "walk", "(", "FunctionExpander", "(", "node", ",", "function_set", ")", ",", "expression_copy", ")", "return", "expression_copy" ]
Turns the function references in this expression into fully scoped references (e.g. relative to absolute). The component references of all referenced functions are put into the functions set. :param node: collection for performing symbol lookup etc. :param expression: original expression :param function_set: output of function component references :return:
[ "Turns", "the", "function", "references", "in", "this", "expression", "into", "fully", "scoped", "references", "(", "e", ".", "g", ".", "relative", "to", "absolute", ")", ".", "The", "component", "references", "of", "all", "referenced", "functions", "are", "put", "into", "the", "functions", "set", "." ]
python
train
42
evyatarmeged/Raccoon
raccoon_src/lib/web_app.py
https://github.com/evyatarmeged/Raccoon/blob/985797f73329976ec9c3fefbe4bbb3c74096ca51/raccoon_src/lib/web_app.py#L29-L53
def _detect_cms(self, tries=0): """ Detect CMS using whatcms.org. Has a re-try mechanism because false negatives may occur :param tries: Count of tries for CMS discovery """ # WhatCMS is under CloudFlare which detects and blocks proxied/Tor traffic, hence normal request. page = requests.get(url="https://whatcms.org/?s={}".format(self.host.target)) soup = BeautifulSoup(page.text, "lxml") found = soup.select(".panel.panel-success") if found: try: cms = [a for a in soup.select("a") if "/c/" in a.get("href")][0] self.logger.info("{} CMS detected: target is using {}{}{}".format( COLORED_COMBOS.GOOD, COLOR.GREEN, cms.get("title"), COLOR.RESET)) except IndexError: if tries >= 4: return else: self._detect_cms(tries=tries + 1) else: if tries >= 4: return else: self._detect_cms(tries=tries + 1)
[ "def", "_detect_cms", "(", "self", ",", "tries", "=", "0", ")", ":", "# WhatCMS is under CloudFlare which detects and blocks proxied/Tor traffic, hence normal request.", "page", "=", "requests", ".", "get", "(", "url", "=", "\"https://whatcms.org/?s={}\"", ".", "format", "(", "self", ".", "host", ".", "target", ")", ")", "soup", "=", "BeautifulSoup", "(", "page", ".", "text", ",", "\"lxml\"", ")", "found", "=", "soup", ".", "select", "(", "\".panel.panel-success\"", ")", "if", "found", ":", "try", ":", "cms", "=", "[", "a", "for", "a", "in", "soup", ".", "select", "(", "\"a\"", ")", "if", "\"/c/\"", "in", "a", ".", "get", "(", "\"href\"", ")", "]", "[", "0", "]", "self", ".", "logger", ".", "info", "(", "\"{} CMS detected: target is using {}{}{}\"", ".", "format", "(", "COLORED_COMBOS", ".", "GOOD", ",", "COLOR", ".", "GREEN", ",", "cms", ".", "get", "(", "\"title\"", ")", ",", "COLOR", ".", "RESET", ")", ")", "except", "IndexError", ":", "if", "tries", ">=", "4", ":", "return", "else", ":", "self", ".", "_detect_cms", "(", "tries", "=", "tries", "+", "1", ")", "else", ":", "if", "tries", ">=", "4", ":", "return", "else", ":", "self", ".", "_detect_cms", "(", "tries", "=", "tries", "+", "1", ")" ]
Detect CMS using whatcms.org. Has a re-try mechanism because false negatives may occur :param tries: Count of tries for CMS discovery
[ "Detect", "CMS", "using", "whatcms", ".", "org", ".", "Has", "a", "re", "-", "try", "mechanism", "because", "false", "negatives", "may", "occur", ":", "param", "tries", ":", "Count", "of", "tries", "for", "CMS", "discovery" ]
python
train
42.52
cltk/cltk
cltk/corpus/sanskrit/itrans/itrans_transliterator.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/sanskrit/itrans/itrans_transliterator.py#L322-L340
def _setupParseTree(self, rowFrom, rowTo, colIndex, tree): """ Build the search tree for multi-character encodings. """ if colIndex == self._longestEntry: return prevchar = None rowIndex = rowFrom while rowIndex <= rowTo: if colIndex < len(self._parsedata[rowIndex]): c = self._parsedata[rowIndex][colIndex] if c != prevchar: tree[c] = {} if prevchar is not None: self._setupParseTree(rowFrom, rowIndex - 1, colIndex + 1, tree[prevchar]) rowFrom = rowIndex prevchar = c if rowIndex == rowTo: self._setupParseTree(rowFrom, rowIndex, colIndex + 1, tree[prevchar]) rowIndex = rowIndex + 1
[ "def", "_setupParseTree", "(", "self", ",", "rowFrom", ",", "rowTo", ",", "colIndex", ",", "tree", ")", ":", "if", "colIndex", "==", "self", ".", "_longestEntry", ":", "return", "prevchar", "=", "None", "rowIndex", "=", "rowFrom", "while", "rowIndex", "<=", "rowTo", ":", "if", "colIndex", "<", "len", "(", "self", ".", "_parsedata", "[", "rowIndex", "]", ")", ":", "c", "=", "self", ".", "_parsedata", "[", "rowIndex", "]", "[", "colIndex", "]", "if", "c", "!=", "prevchar", ":", "tree", "[", "c", "]", "=", "{", "}", "if", "prevchar", "is", "not", "None", ":", "self", ".", "_setupParseTree", "(", "rowFrom", ",", "rowIndex", "-", "1", ",", "colIndex", "+", "1", ",", "tree", "[", "prevchar", "]", ")", "rowFrom", "=", "rowIndex", "prevchar", "=", "c", "if", "rowIndex", "==", "rowTo", ":", "self", ".", "_setupParseTree", "(", "rowFrom", ",", "rowIndex", ",", "colIndex", "+", "1", ",", "tree", "[", "prevchar", "]", ")", "rowIndex", "=", "rowIndex", "+", "1" ]
Build the search tree for multi-character encodings.
[ "Build", "the", "search", "tree", "for", "multi", "-", "character", "encodings", "." ]
python
train
43.368421
wmayner/pyphi
pyphi/subsystem.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/subsystem.py#L600-L605
def cause_mip(self, mechanism, purview): """Return the irreducibility analysis for the cause MIP. Alias for |find_mip()| with ``direction`` set to |CAUSE|. """ return self.find_mip(Direction.CAUSE, mechanism, purview)
[ "def", "cause_mip", "(", "self", ",", "mechanism", ",", "purview", ")", ":", "return", "self", ".", "find_mip", "(", "Direction", ".", "CAUSE", ",", "mechanism", ",", "purview", ")" ]
Return the irreducibility analysis for the cause MIP. Alias for |find_mip()| with ``direction`` set to |CAUSE|.
[ "Return", "the", "irreducibility", "analysis", "for", "the", "cause", "MIP", "." ]
python
train
40.833333
brainiak/brainiak
brainiak/isc.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L525-L585
def squareform_isfc(isfcs, iscs=None): """Converts square ISFCs to condensed ISFCs (and ISCs), and vice-versa If input is a 2- or 3-dimensional array of square ISFC matrices, converts this to the condensed off-diagonal ISFC values (i.e., the vectorized triangle) and the diagonal ISC values. In this case, input must be a single array of shape either n_voxels x n_voxels or n_subjects (or n_pairs) x n_voxels x n_voxels. The condensed ISFC values are vectorized according to scipy.spatial.distance.squareform, yielding n_voxels * (n_voxels - 1) / 2 values comprising every voxel pair. Alternatively, if input is an array of condensed off-diagonal ISFC values and an array of diagonal ISC values, the square (redundant) ISFC values are returned. This function mimics scipy.spatial.distance.squareform, but is intended to retain the diagonal ISC values. Parameters ---------- isfcs : ndarray Either condensed or redundant ISFC values iscs: ndarray, optional Diagonal ISC values, required when input is condensed Returns ------- isfcs : ndarray or tuple of ndarrays If condensed ISFCs are passed, a single redundant ISFC array is returned; if redundant ISFCs are passed, both a condensed off- diagonal ISFC array and the diagonal ISC values are returned """ # Check if incoming ISFCs are square (redundant) if not type(iscs) == np.ndarray and isfcs.shape[-2] == isfcs.shape[-1]: if isfcs.ndim == 2: isfcs = isfcs[np.newaxis, ...] if isfcs.ndim == 3: iscs = np.diagonal(isfcs, axis1=1, axis2=2) isfcs = np.vstack([squareform(isfc, checks=False)[np.newaxis, :] for isfc in isfcs]) else: raise ValueError("Square (redundant) ISFCs must be square " "with multiple subjects or pairs of subjects " "indexed by the first dimension") if isfcs.shape[0] == iscs.shape[0] == 1: isfcs, iscs = isfcs[0], iscs[0] return isfcs, iscs # Otherwise, convert from condensed to redundant else: if isfcs.ndim == iscs.ndim == 1: isfcs, iscs = isfcs[np.newaxis, :], iscs[np.newaxis, :] isfcs_stack = [] for isfc, isc in zip(isfcs, iscs): isfc_sq = squareform(isfc, checks=False) np.fill_diagonal(isfc_sq, isc) isfcs_stack.append(isfc_sq[np.newaxis, ...]) isfcs = np.vstack(isfcs_stack) if isfcs.shape[0] == 1: isfcs = isfcs[0] return isfcs
[ "def", "squareform_isfc", "(", "isfcs", ",", "iscs", "=", "None", ")", ":", "# Check if incoming ISFCs are square (redundant)", "if", "not", "type", "(", "iscs", ")", "==", "np", ".", "ndarray", "and", "isfcs", ".", "shape", "[", "-", "2", "]", "==", "isfcs", ".", "shape", "[", "-", "1", "]", ":", "if", "isfcs", ".", "ndim", "==", "2", ":", "isfcs", "=", "isfcs", "[", "np", ".", "newaxis", ",", "...", "]", "if", "isfcs", ".", "ndim", "==", "3", ":", "iscs", "=", "np", ".", "diagonal", "(", "isfcs", ",", "axis1", "=", "1", ",", "axis2", "=", "2", ")", "isfcs", "=", "np", ".", "vstack", "(", "[", "squareform", "(", "isfc", ",", "checks", "=", "False", ")", "[", "np", ".", "newaxis", ",", ":", "]", "for", "isfc", "in", "isfcs", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Square (redundant) ISFCs must be square \"", "\"with multiple subjects or pairs of subjects \"", "\"indexed by the first dimension\"", ")", "if", "isfcs", ".", "shape", "[", "0", "]", "==", "iscs", ".", "shape", "[", "0", "]", "==", "1", ":", "isfcs", ",", "iscs", "=", "isfcs", "[", "0", "]", ",", "iscs", "[", "0", "]", "return", "isfcs", ",", "iscs", "# Otherwise, convert from condensed to redundant", "else", ":", "if", "isfcs", ".", "ndim", "==", "iscs", ".", "ndim", "==", "1", ":", "isfcs", ",", "iscs", "=", "isfcs", "[", "np", ".", "newaxis", ",", ":", "]", ",", "iscs", "[", "np", ".", "newaxis", ",", ":", "]", "isfcs_stack", "=", "[", "]", "for", "isfc", ",", "isc", "in", "zip", "(", "isfcs", ",", "iscs", ")", ":", "isfc_sq", "=", "squareform", "(", "isfc", ",", "checks", "=", "False", ")", "np", ".", "fill_diagonal", "(", "isfc_sq", ",", "isc", ")", "isfcs_stack", ".", "append", "(", "isfc_sq", "[", "np", ".", "newaxis", ",", "...", "]", ")", "isfcs", "=", "np", ".", "vstack", "(", "isfcs_stack", ")", "if", "isfcs", ".", "shape", "[", "0", "]", "==", "1", ":", "isfcs", "=", "isfcs", "[", "0", "]", "return", "isfcs" ]
Converts square ISFCs to condensed ISFCs (and ISCs), and vice-versa If input is a 2- or 3-dimensional array of square ISFC matrices, converts this to the condensed off-diagonal ISFC values (i.e., the vectorized triangle) and the diagonal ISC values. In this case, input must be a single array of shape either n_voxels x n_voxels or n_subjects (or n_pairs) x n_voxels x n_voxels. The condensed ISFC values are vectorized according to scipy.spatial.distance.squareform, yielding n_voxels * (n_voxels - 1) / 2 values comprising every voxel pair. Alternatively, if input is an array of condensed off-diagonal ISFC values and an array of diagonal ISC values, the square (redundant) ISFC values are returned. This function mimics scipy.spatial.distance.squareform, but is intended to retain the diagonal ISC values. Parameters ---------- isfcs : ndarray Either condensed or redundant ISFC values iscs: ndarray, optional Diagonal ISC values, required when input is condensed Returns ------- isfcs : ndarray or tuple of ndarrays If condensed ISFCs are passed, a single redundant ISFC array is returned; if redundant ISFCs are passed, both a condensed off- diagonal ISFC array and the diagonal ISC values are returned
[ "Converts", "square", "ISFCs", "to", "condensed", "ISFCs", "(", "and", "ISCs", ")", "and", "vice", "-", "versa" ]
python
train
42.491803
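A round-trip sketch of `squareform_isfc` with a made-up 3-voxel matrix, assuming `numpy` and the function above are in scope.
```python
import numpy as np

# Hypothetical square ISFC matrix; the diagonal holds the ISC values.
sq = np.array([[1.0, 0.2, 0.3],
               [0.2, 1.0, 0.4],
               [0.3, 0.4, 1.0]])
isfcs, iscs = squareform_isfc(sq)       # condensed: [0.2, 0.3, 0.4], ISCs: [1., 1., 1.]
sq_back = squareform_isfc(isfcs, iscs)  # back to the square (redundant) form
```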
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAARP/QAAccount.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1850-L1857
def change_cash(self, money):
    """
    External operation |high risk|
    """
    res = self.cash[-1] + money
    if res >= 0:
        # high-risk operation
        self.cash[-1] = res
[ "def", "change_cash", "(", "self", ",", "money", ")", ":", "res", "=", "self", ".", "cash", "[", "-", "1", "]", "+", "money", "if", "res", ">=", "0", ":", "# 高危操作", "self", ".", "cash", "[", "-", "1", "]", "=", "res" ]
External operation |high risk|
[ "外部操作|高危|" ]
python
train
21.375
iwanbk/nyamuk
nyamuk/nyamuk.py
https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/nyamuk.py#L182-L193
def disconnect(self): """Disconnect from server.""" self.logger.info("DISCONNECT") if self.sock == NC.INVALID_SOCKET: return NC.ERR_NO_CONN self.state = NC.CS_DISCONNECTING ret = self.send_disconnect() ret2, bytes_written = self.packet_write() self.socket_close() return ret
[ "def", "disconnect", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"DISCONNECT\"", ")", "if", "self", ".", "sock", "==", "NC", ".", "INVALID_SOCKET", ":", "return", "NC", ".", "ERR_NO_CONN", "self", ".", "state", "=", "NC", ".", "CS_DISCONNECTING", "ret", "=", "self", ".", "send_disconnect", "(", ")", "ret2", ",", "bytes_written", "=", "self", ".", "packet_write", "(", ")", "self", ".", "socket_close", "(", ")", "return", "ret" ]
Disconnect from server.
[ "Disconnect", "from", "server", "." ]
python
train
29.083333
Godley/MuseParse
MuseParse/classes/Input/MxmlParser.py
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Input/MxmlParser.py#L397-L413
def ignore_exception(IgnoreException=Exception, DefaultVal=None): """ Decorator for ignoring exception from a function e.g. @ignore_exception(DivideByZero) e.g.2. ignore_exception(DivideByZero)(Divide)(2/0) borrowed from: http://stackoverflow.com/questions/2262333/is-there-a-built-in-or-more-pythonic-way-to-try-to-parse-a-string-to-an-integer """ def dec(function): def _dec(*args, **kwargs): try: return function(*args, **kwargs) except IgnoreException: return DefaultVal return _dec return dec
[ "def", "ignore_exception", "(", "IgnoreException", "=", "Exception", ",", "DefaultVal", "=", "None", ")", ":", "def", "dec", "(", "function", ")", ":", "def", "_dec", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "IgnoreException", ":", "return", "DefaultVal", "return", "_dec", "return", "dec" ]
Decorator for ignoring exception from a function e.g. @ignore_exception(DivideByZero) e.g.2. ignore_exception(DivideByZero)(Divide)(2/0) borrowed from: http://stackoverflow.com/questions/2262333/is-there-a-built-in-or-more-pythonic-way-to-try-to-parse-a-string-to-an-integer
[ "Decorator", "for", "ignoring", "exception", "from", "a", "function", "e", ".", "g", "." ]
python
train
34.470588
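A usage sketch of the decorator above; `safe_int` is a hypothetical name.
```python
# Wrap int() so parse failures return a default instead of raising.
safe_int = ignore_exception(ValueError, 0)(int)
safe_int("42")    # -> 42
safe_int("oops")  # -> 0
```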
datajoint/datajoint-python
datajoint/table.py
https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/table.py#L89-L96
def parents(self, primary=None):
    """
    :param primary: if None, then all parents are returned. If True, then only foreign keys composed of
        primary key attributes are considered.  If False, only foreign keys including at least one
        non-primary attribute are considered.
    :return: dict of tables referenced with self's foreign keys
    """
    return self.connection.dependencies.parents(self.full_table_name, primary)
[ "def", "parents", "(", "self", ",", "primary", "=", "None", ")", ":", "return", "self", ".", "connection", ".", "dependencies", ".", "parents", "(", "self", ".", "full_table_name", ",", "primary", ")" ]
:param primary: if None, then all parents are returned. If True, then only foreign keys composed of
    primary key attributes are considered.  If False, only foreign keys including at least one
    non-primary attribute are considered.
:return: dict of tables referenced with self's foreign keys
[ ":", "param", "primary", ":", "if", "None", "then", "all", "parents", "are", "returned", ".", "If", "True", "then", "only", "foreign", "keys", "composed", "of", "primary", "key", "attributes", "are", "considered", ".", "If", "False", "the", "only", "foreign", "keys", "including", "at", "least", "one", "non", "-", "primary", "attribute", "are", "considered", ".", ":", "return", ":", "dict", "of", "tables", "referenced", "with", "self", "s", "foreign", "keys" ]
python
train
58.125
pahaz/sshtunnel
sshtunnel.py
https://github.com/pahaz/sshtunnel/blob/66a923e4c6c8e41b8348420523fbf5ddfd53176c/sshtunnel.py#L726-L754
def local_is_up(self, target): """ Check if a tunnel is up (remote target's host is reachable on TCP target's port) Arguments: target (tuple): tuple of type (``str``, ``int``) indicating the listen IP address and port Return: boolean .. deprecated:: 0.1.0 Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up` """ try: check_address(target) except ValueError: self.logger.warning('Target must be a tuple (IP, port), where IP ' 'is a string (i.e. "192.168.0.1") and port is ' 'an integer (i.e. 40000). Alternatively ' 'target can be a valid UNIX domain socket.') return False if self.skip_tunnel_checkup: # force tunnel check at this point self.skip_tunnel_checkup = False self.check_tunnels() self.skip_tunnel_checkup = True # roll it back return self.tunnel_is_up.get(target, True)
[ "def", "local_is_up", "(", "self", ",", "target", ")", ":", "try", ":", "check_address", "(", "target", ")", "except", "ValueError", ":", "self", ".", "logger", ".", "warning", "(", "'Target must be a tuple (IP, port), where IP '", "'is a string (i.e. \"192.168.0.1\") and port is '", "'an integer (i.e. 40000). Alternatively '", "'target can be a valid UNIX domain socket.'", ")", "return", "False", "if", "self", ".", "skip_tunnel_checkup", ":", "# force tunnel check at this point", "self", ".", "skip_tunnel_checkup", "=", "False", "self", ".", "check_tunnels", "(", ")", "self", ".", "skip_tunnel_checkup", "=", "True", "# roll it back", "return", "self", ".", "tunnel_is_up", ".", "get", "(", "target", ",", "True", ")" ]
Check if a tunnel is up (remote target's host is reachable on TCP target's port) Arguments: target (tuple): tuple of type (``str``, ``int``) indicating the listen IP address and port Return: boolean .. deprecated:: 0.1.0 Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up`
[ "Check", "if", "a", "tunnel", "is", "up", "(", "remote", "target", "s", "host", "is", "reachable", "on", "TCP", "target", "s", "port", ")" ]
python
train
37.655172
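A hedged sketch of checking a tunnel with `local_is_up`; the host, user and ports are placeholders. Note the docstring deprecates this in favor of `check_tunnels()` plus `tunnel_is_up`.
```python
from sshtunnel import SSHTunnelForwarder

server = SSHTunnelForwarder(
    'ssh.example.com',                        # placeholder gateway
    ssh_username='user',
    remote_bind_address=('127.0.0.1', 5432),
)
server.start()
server.local_is_up(server.local_bind_address)  # True if the tunnel answers
server.stop()
```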
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L2038-L2054
def InformarCalidadCertificacion(self, coe):
    "Report the quality of a certificate (C1116A/RT)"
    # call the webservice:
    ret = self.client.cgInformarCalidad(
                    auth={
                        'token': self.Token, 'sign': self.Sign,
                        'cuit': self.Cuit, },
                    coe=coe,
                    calidad=self.certificacion['primaria']['calidad'],
                    )
    # analyze the response
    ret = ret['oReturn']
    self.__analizar_errores(ret)
    self.AnalizarAutorizarCertificadoResp(ret)
    return True
[ "def", "InformarCalidadCertificacion", "(", "self", ",", "coe", ")", ":", "# llamo al webservice:", "ret", "=", "self", ".", "client", ".", "cgInformarCalidad", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'sign'", ":", "self", ".", "Sign", ",", "'cuit'", ":", "self", ".", "Cuit", ",", "}", ",", "coe", "=", "coe", ",", "calidad", "=", "self", ".", "certificacion", "[", "'primaria'", "]", "[", "'calidad'", "]", ",", ")", "# analizo la respusta", "ret", "=", "ret", "[", "'oReturn'", "]", "self", ".", "__analizar_errores", "(", "ret", ")", "self", ".", "AnalizarAutorizarCertificadoResp", "(", "ret", ")", "return", "True" ]
Report the quality of a certificate (C1116A/RT)
[ "Informar", "calidad", "de", "un", "certificado", "(", "C1116A", "/", "RT", ")" ]
python
train
36.529412
osrg/ryu
ryu/services/protocols/bgp/peer.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/peer.py#L536-L570
def on_update_enabled(self, conf_evt):
    """Implements neighbor configuration change listener.
    """
    enabled = conf_evt.value
    # If we do not have any protocol bound and configuration asks us to
    # enable this peer, we try to establish connection again.
    if enabled:
        LOG.info('%s enabled', self)
        if self._protocol and self._protocol.started:
            LOG.error('Tried to enable neighbor that is already enabled')
        else:
            self.state.bgp_state = const.BGP_FSM_CONNECT
            # Restart connect loop if not already running.
            if not self._connect_retry_event.is_set():
                self._connect_retry_event.set()
                LOG.debug('Starting connect loop as neighbor is enabled.')
    else:
        LOG.info('%s disabled', self)
        if self._protocol:
            # Stopping protocol will eventually trigger connection_lost
            # handler which will do some clean-up.
            # But the greenlet that is in charge of the socket may be killed
            # when we stop the protocol, hence we call connection_lost
            # here as we triggered socket to close.
            self._protocol.send_notification(
                BGP_ERROR_CEASE,
                BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN
            )
            self._protocol.stop()
            self._protocol = None
            self.state.bgp_state = const.BGP_FSM_IDLE

        # If this peer is not enabled anymore, we stop trying to make any
        # connection.
        LOG.debug('Disabling connect-retry as neighbor was disabled')
        self._connect_retry_event.clear()
[ "def", "on_update_enabled", "(", "self", ",", "conf_evt", ")", ":", "enabled", "=", "conf_evt", ".", "value", "# If we do not have any protocol bound and configuration asks us to", "# enable this peer, we try to establish connection again.", "if", "enabled", ":", "LOG", ".", "info", "(", "'%s enabled'", ",", "self", ")", "if", "self", ".", "_protocol", "and", "self", ".", "_protocol", ".", "started", ":", "LOG", ".", "error", "(", "'Tried to enable neighbor that is already enabled'", ")", "else", ":", "self", ".", "state", ".", "bgp_state", "=", "const", ".", "BGP_FSM_CONNECT", "# Restart connect loop if not already running.", "if", "not", "self", ".", "_connect_retry_event", ".", "is_set", "(", ")", ":", "self", ".", "_connect_retry_event", ".", "set", "(", ")", "LOG", ".", "debug", "(", "'Starting connect loop as neighbor is enabled.'", ")", "else", ":", "LOG", ".", "info", "(", "'%s disabled'", ",", "self", ")", "if", "self", ".", "_protocol", ":", "# Stopping protocol will eventually trigger connection_lost", "# handler which will do some clean-up.", "# But the greenlet that is in charge of the socket may be kill", "# when we stop the protocol, hence we call connection_lost", "# here as we triggered socket to close.", "self", ".", "_protocol", ".", "send_notification", "(", "BGP_ERROR_CEASE", ",", "BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN", ")", "self", ".", "_protocol", ".", "stop", "(", ")", "self", ".", "_protocol", "=", "None", "self", ".", "state", ".", "bgp_state", "=", "const", ".", "BGP_FSM_IDLE", "# If this peer is not enabled any-more we stop trying to make any", "# connection.", "LOG", ".", "debug", "(", "'Disabling connect-retry as neighbor was disabled'", ")", "self", ".", "_connect_retry_event", ".", "clear", "(", ")" ]
Implements neighbor configuration change listener.
[ "Implements", "neighbor", "configuration", "change", "listener", "." ]
python
train
49.457143
xeroc/python-graphenelib
graphenecommon/chain.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/chain.py#L60-L75
def connect(self, node="", rpcuser="", rpcpassword="", **kwargs): """ Connect to blockchain network (internal use only) """ if not node: if "node" in self.config: node = self.config["node"] else: raise ValueError("A Blockchain node needs to be provided!") if not rpcuser and "rpcuser" in self.config: rpcuser = self.config["rpcuser"] if not rpcpassword and "rpcpassword" in self.config: rpcpassword = self.config["rpcpassword"] self.rpc = self.rpc_class(node, rpcuser, rpcpassword, **kwargs)
[ "def", "connect", "(", "self", ",", "node", "=", "\"\"", ",", "rpcuser", "=", "\"\"", ",", "rpcpassword", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "if", "not", "node", ":", "if", "\"node\"", "in", "self", ".", "config", ":", "node", "=", "self", ".", "config", "[", "\"node\"", "]", "else", ":", "raise", "ValueError", "(", "\"A Blockchain node needs to be provided!\"", ")", "if", "not", "rpcuser", "and", "\"rpcuser\"", "in", "self", ".", "config", ":", "rpcuser", "=", "self", ".", "config", "[", "\"rpcuser\"", "]", "if", "not", "rpcpassword", "and", "\"rpcpassword\"", "in", "self", ".", "config", ":", "rpcpassword", "=", "self", ".", "config", "[", "\"rpcpassword\"", "]", "self", ".", "rpc", "=", "self", ".", "rpc_class", "(", "node", ",", "rpcuser", ",", "rpcpassword", ",", "*", "*", "kwargs", ")" ]
Connect to blockchain network (internal use only)
[ "Connect", "to", "blockchain", "network", "(", "internal", "use", "only", ")" ]
python
valid
37.9375
czepluch/pysecp256k1
c_secp256k1/__init__.py
https://github.com/czepluch/pysecp256k1/blob/164cb305857c5ba7a26adb6bd85459c5ea32ddd1/c_secp256k1/__init__.py#L173-L197
def _parse_to_recoverable_signature(sig):
    """ Returns a parsed recoverable signature of length 65 bytes """
    # Buffer for getting values of signature object
    assert isinstance(sig, bytes)
    assert len(sig) == 65

    # Make a recoverable signature of 65 bytes
    rec_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")

    # Retrieving the recid from the last byte of the signed key
    recid = ord(sig[64:65])

    # Parse a recoverable signature
    parsable_sig = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
        ctx,
        rec_sig,
        sig,
        recid
    )

    # Verify that the signature is parsable
    if not parsable_sig:
        raise InvalidSignatureError()

    return rec_sig
[ "def", "_parse_to_recoverable_signature", "(", "sig", ")", ":", "# Buffer for getting values of signature object", "assert", "isinstance", "(", "sig", ",", "bytes", ")", "assert", "len", "(", "sig", ")", "==", "65", "# Make a recoverable signature of 65 bytes", "rec_sig", "=", "ffi", ".", "new", "(", "\"secp256k1_ecdsa_recoverable_signature *\"", ")", "# Retrieving the recid from the last byte of the signed key", "recid", "=", "ord", "(", "sig", "[", "64", ":", "65", "]", ")", "# Parse a revoverable signature", "parsable_sig", "=", "lib", ".", "secp256k1_ecdsa_recoverable_signature_parse_compact", "(", "ctx", ",", "rec_sig", ",", "sig", ",", "recid", ")", "# Verify that the signature is parsable", "if", "not", "parsable_sig", ":", "raise", "InvalidSignatureError", "(", ")", "return", "rec_sig" ]
Returns a parsed recoverable signature of length 65 bytes
[ "Returns", "a", "parsed", "recoverable", "signature", "of", "length", "65", "bytes" ]
python
train
28.76
jeffrimko/Qprompt
lib/qprompt.py
https://github.com/jeffrimko/Qprompt/blob/1887c53656dfecac49e0650e0f912328801cbb83/lib/qprompt.py#L519-L526
def ask_yesno(msg="Proceed?", dft=None): """Prompts the user for a yes or no answer. Returns True for yes, False for no.""" yes = ["y", "yes", "Y", "YES"] no = ["n", "no", "N", "NO"] if dft != None: dft = yes[0] if (dft in yes or dft == True) else no[0] return ask(msg, dft=dft, vld=yes+no) in yes
[ "def", "ask_yesno", "(", "msg", "=", "\"Proceed?\"", ",", "dft", "=", "None", ")", ":", "yes", "=", "[", "\"y\"", ",", "\"yes\"", ",", "\"Y\"", ",", "\"YES\"", "]", "no", "=", "[", "\"n\"", ",", "\"no\"", ",", "\"N\"", ",", "\"NO\"", "]", "if", "dft", "!=", "None", ":", "dft", "=", "yes", "[", "0", "]", "if", "(", "dft", "in", "yes", "or", "dft", "==", "True", ")", "else", "no", "[", "0", "]", "return", "ask", "(", "msg", ",", "dft", "=", "dft", ",", "vld", "=", "yes", "+", "no", ")", "in", "yes" ]
Prompts the user for a yes or no answer. Returns True for yes, False for no.
[ "Prompts", "the", "user", "for", "a", "yes", "or", "no", "answer", ".", "Returns", "True", "for", "yes", "False", "for", "no", "." ]
python
train
40.25
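Typical use of `ask_yesno`, assuming the surrounding `qprompt` module is imported; the prompt text is arbitrary.
```python
if ask_yesno("Overwrite existing file?", dft="n"):
    pass  # user typed y/yes; pressing Enter takes the default, no
```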
twosigma/marbles
marbles/mixins/marbles/mixins/mixins.py
https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L911-L936
def assertFileSizeNotAlmostEqual(
        self, filename, size, places=None, msg=None, delta=None):
    '''Fail unless ``filename`` does not have the given ``size``
    as determined by their difference rounded to the given number
    of decimal ``places`` (default 7) and comparing to zero, or
    if their difference is greater than a given ``delta``.

    Parameters
    ----------
    filename : str, bytes, file-like
    size : int, float
    places : int
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.
    delta : int, float

    Raises
    ------
    TypeError
        If ``filename`` is not a str or bytes object and is not
        file-like.
    '''
    fsize = self._get_file_size(filename)

    self.assertNotAlmostEqual(
        fsize, size, places=places, msg=msg, delta=delta)
[ "def", "assertFileSizeNotAlmostEqual", "(", "self", ",", "filename", ",", "size", ",", "places", "=", "None", ",", "msg", "=", "None", ",", "delta", "=", "None", ")", ":", "fsize", "=", "self", ".", "_get_file_size", "(", "filename", ")", "self", ".", "assertNotAlmostEqual", "(", "fsize", ",", "size", ",", "places", "=", "places", ",", "msg", "=", "msg", ",", "delta", "=", "delta", ")" ]
Fail unless ``filename`` does not have the given ``size`` as determined by their difference rounded to the given number of decimal ``places`` (default 7) and comparing to zero, or if their difference is greater than a given ``delta``.

Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
    If not provided, the :mod:`marbles.mixins` or
    :mod:`unittest` standard message will be used.
delta : int, float

Raises
------
TypeError
    If ``filename`` is not a str or bytes object and is not
    file-like.
[ "Fail", "unless", "filename", "does", "not", "have", "the", "given", "size", "as", "determined", "by", "their", "difference", "rounded", "to", "the", "given", "number", "ofdecimal", "places", "(", "default", "7", ")", "and", "comparing", "to", "zero", "or", "if", "their", "difference", "is", "greater", "than", "a", "given", "delta", "." ]
python
train
35.961538
tanghaibao/jcvi
jcvi/utils/cbook.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L219-L227
def enumerate_reversed(sequence): """ Perform reverse enumeration, returning an iterator with decrementing index/position values Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python """ for index in reversed(xrange(len(sequence))): yield index, sequence[index]
[ "def", "enumerate_reversed", "(", "sequence", ")", ":", "for", "index", "in", "reversed", "(", "xrange", "(", "len", "(", "sequence", ")", ")", ")", ":", "yield", "index", ",", "sequence", "[", "index", "]" ]
Perform reverse enumeration, returning an iterator with decrementing index/position values Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python
[ "Perform", "reverse", "enumeration", "returning", "an", "iterator", "with", "decrementing", "index", "/", "position", "values" ]
python
train
36.111111
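A quick sketch of the iterator above; note that `xrange` makes this helper Python 2 only as written.
```python
list(enumerate_reversed(['a', 'b', 'c']))
# [(2, 'c'), (1, 'b'), (0, 'a')]
```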
onelogin/python-saml
src/onelogin/saml2/response.py
https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/response.py#L780-L797
def __query(self, query, tagid=None):
    """
    Extracts nodes that match the query from the Response

    :param query: XPath expression
    :type query: String

    :param tagid: Tag ID
    :type tagid: String

    :returns: The queried nodes
    :rtype: list
    """
    if self.encrypted:
        document = self.decrypted_document
    else:
        document = self.document
    return OneLogin_Saml2_Utils.query(document, query, None, tagid)
[ "def", "__query", "(", "self", ",", "query", ",", "tagid", "=", "None", ")", ":", "if", "self", ".", "encrypted", ":", "document", "=", "self", ".", "decrypted_document", "else", ":", "document", "=", "self", ".", "document", "return", "OneLogin_Saml2_Utils", ".", "query", "(", "document", ",", "query", ",", "None", ",", "tagid", ")" ]
Extracts nodes that match the query from the Response

:param query: XPath expression
:type query: String

:param tagid: Tag ID
:type tagid: String

:returns: The queried nodes
:rtype: list
[ "Extracts", "nodes", "that", "match", "the", "query", "from", "the", "Response" ]
python
train
27
markovmodel/msmtools
msmtools/util/matrix/matrix.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/util/matrix/matrix.py#L44-L56
def is_sparse_file(filename): """Determine if the given filename indicates a dense or a sparse matrix If pathname is xxx.coo.yyy return True otherwise False. """ dirname, basename = os.path.split(filename) name, ext = os.path.splitext(basename) matrix_name, matrix_ext = os.path.splitext(name) if matrix_ext == '.coo': return True else: return False
[ "def", "is_sparse_file", "(", "filename", ")", ":", "dirname", ",", "basename", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "matrix_name", ",", "matrix_ext", "=", "os", ".", "path", ".", "splitext", "(", "name", ")", "if", "matrix_ext", "==", "'.coo'", ":", "return", "True", "else", ":", "return", "False" ]
Determine if the given filename indicates a dense or a sparse matrix If pathname is xxx.coo.yyy return True otherwise False.
[ "Determine", "if", "the", "given", "filename", "indicates", "a", "dense", "or", "a", "sparse", "matrix" ]
python
train
30
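Examples of the naming convention the check above encodes; the paths are made up.
```python
is_sparse_file('data/T.coo.dat')  # True: '.coo' sits before the final extension
is_sparse_file('data/T.dat')      # False
is_sparse_file('T.coo')           # False: here '.coo' is the outer extension
```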
inveniosoftware-attic/invenio-utils
invenio_utils/html.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/html.py#L759-L828
def create_tag(
        tag, escaper=EscapedHTMLString, opening_only=False, body=None,
        escape_body=False, escape_attr=True, indent=0, attrs=None,
        **other_attrs):
    """
    Create an XML/HTML tag.

    This function creates a full XML/HTML tag, putting together an
    optional inner body and a dictionary of attributes.

        >>> print create_tag("select", body=create_tag("h1",
        ...     body="hello", attrs={'class': "foo"}))
        <select><h1 class="foo">hello</h1></select>

    @param tag: the tag (e.g. "select", "body", "h1"...).
    @type tag: string
    @param body: some text/HTML to put in the body of the tag (this
        body will be indented WRT the tag).
    @type body: string
    @param escape_body: whether the body (if any) must be escaped.
    @type escape_body: boolean
    @param escape_attr: whether the attribute values (if any) must be
        escaped.
    @type escape_attr: boolean
    @param indent: number of levels of indentation for the tag.
    @type indent: integer
    @param attrs: map of attributes to add to the tag.
    @type attrs: dict
    @return: the HTML tag.
    @rtype: string
    """
    if attrs is None:
        attrs = {}
    for key, value in iteritems(other_attrs):
        if value is not None:
            if key.endswith('_'):
                attrs[key[:-1]] = value
            else:
                attrs[key] = value
    out = "<%s" % tag
    for key, value in iteritems(attrs):
        if escape_attr:
            value = escaper(value, escape_quotes=True)
        out += ' %s="%s"' % (key, value)
    if body is not None:
        if callable(body) and body.__name__ == 'handle_body':
            body = body()
        out += ">"
        if escape_body and not isinstance(body, EscapedString):
            body = escaper(body)
        out += body
        if not opening_only:
            out += "</%s>" % tag
    elif not opening_only:
        out += " />"
    if indent:
        out = indent_text(out, indent)[:-1]
    from invenio_utils.text import wash_for_utf8
    return EscapedString(wash_for_utf8(out))
[ "def", "create_tag", "(", "tag", ",", "escaper", "=", "EscapedHTMLString", ",", "opening_only", "=", "False", ",", "body", "=", "None", ",", "escape_body", "=", "False", ",", "escape_attr", "=", "True", ",", "indent", "=", "0", ",", "attrs", "=", "None", ",", "*", "*", "other_attrs", ")", ":", "if", "attrs", "is", "None", ":", "attrs", "=", "{", "}", "for", "key", ",", "value", "in", "iteritems", "(", "other_attrs", ")", ":", "if", "value", "is", "not", "None", ":", "if", "key", ".", "endswith", "(", "'_'", ")", ":", "attrs", "[", "key", "[", ":", "-", "1", "]", "]", "=", "value", "else", ":", "attrs", "[", "key", "]", "=", "value", "out", "=", "\"<%s\"", "%", "tag", "for", "key", ",", "value", "in", "iteritems", "(", "attrs", ")", ":", "if", "escape_attr", ":", "value", "=", "escaper", "(", "value", ",", "escape_quotes", "=", "True", ")", "out", "+=", "' %s=\"%s\"'", "%", "(", "key", ",", "value", ")", "if", "body", "is", "not", "None", ":", "if", "callable", "(", "body", ")", "and", "body", ".", "__name__", "==", "'handle_body'", ":", "body", "=", "body", "(", ")", "out", "+=", "\">\"", "if", "escape_body", "and", "not", "isinstance", "(", "body", ",", "EscapedString", ")", ":", "body", "=", "escaper", "(", "body", ")", "out", "+=", "body", "if", "not", "opening_only", ":", "out", "+=", "\"</%s>\"", "%", "tag", "elif", "not", "opening_only", ":", "out", "+=", "\" />\"", "if", "indent", ":", "out", "=", "indent_text", "(", "out", ",", "indent", ")", "[", ":", "-", "1", "]", "from", "invenio_utils", ".", "text", "import", "wash_for_utf8", "return", "EscapedString", "(", "wash_for_utf8", "(", "out", ")", ")" ]
Create an XML/HTML tag.

This function creates a full XML/HTML tag, putting together an
optional inner body and a dictionary of attributes.

    >>> print create_tag("select", body=create_tag("h1",
    ...     body="hello", attrs={'class': "foo"}))
    <select><h1 class="foo">hello</h1></select>

@param tag: the tag (e.g. "select", "body", "h1"...).
@type tag: string
@param body: some text/HTML to put in the body of the tag (this
    body will be indented WRT the tag).
@type body: string
@param escape_body: whether the body (if any) must be escaped.
@type escape_body: boolean
@param escape_attr: whether the attribute values (if any) must be
    escaped.
@type escape_attr: boolean
@param indent: number of levels of indentation for the tag.
@type indent: integer
@param attrs: map of attributes to add to the tag.
@type attrs: dict
@return: the HTML tag.
@rtype: string
[ "Create", "an", "XML", "/", "HTML", "tag", "." ]
python
train
30.3
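A couple of hedged calls against the code above; note that a trailing underscore in a keyword attribute is mapped to the bare name, which sidesteps Python keywords like `class`.
```python
create_tag("br")                              # '<br />'
create_tag("h1", body="hello", class_="foo")  # '<h1 class="foo">hello</h1>'
```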
ruipgil/TrackToTrip
tracktotrip/track.py
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/track.py#L137-L189
def to_trip(
        self,
        smooth,
        smooth_strategy,
        smooth_noise,
        seg,
        seg_eps,
        seg_min_time,
        simplify,
        simplify_max_dist_error,
        simplify_max_speed_error
):
    """In-place transformation of a track into a trip

    A trip is a more accurate depiction of reality than a
    track.
    For a track to become a trip it needs to go through the
    following steps:
        + noise removal
        + smoothing
        + spatio-temporal segmentation
        + simplification
    At the end of these steps we have a less noisy track
    that has fewer points but holds the same information.
    It's required that each segment has its metrics calculated
    or has been preprocessed.

    Args:
        name: An optional string with the name of the trip. If
            none is given, one will be generated by generateName
    Returns:
        This Track instance
    """
    self.compute_metrics()
    self.remove_noise()

    print (smooth, seg, simplify)
    if smooth:
        self.compute_metrics()
        self.smooth(smooth_strategy, smooth_noise)
    if seg:
        self.compute_metrics()
        self.segment(seg_eps, seg_min_time)
    if simplify:
        self.compute_metrics()
        self.simplify(0, simplify_max_dist_error, simplify_max_speed_error)
    self.compute_metrics()
    return self
[ "def", "to_trip", "(", "self", ",", "smooth", ",", "smooth_strategy", ",", "smooth_noise", ",", "seg", ",", "seg_eps", ",", "seg_min_time", ",", "simplify", ",", "simplify_max_dist_error", ",", "simplify_max_speed_error", ")", ":", "self", ".", "compute_metrics", "(", ")", "self", ".", "remove_noise", "(", ")", "print", "(", "smooth", ",", "seg", ",", "simplify", ")", "if", "smooth", ":", "self", ".", "compute_metrics", "(", ")", "self", ".", "smooth", "(", "smooth_strategy", ",", "smooth_noise", ")", "if", "seg", ":", "self", ".", "compute_metrics", "(", ")", "self", ".", "segment", "(", "seg_eps", ",", "seg_min_time", ")", "if", "simplify", ":", "self", ".", "compute_metrics", "(", ")", "self", ".", "simplify", "(", "0", ",", "simplify_max_dist_error", ",", "simplify_max_speed_error", ")", "self", ".", "compute_metrics", "(", ")", "return", "self" ]
In-place transformation of a track into a trip

A trip is a more accurate depiction of reality than a
track.
For a track to become a trip it needs to go through the
following steps:
    + noise removal
    + smoothing
    + spatio-temporal segmentation
    + simplification
At the end of these steps we have a less noisy track
that has fewer points but holds the same information.
It's required that each segment has its metrics calculated
or has been preprocessed.

Args:
    name: An optional string with the name of the trip. If
        none is given, one will be generated by generateName
Returns:
    This Track instance
[ "In", "-", "place", "transformation", "of", "a", "track", "into", "a", "trip" ]
python
train
28.584906
Dentosal/python-sc2
sc2/bot_ai.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/bot_ai.py#L495-L499
def has_creep(self, pos: Union[Point2, Point3, Unit]) -> bool: """ Returns True if there is creep on the grid point. """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self.state.creep[pos] != 0
[ "def", "has_creep", "(", "self", ",", "pos", ":", "Union", "[", "Point2", ",", "Point3", ",", "Unit", "]", ")", "->", "bool", ":", "assert", "isinstance", "(", "pos", ",", "(", "Point2", ",", "Point3", ",", "Unit", ")", ")", "pos", "=", "pos", ".", "position", ".", "to2", ".", "rounded", "return", "self", ".", "state", ".", "creep", "[", "pos", "]", "!=", "0" ]
Returns True if there is creep on the grid point.
[ "Returns", "True", "if", "there", "is", "creep", "on", "the", "grid", "point", "." ]
python
train
52
jic-dtool/dtool-http
dtool_http/storagebroker.py
https://github.com/jic-dtool/dtool-http/blob/7572221b07d5294aa9ead5097a4f16478837e742/dtool_http/storagebroker.py#L139-L146
def get_overlay(self, overlay_name): """Return overlay as a dictionary. :param overlay_name: name of the overlay :returns: overlay as a dictionary """ url = self.http_manifest["overlays"][overlay_name] return self._get_json_from_url(url)
[ "def", "get_overlay", "(", "self", ",", "overlay_name", ")", ":", "url", "=", "self", ".", "http_manifest", "[", "\"overlays\"", "]", "[", "overlay_name", "]", "return", "self", ".", "_get_json_from_url", "(", "url", ")" ]
Return overlay as a dictionary. :param overlay_name: name of the overlay :returns: overlay as a dictionary
[ "Return", "overlay", "as", "a", "dictionary", "." ]
python
train
34.875