Dataset schema (observed string-length ranges and class counts):

column             type      notes
repo               string    lengths 7 to 54
path               string    lengths 4 to 192
url                string    lengths 87 to 284
code               string    lengths 78 to 104k
code_tokens        list
docstring          string    lengths 1 to 46.9k
docstring_tokens   list
language           string    1 distinct value
partition          string    3 distinct values
Becksteinlab/GromacsWrapper
gromacs/fileformats/xvg.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/xvg.py#L1044-L1070
def decimate_error(self, a, maxpoints, **kwargs):
    """Return data *a* error-decimated on *maxpoints*.

    Histograms each column into *maxpoints* bins and calculates an
    error estimate in each bin as the decimated data, using
    :func:`numkit.timeseries.error_histogrammed_function`. The coarse
    grained time in the first column contains the centers of the
    histogram time.

    If *a* contains <= *maxpoints* then *a* is simply returned;
    otherwise a new array of the same dimensions but with a reduced
    number of *maxpoints* points is returned.

    .. SeeAlso:: :func:`numkit.timeseries.tcorrel`

    .. Note:: Assumes that the first column is time. Does not work very
       well because often there are too few datapoints to compute a
       good autocorrelation function.
    """
    warnings.warn("Using undocumented decimate_error() is highly EXPERIMENTAL",
                  category=gromacs.exceptions.LowAccuracyWarning)
    return self._decimate(numkit.timeseries.error_histogrammed_function,
                          a, maxpoints, **kwargs)
[ "def", "decimate_error", "(", "self", ",", "a", ",", "maxpoints", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"Using undocumented decimate_error() is highly EXPERIMENTAL\"", ",", "category", "=", "gromacs", ".", "exceptions", ".", "LowAccuracyWarning", ")", "return", "self", ".", "_decimate", "(", "numkit", ".", "timeseries", ".", "error_histogrammed_function", ",", "a", ",", "maxpoints", ",", "*", "*", "kwargs", ")" ]
Return data *a* error-decimated on *maxpoints*. Histograms each column into *maxpoints* bins and calculates an error estimate in each bin as the decimated data, using :func:`numkit.timeseries.error_histogrammed_function`. The coarse grained time in the first column contains the centers of the histogram time. If *a* contains <= *maxpoints* then *a* is simply returned; otherwise a new array of the same dimensions but with a reduced number of *maxpoints* points is returned. .. SeeAlso:: :func:`numkit.timeseries.tcorrel` .. Note:: Assumes that the first column is time. Does not work very well because often there are too few datapoints to compute a good autocorrelation function.
[ "Return", "data", "*", "a", "*", "error", "-", "decimated", "on", "*", "maxpoints", "*", "." ]
python
valid
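The row above defers the real work to numkit.timeseries.error_histogrammed_function, which is not shown. As a rough sketch of the idea only (not numkit's actual implementation): bin the series into maxpoints histogram bins, report the bin centers as the coarse-grained time, and a per-bin spread as the error estimate.

# Illustrative sketch only: bin a (time, value) series into `maxpoints` bins
# and estimate a per-bin error; numkit.timeseries may differ in detail.
import numpy as np

def decimate_by_histogram(t, y, maxpoints):
    if len(t) <= maxpoints:
        return t, y                     # nothing to do, mirroring the docstring
    edges = np.linspace(t.min(), t.max(), maxpoints + 1)
    centers = 0.5 * (edges[:-1] + edges[1:])
    idx = np.clip(np.digitize(t, edges) - 1, 0, maxpoints - 1)
    err = np.array([y[idx == i].std() if np.any(idx == i) else np.nan
                    for i in range(maxpoints)])
    return centers, err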
quantopian/zipline
zipline/algorithm.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1457-L1466
def on_dt_changed(self, dt):
    """
    Callback triggered by the simulation loop whenever the current dt
    changes.

    Any logic that should happen exactly once at the start of each datetime
    group should happen here.
    """
    self.datetime = dt
    self.blotter.set_date(dt)
[ "def", "on_dt_changed", "(", "self", ",", "dt", ")", ":", "self", ".", "datetime", "=", "dt", "self", ".", "blotter", ".", "set_date", "(", "dt", ")" ]
Callback triggered by the simulation loop whenever the current dt changes. Any logic that should happen exactly once at the start of each datetime group should happen here.
[ "Callback", "triggered", "by", "the", "simulation", "loop", "whenever", "the", "current", "dt", "changes", "." ]
python
train
dw/mitogen
ansible_mitogen/mixins.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/mixins.py#L240-L250
def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
    """
    Mitogen always executes ActionBase helper methods in the context of the
    target user account, so it is never necessary to modify permissions
    except to ensure the execute bit is set if requested.
    """
    LOG.debug('_fixup_perms2(%r, remote_user=%r, execute=%r)',
              remote_paths, remote_user, execute)
    if execute and self._task.action not in self.FIXUP_PERMS_RED_HERRING:
        return self._remote_chmod(remote_paths, mode='u+x')
    return self.COMMAND_RESULT.copy()
[ "def", "_fixup_perms2", "(", "self", ",", "remote_paths", ",", "remote_user", "=", "None", ",", "execute", "=", "True", ")", ":", "LOG", ".", "debug", "(", "'_fixup_perms2(%r, remote_user=%r, execute=%r)'", ",", "remote_paths", ",", "remote_user", ",", "execute", ")", "if", "execute", "and", "self", ".", "_task", ".", "action", "not", "in", "self", ".", "FIXUP_PERMS_RED_HERRING", ":", "return", "self", ".", "_remote_chmod", "(", "remote_paths", ",", "mode", "=", "'u+x'", ")", "return", "self", ".", "COMMAND_RESULT", ".", "copy", "(", ")" ]
Mitogen always executes ActionBase helper methods in the context of the target user account, so it is never necessary to modify permissions except to ensure the execute bit is set if requested.
[ "Mitogen", "always", "executes", "ActionBase", "helper", "methods", "in", "the", "context", "of", "the", "target", "user", "account", "so", "it", "is", "never", "necessary", "to", "modify", "permissions", "except", "to", "ensure", "the", "execute", "bit", "is", "set", "if", "requested", "." ]
python
train
bjmorgan/lattice_mc
lattice_mc/jump.py
https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/jump.py#L79-L90
def nearest_neighbour_delta_E(self):
    """
    Nearest-neighbour interaction contribution to the change in system
    energy if this jump were accepted.

    Args:
        None

    Returns:
        (Float): delta E (nearest-neighbour)
    """
    delta_nn = self.final_site.nn_occupation() - self.initial_site.nn_occupation() - 1
    # -1 because the hopping ion is not counted in the final site occupation number
    return (delta_nn * self.nearest_neighbour_energy)
[ "def", "nearest_neighbour_delta_E", "(", "self", ")", ":", "delta_nn", "=", "self", ".", "final_site", ".", "nn_occupation", "(", ")", "-", "self", ".", "initial_site", ".", "nn_occupation", "(", ")", "-", "1", "# -1 because the hopping ion is not counted in the final site occupation number", "return", "(", "delta_nn", "*", "self", ".", "nearest_neighbour_energy", ")" ]
Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (nearest-neighbour)
[ "Nearest", "-", "neighbour", "interaction", "contribution", "to", "the", "change", "in", "system", "energy", "if", "this", "jump", "were", "accepted", "." ]
python
train
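A worked instance of the formula in the row above, with a hypothetical pair energy:

# Worked example: if the destination site has 4 occupied neighbours, the
# origin site has 2, and the pair energy is 0.1 eV, the hopping ion itself
# is discounted from the destination count by the -1 term.
nearest_neighbour_energy = 0.1               # eV (hypothetical value)
delta_nn = 4 - 2 - 1                         # final - initial - 1 = 1
print(delta_nn * nearest_neighbour_energy)   # 0.1 eV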
OSSOS/MOP
src/ossos/core/scripts/mk_mopheader.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/scripts/mk_mopheader.py#L36-L67
def mk_mopheader(expnum, ccd, version, dry_run=False, prefix=""):
    """Run the OSSOS mopheader script.
    """

    ## confirm destination directory exists.
    destdir = os.path.dirname(
        storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
    if not dry_run:
        storage.mkdir(destdir)

    ## get image from the vospace storage area
    filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
    logging.info("Running mopheader on %s %d" % (expnum, ccd))

    ## launch the mopheader script
    ## launch the makepsf script
    expname = os.path.basename(filename).strip('.fits')
    logging.info(util.exec_prog(['stepZjmp', '-f', expname]))

    mopheader_filename = expname + ".mopheader"
    # mopheader_filename = mopheader.main(filename)

    if dry_run:
        return

    destination = storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                       version=version, ext='mopheader')
    source = mopheader_filename
    storage.copy(source, destination)
    return
[ "def", "mk_mopheader", "(", "expnum", ",", "ccd", ",", "version", ",", "dry_run", "=", "False", ",", "prefix", "=", "\"\"", ")", ":", "## confirm destination directory exists.", "destdir", "=", "os", ".", "path", ".", "dirname", "(", "storage", ".", "dbimages_uri", "(", "expnum", ",", "ccd", ",", "prefix", "=", "prefix", ",", "version", "=", "version", ",", "ext", "=", "'fits'", ")", ")", "if", "not", "dry_run", ":", "storage", ".", "mkdir", "(", "destdir", ")", "## get image from the vospace storage area", "filename", "=", "storage", ".", "get_image", "(", "expnum", ",", "ccd", ",", "version", "=", "version", ",", "prefix", "=", "prefix", ")", "logging", ".", "info", "(", "\"Running mopheader on %s %d\"", "%", "(", "expnum", ",", "ccd", ")", ")", "## launch the mopheader script", "## launch the makepsf script", "expname", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", ".", "strip", "(", "'.fits'", ")", "logging", ".", "info", "(", "util", ".", "exec_prog", "(", "[", "'stepZjmp'", ",", "'-f'", ",", "expname", "]", ")", ")", "mopheader_filename", "=", "expname", "+", "\".mopheader\"", "# mopheader_filename = mopheader.main(filename)", "if", "dry_run", ":", "return", "destination", "=", "storage", ".", "dbimages_uri", "(", "expnum", ",", "ccd", ",", "prefix", "=", "prefix", ",", "version", "=", "version", ",", "ext", "=", "'mopheader'", ")", "source", "=", "mopheader_filename", "storage", ".", "copy", "(", "source", ",", "destination", ")", "return" ]
Run the OSSOS mopheader script.
[ "Run", "the", "OSSOS", "mopheader", "script", "." ]
python
train
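One caveat in the row above: str.strip('.fits') treats its argument as a set of characters to remove from both ends, not as a literal suffix, so filenames that begin or end with any of '.', 'f', 'i', 't', 's' get mangled. The filenames below are made up; os.path.splitext is the suffix-safe alternative.

import os.path

# str.strip treats its argument as a set of characters, not as a suffix:
print('1616681p.fits'.strip('.fits'))            # 1616681p   (happens to be fine)
print('stack1616681.fits'.strip('.fits'))        # ack1616681 (leading 's', 't' eaten)

# os.path.splitext removes exactly one trailing extension:
print(os.path.splitext('stack1616681.fits')[0])  # stack1616681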
belbio/bel
bel/nanopub/pubmed.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L62-L135
def get_pubtator(pmid):
    """Get Pubtator Bioconcepts from Pubmed Abstract

    Re-configure the denotations into an annotation dictionary format
    and collapse duplicate terms so that their spans are in a list.
    """
    r = get_url(PUBTATOR_TMPL.replace("PMID", pmid), timeout=10)
    if r and r.status_code == 200:
        pubtator = r.json()[0]
    else:
        log.error(
            f"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}"
        )
        return None

    known_types = ["CHEBI", "Chemical", "Disease", "Gene", "Species"]

    for idx, anno in enumerate(pubtator["denotations"]):
        s_match = re.match(r"(\w+):(\w+)", anno["obj"])
        c_match = re.match(r"(\w+):(\w+):(\w+)", anno["obj"])

        if c_match:
            (ctype, namespace, cid) = (
                c_match.group(1),
                c_match.group(2),
                c_match.group(3),
            )

            if ctype not in known_types:
                log.info(f"{ctype} not in known_types for Pubtator")
            if namespace not in known_types:
                log.info(f"{namespace} not in known_types for Pubtator")

            pubtator["denotations"][idx][
                "obj"
            ] = f'{pubtator_ns_convert.get(namespace, "UNKNOWN")}:{cid}'
            pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(
                ctype, None
            )
            pubtator["denotations"][idx][
                "annotation_type"
            ] = pubtator_annotation_convert.get(ctype, None)

        elif s_match:
            (ctype, cid) = (s_match.group(1), s_match.group(2))

            if ctype not in known_types:
                log.info(f"{ctype} not in known_types for Pubtator")

            pubtator["denotations"][idx][
                "obj"
            ] = f'{pubtator_ns_convert.get(ctype, "UNKNOWN")}:{cid}'
            pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(
                ctype, None
            )
            pubtator["denotations"][idx][
                "annotation_type"
            ] = pubtator_annotation_convert.get(ctype, None)

    annotations = {}
    for anno in pubtator["denotations"]:
        log.info(anno)
        if anno["obj"] not in annotations:
            annotations[anno["obj"]] = {"spans": [anno["span"]]}
            annotations[anno["obj"]]["entity_types"] = [anno.get("entity_type", [])]
            annotations[anno["obj"]]["annotation_types"] = [
                anno.get("annotation_type", [])
            ]
        else:
            annotations[anno["obj"]]["spans"].append(anno["span"])

    del pubtator["denotations"]
    pubtator["annotations"] = copy.deepcopy(annotations)

    return pubtator
[ "def", "get_pubtator", "(", "pmid", ")", ":", "r", "=", "get_url", "(", "PUBTATOR_TMPL", ".", "replace", "(", "\"PMID\"", ",", "pmid", ")", ",", "timeout", "=", "10", ")", "if", "r", "and", "r", ".", "status_code", "==", "200", ":", "pubtator", "=", "r", ".", "json", "(", ")", "[", "0", "]", "else", ":", "log", ".", "error", "(", "f\"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}\"", ")", "return", "None", "known_types", "=", "[", "\"CHEBI\"", ",", "\"Chemical\"", ",", "\"Disease\"", ",", "\"Gene\"", ",", "\"Species\"", "]", "for", "idx", ",", "anno", "in", "enumerate", "(", "pubtator", "[", "\"denotations\"", "]", ")", ":", "s_match", "=", "re", ".", "match", "(", "r\"(\\w+):(\\w+)\"", ",", "anno", "[", "\"obj\"", "]", ")", "c_match", "=", "re", ".", "match", "(", "r\"(\\w+):(\\w+):(\\w+)\"", ",", "anno", "[", "\"obj\"", "]", ")", "if", "c_match", ":", "(", "ctype", ",", "namespace", ",", "cid", ")", "=", "(", "c_match", ".", "group", "(", "1", ")", ",", "c_match", ".", "group", "(", "2", ")", ",", "c_match", ".", "group", "(", "3", ")", ",", ")", "if", "ctype", "not", "in", "known_types", ":", "log", ".", "info", "(", "f\"{ctype} not in known_types for Pubtator\"", ")", "if", "namespace", "not", "in", "known_types", ":", "log", ".", "info", "(", "f\"{namespace} not in known_types for Pubtator\"", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"obj\"", "]", "=", "f'{pubtator_ns_convert.get(namespace, \"UNKNOWN\")}:{cid}'", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"entity_type\"", "]", "=", "pubtator_entity_convert", ".", "get", "(", "ctype", ",", "None", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"annotation_type\"", "]", "=", "pubtator_annotation_convert", ".", "get", "(", "ctype", ",", "None", ")", "elif", "s_match", ":", "(", "ctype", ",", "cid", ")", "=", "(", "s_match", ".", "group", "(", "1", ")", ",", "s_match", ".", "group", "(", "2", ")", ")", "if", "ctype", "not", "in", "known_types", ":", "log", ".", "info", "(", "f\"{ctype} not in known_types for Pubtator\"", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"obj\"", "]", "=", "f'{pubtator_ns_convert.get(ctype, \"UNKNOWN\")}:{cid}'", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"entity_type\"", "]", "=", "pubtator_entity_convert", ".", "get", "(", "ctype", ",", "None", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"annotation_type\"", "]", "=", "pubtator_annotation_convert", ".", "get", "(", "ctype", ",", "None", ")", "annotations", "=", "{", "}", "for", "anno", "in", "pubtator", "[", "\"denotations\"", "]", ":", "log", ".", "info", "(", "anno", ")", "if", "anno", "[", "\"obj\"", "]", "not", "in", "annotations", ":", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "=", "{", "\"spans\"", ":", "[", "anno", "[", "\"span\"", "]", "]", "}", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "[", "\"entity_types\"", "]", "=", "[", "anno", ".", "get", "(", "\"entity_type\"", ",", "[", "]", ")", "]", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "[", "\"annotation_types\"", "]", "=", "[", "anno", ".", "get", "(", "\"annotation_type\"", ",", "[", "]", ")", "]", "else", ":", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "[", "\"spans\"", "]", ".", "append", "(", "anno", "[", "\"span\"", "]", ")", "del", "pubtator", "[", "\"denotations\"", "]", "pubtator", "[", "\"annotations\"", "]", "=", "copy", ".", "deepcopy", "(", 
"annotations", ")", "return", "pubtator" ]
Get Pubtator Bioconcepts from Pubmed Abstract Re-configure the denotations into an annotation dictionary format and collapse duplicate terms so that their spans are in a list.
[ "Get", "Pubtator", "Bioconcepts", "from", "Pubmed", "Abstract" ]
python
train
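A subtlety worth noting in the row above: re.match only anchors at the start of the string, so the two-group pattern also matches the prefix of a three-part identifier. Both matches can be truthy at once, which is why c_match must be tested first. The identifier below is hypothetical.

import re

obj = "Chemical:MESH:D012345"                 # hypothetical three-part identifier
s_match = re.match(r"(\w+):(\w+)", obj)       # matches the prefix "Chemical:MESH"
c_match = re.match(r"(\w+):(\w+):(\w+)", obj)
print(bool(s_match), bool(c_match))           # True True -> c_match must win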
krischer/mtspec
mtspec/util.py
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/util.py#L43-L54
def _load_mtdata(gzfile):
    """
    Simple helper function that finds the test data in the directory tree
    and loads it using :func:`gzip.open` and :func:`numpy.loadtxt`.

    :param gzfile: Filename
    :type gzfile: str
    :returns: data
    :rtype: numpy.ndarray
    """
    path = os.path.join(os.path.dirname(__file__), 'tests', 'data', gzfile)
    return np.loadtxt(gzip.open(path))
[ "def", "_load_mtdata", "(", "gzfile", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'tests'", ",", "'data'", ",", "gzfile", ")", "return", "np", ".", "loadtxt", "(", "gzip", ".", "open", "(", "path", ")", ")" ]
Simple helper function that finds the test data in the directory tree and loads it using :func:`gzip.open` and :func:`numpy.loadtxt`. :param gzfile: Filename :type gzfile: str :returns: data :rtype: numpy.ndarray
[ "Simple", "helper", "function", "that", "finds", "the", "test", "data", "in", "the", "directory", "tree", "and", "loads", "it", "using", ":", "func", ":", "gzip", ".", "open", "and", ":", "func", ":", "numpy", ".", "loadtxt", "." ]
python
train
oauthlib/oauthlib
oauthlib/oauth2/rfc6749/request_validator.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth2/rfc6749/request_validator.py#L294-L303
def save_token(self, token, request, *args, **kwargs):
    """Persist the token with a token type specific method.

    Currently, only save_bearer_token is supported.

    :param token: A (Bearer) token dict.
    :param request: OAuthlib request.
    :type request: oauthlib.common.Request
    """
    return self.save_bearer_token(token, request, *args, **kwargs)
[ "def", "save_token", "(", "self", ",", "token", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "save_bearer_token", "(", "token", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Persist the token with a token type specific method. Currently, only save_bearer_token is supported. :param token: A (Bearer) token dict. :param request: OAuthlib request. :type request: oauthlib.common.Request
[ "Persist", "the", "token", "with", "a", "token", "type", "specific", "method", "." ]
python
train
projectshift/shift-boiler
boiler/user/user_service.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/user_service.py#L358-L385
def register(self, user_data, base_confirm_url='', send_welcome=True):
    """
    Register user
    Accepts user data, validates it and performs registration. Will send
    a welcome message with a confirmation link on success.

    :param user_data: dict, populate user with data
    :param send_welcome: bool, whether to send welcome or skip it (testing)
    :param base_confirm_url: str, base confirmation link url
    :return: boiler.user.models.User
    """
    user = self.__model__(**user_data)
    schema = RegisterSchema()
    valid = schema.process(user)
    if not valid:
        return valid

    db.session.add(user)
    db.session.commit()
    if not user.id:
        return False

    # send welcome message
    if send_welcome:
        self.send_welcome_message(user, base_confirm_url)

    events.register_event.send(user)
    return user
[ "def", "register", "(", "self", ",", "user_data", ",", "base_confirm_url", "=", "''", ",", "send_welcome", "=", "True", ")", ":", "user", "=", "self", ".", "__model__", "(", "*", "*", "user_data", ")", "schema", "=", "RegisterSchema", "(", ")", "valid", "=", "schema", ".", "process", "(", "user", ")", "if", "not", "valid", ":", "return", "valid", "db", ".", "session", ".", "add", "(", "user", ")", "db", ".", "session", ".", "commit", "(", ")", "if", "not", "user", ".", "id", ":", "return", "False", "# send welcome message", "if", "send_welcome", ":", "self", ".", "send_welcome_message", "(", "user", ",", "base_confirm_url", ")", "events", ".", "register_event", ".", "send", "(", "user", ")", "return", "user" ]
Register user Accepts user data, validates it and performs registration. Will send a welcome message with a confirmation link on success. :param user_data: dict, populate user with data :param send_welcome: bool, whether to send welcome or skip it (testing) :param base_confirm_url: str, base confirmation link url :return: boiler.user.models.User
[ "Register", "user", "Accepts", "user", "data", "validates", "it", "and", "performs", "registration", ".", "Will", "send", "a", "welcome", "message", "with", "a", "confirmation", "link", "on", "success", "." ]
python
train
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/transformations.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/transformations.py#L305-L308
def _choi_to_chi(data, input_dim, output_dim):
    """Transform Choi representation to the Chi representation."""
    num_qubits = int(np.log2(input_dim))
    return _transform_to_pauli(data, num_qubits)
[ "def", "_choi_to_chi", "(", "data", ",", "input_dim", ",", "output_dim", ")", ":", "num_qubits", "=", "int", "(", "np", ".", "log2", "(", "input_dim", ")", ")", "return", "_transform_to_pauli", "(", "data", ",", "num_qubits", ")" ]
Transform Choi representation to the Chi representation.
[ "Transform", "Choi", "representation", "to", "the", "Chi", "representation", "." ]
python
test
divio/django-filer
filer/server/backends/base.py
https://github.com/divio/django-filer/blob/946629087943d41eff290f07bfdf240b8853dd88/filer/server/backends/base.py#L24-L40
def save_as_header(self, response, **kwargs):
    """
    * if save_as is False the header will not be added
    * if save_as is a filename, it will be used in the header
    * if save_as is True or None the filename will be determined from the
      file path
    """
    save_as = kwargs.get('save_as', None)
    if save_as is False:
        return
    file_obj = kwargs.get('file_obj', None)
    if save_as is True or save_as is None:
        filename = os.path.basename(file_obj.path)
    else:
        filename = save_as
    response['Content-Disposition'] = smart_str(
        'attachment; filename=%s' % filename)
[ "def", "save_as_header", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "save_as", "=", "kwargs", ".", "get", "(", "'save_as'", ",", "None", ")", "if", "save_as", "is", "False", ":", "return", "file_obj", "=", "kwargs", ".", "get", "(", "'file_obj'", ",", "None", ")", "if", "save_as", "is", "True", "or", "save_as", "is", "None", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "file_obj", ".", "path", ")", "else", ":", "filename", "=", "save_as", "response", "[", "'Content-Disposition'", "]", "=", "smart_str", "(", "'attachment; filename=%s'", "%", "filename", ")" ]
* if save_as is False the header will not be added * if save_as is a filename, it will be used in the header * if save_as is True or None the filename will be determined from the file path
[ "*", "if", "save_as", "is", "False", "the", "header", "will", "not", "be", "added", "*", "if", "save_as", "is", "a", "filename", "it", "will", "be", "used", "in", "the", "header", "*", "if", "save_as", "is", "True", "or", "None", "the", "filename", "will", "be", "determined", "from", "the", "file", "path" ]
python
train
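A self-contained sketch of the three save_as behaviours documented above, with the Django and smart_str plumbing stripped out and a stub file object; the names here are illustrative only.

import os

def save_as_header(response, save_as=None, file_obj=None):
    # same branching as the method above, minus the Django specifics
    if save_as is False:
        return
    if save_as is True or save_as is None:
        filename = os.path.basename(file_obj.path)
    else:
        filename = save_as
    response['Content-Disposition'] = 'attachment; filename=%s' % filename

class _StubFile:
    path = '/media/filer/report.pdf'

r = {}
save_as_header(r, save_as=False, file_obj=_StubFile())   # no header added
save_as_header(r, save_as=None, file_obj=_StubFile())    # filename=report.pdf
save_as_header(r, save_as='renamed.pdf')                 # filename=renamed.pdf
print(r['Content-Disposition'])                          # attachment; filename=renamed.pdf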
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L4609-L4629
def get_overlay_data_start_offset(self):
    """Get the offset of data appended to the file and not contained within
    the area described in the headers."""

    highest_PointerToRawData = 0
    highest_SizeOfRawData = 0
    for section in self.sections:

        # If a section seems to fall outside the boundaries of the file we assume it's either
        # because of intentionally misleading values or because the file is truncated
        # In either case we skip it
        if section.PointerToRawData + section.SizeOfRawData > len(self.__data__):
            continue

        if section.PointerToRawData + section.SizeOfRawData > highest_PointerToRawData + highest_SizeOfRawData:
            highest_PointerToRawData = section.PointerToRawData
            highest_SizeOfRawData = section.SizeOfRawData

    if len(self.__data__) > highest_PointerToRawData + highest_SizeOfRawData:
        return highest_PointerToRawData + highest_SizeOfRawData

    return None
[ "def", "get_overlay_data_start_offset", "(", "self", ")", ":", "highest_PointerToRawData", "=", "0", "highest_SizeOfRawData", "=", "0", "for", "section", "in", "self", ".", "sections", ":", "# If a section seems to fall outside the boundaries of the file we assume it's either", "# because of intentionally misleading values or because the file is truncated", "# In either case we skip it", "if", "section", ".", "PointerToRawData", "+", "section", ".", "SizeOfRawData", ">", "len", "(", "self", ".", "__data__", ")", ":", "continue", "if", "section", ".", "PointerToRawData", "+", "section", ".", "SizeOfRawData", ">", "highest_PointerToRawData", "+", "highest_SizeOfRawData", ":", "highest_PointerToRawData", "=", "section", ".", "PointerToRawData", "highest_SizeOfRawData", "=", "section", ".", "SizeOfRawData", "if", "len", "(", "self", ".", "__data__", ")", ">", "highest_PointerToRawData", "+", "highest_SizeOfRawData", ":", "return", "highest_PointerToRawData", "+", "highest_SizeOfRawData", "return", "None" ]
Get the offset of data appended to the file and not contained within the area described in the headers.
[ "Get", "the", "offset", "of", "data", "appended", "to", "the", "file", "and", "not", "contained", "within", "the", "area", "described", "in", "the", "headers", "." ]
python
train
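The same bookkeeping with bare numbers: the overlay starts where the section that reaches furthest into the file ends, provided the file extends beyond that point. The values below are made up.

# (PointerToRawData, SizeOfRawData) pairs for two hypothetical sections
sections = [(0x400, 0x2000), (0x2400, 0x1000)]
file_len = 0x4000

# furthest end-of-section that still fits inside the file
end = max(p + s for p, s in sections if p + s <= file_len)
overlay = end if file_len > end else None
print(hex(overlay))  # 0x3400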
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L866-L873
def poly(self, return_coeffs=False):
    """returns the quadratic as a Polynomial object."""
    p = self.bpoints()
    coeffs = (p[0] - 2*p[1] + p[2], 2*(p[1] - p[0]), p[0])
    if return_coeffs:
        return coeffs
    else:
        return np.poly1d(coeffs)
[ "def", "poly", "(", "self", ",", "return_coeffs", "=", "False", ")", ":", "p", "=", "self", ".", "bpoints", "(", ")", "coeffs", "=", "(", "p", "[", "0", "]", "-", "2", "*", "p", "[", "1", "]", "+", "p", "[", "2", "]", ",", "2", "*", "(", "p", "[", "1", "]", "-", "p", "[", "0", "]", ")", ",", "p", "[", "0", "]", ")", "if", "return_coeffs", ":", "return", "coeffs", "else", ":", "return", "np", ".", "poly1d", "(", "coeffs", ")" ]
returns the quadratic as a Polynomial object.
[ "returns", "the", "quadratic", "as", "a", "Polynomial", "object", "." ]
python
train
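The coefficient tuple in the row above follows from expanding the quadratic Bezier form B(t) = (1-t)^2*p0 + 2t(1-t)*p1 + t^2*p2 and collecting powers of t, which gives (p0 - 2*p1 + p2, 2*(p1 - p0), p0). A quick numeric check with sample control points:

import numpy as np

p0, p1, p2 = 0.0, 1.0, 3.0           # sample control points
t = 0.25
bezier = (1 - t)**2 * p0 + 2 * t * (1 - t) * p1 + t**2 * p2
poly = np.poly1d((p0 - 2*p1 + p2, 2*(p1 - p0), p0))
assert np.isclose(bezier, poly(t))   # both give 0.5625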
groveco/django-sql-explorer
explorer/forms.py
https://github.com/groveco/django-sql-explorer/blob/622b96354e1d7ae7f7105ae90b8da3247e028623/explorer/forms.py#L11-L28
def validate(self, value):
    """
    Ensure that the SQL passes the blacklist.

    :param value: The SQL for this Query model.
    """

    query = Query(sql=value)

    passes_blacklist, failing_words = query.passes_blacklist()

    error = MSG_FAILED_BLACKLIST % ', '.join(failing_words) if not passes_blacklist else None

    if error:
        raise ValidationError(
            error,
            code="InvalidSql"
        )
[ "def", "validate", "(", "self", ",", "value", ")", ":", "query", "=", "Query", "(", "sql", "=", "value", ")", "passes_blacklist", ",", "failing_words", "=", "query", ".", "passes_blacklist", "(", ")", "error", "=", "MSG_FAILED_BLACKLIST", "%", "', '", ".", "join", "(", "failing_words", ")", "if", "not", "passes_blacklist", "else", "None", "if", "error", ":", "raise", "ValidationError", "(", "error", ",", "code", "=", "\"InvalidSql\"", ")" ]
Ensure that the SQL passes the blacklist. :param value: The SQL for this Query model.
[ "Ensure", "that", "the", "SQL", "passes", "the", "blacklist", "." ]
python
train
pkgw/pwkit
pwkit/dulk_models.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/dulk_models.py#L251-L284
def calc_synch_eta(b, ne, delta, sinth, nu, E0=1.):
    """Calculate the relativistic synchrotron emission coefficient η_ν.

    This is Dulk (1985) equation 40, which is an approximation assuming a
    power-law electron population. Arguments are:

    b
      Magnetic field strength in Gauss
    ne
      The density of electrons per cubic centimeter with energies greater
      than E0.
    delta
      The power-law index defining the energy distribution of the electron
      population, with ``n(E) ~ E^(-delta)``. The equation is valid for
      ``2 <~ delta <~ 5``.
    sinth
      The sine of the angle between the line of sight and the magnetic field
      direction. It's not specified for what range of values the expressions
      work well.
    nu
      The frequency at which to calculate η, in Hz. It's not specified for
      what range of values the expressions work well.
    E0
      The minimum energy of electrons to consider, in MeV. Defaults to 1 so
      that these functions can be called identically to the gyrosynchrotron
      functions.

    The return value is the emission coefficient (AKA "emissivity"), in
    units of ``erg s^-1 Hz^-1 cm^-3 sr^-1``.

    No complaints are raised if you attempt to use the equation outside of
    its range of validity.
    """
    s = nu / calc_nu_b(b)
    return (b * ne * 8.6e-24 * (delta - 1) * sinth *
            (0.175 * s / (E0**2 * sinth))**(0.5 * (1 - delta)))
[ "def", "calc_synch_eta", "(", "b", ",", "ne", ",", "delta", ",", "sinth", ",", "nu", ",", "E0", "=", "1.", ")", ":", "s", "=", "nu", "/", "calc_nu_b", "(", "b", ")", "return", "(", "b", "*", "ne", "*", "8.6e-24", "*", "(", "delta", "-", "1", ")", "*", "sinth", "*", "(", "0.175", "*", "s", "/", "(", "E0", "**", "2", "*", "sinth", ")", ")", "**", "(", "0.5", "*", "(", "1", "-", "delta", ")", ")", ")" ]
Calculate the relativistic synchrotron emission coefficient η_ν. This is Dulk (1985) equation 40, which is an approximation assuming a power-law electron population. Arguments are: b Magnetic field strength in Gauss ne The density of electrons per cubic centimeter with energies greater than E0. delta The power-law index defining the energy distribution of the electron population, with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``. sinth The sine of the angle between the line of sight and the magnetic field direction. It's not specified for what range of values the expressions work well. nu The frequency at which to calculate η, in Hz. It's not specified for what range of values the expressions work well. E0 The minimum energy of electrons to consider, in MeV. Defaults to 1 so that these functions can be called identically to the gyrosynchrotron functions. The return value is the emission coefficient (AKA "emissivity"), in units of ``erg s^-1 Hz^-1 cm^-3 sr^-1``. No complaints are raised if you attempt to use the equation outside of its range of validity.
[ "Calculate", "the", "relativistic", "synchrotron", "emission", "coefficient", "η_ν", "." ]
python
train
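A numeric sanity check of the formula as transcribed above, under the assumption that calc_nu_b(b) is the electron gyrofrequency, roughly 2.8e6 * b Hz, which is the convention Dulk (1985) uses. The inputs are made up but sit inside the stated validity range for delta.

# Evaluate Dulk (1985) eq. 40 by hand, assuming calc_nu_b(b) ~= 2.8e6 * b Hz.
b, ne, delta, sinth, nu, E0 = 100.0, 1e7, 3.0, 1.0, 1e9, 1.0

s = nu / (2.8e6 * b)                          # harmonic number, ~3.57 here
eta = (b * ne * 8.6e-24 * (delta - 1) * sinth *
       (0.175 * s / (E0**2 * sinth))**(0.5 * (1 - delta)))
print(eta)   # ~2.75e-14 erg s^-1 Hz^-1 cm^-3 sr^-1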
brainiak/brainiak
examples/factoranalysis/htfa_cv_example.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/examples/factoranalysis/htfa_cv_example.py#L26-L54
def recon_err(data, F, W):
    """Calculate reconstruction error

    Parameters
    ----------
    data : 2D array
        True data to recover.

    F : 2D array
        HTFA factor matrix.

    W : 2D array
        HTFA weight matrix.

    Returns
    -------
    float
        Returns root mean squared reconstruction error.
    """
    recon = F.dot(W).ravel()
    err = mean_squared_error(
        data.ravel(),
        recon,
        multioutput='uniform_average')
    return math.sqrt(err)
[ "def", "recon_err", "(", "data", ",", "F", ",", "W", ")", ":", "recon", "=", "F", ".", "dot", "(", "W", ")", ".", "ravel", "(", ")", "err", "=", "mean_squared_error", "(", "data", ".", "ravel", "(", ")", ",", "recon", ",", "multioutput", "=", "'uniform_average'", ")", "return", "math", ".", "sqrt", "(", "err", ")" ]
Calculate reconstruction error Parameters ---------- data : 2D array True data to recover. F : 2D array HTFA factor matrix. W : 2D array HTFA weight matrix. Returns ------- float Returns root mean squared reconstruction error.
[ "Calcuate", "reconstruction", "error" ]
python
train
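The row above is simply a root-mean-square error computed through scikit-learn; the same number falls out of plain numpy, which makes the definition explicit:

import numpy as np

data = np.array([[1.0, 2.0], [3.0, 4.0]])
F = np.array([[1.0, 0.0], [0.0, 1.0]])
W = np.array([[1.0, 2.0], [3.0, 5.0]])

recon = F.dot(W)
rmse = np.sqrt(np.mean((data - recon) ** 2))  # sqrt of the mean squared error
print(rmse)  # 0.5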
portantier/habu
habu/lib/ip.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/lib/ip.py#L14-L28
def get_internal_ip():
    """Get the local IP addresses."""

    nics = {}

    for interface_name in interfaces():
        addresses = ifaddresses(interface_name)
        try:
            nics[interface_name] = {
                'ipv4': addresses[AF_INET],
                'link_layer': addresses[AF_LINK],
                'ipv6': addresses[AF_INET6],
            }
        except KeyError:
            pass
    return nics
[ "def", "get_internal_ip", "(", ")", ":", "nics", "=", "{", "}", "for", "interface_name", "in", "interfaces", "(", ")", ":", "addresses", "=", "ifaddresses", "(", "interface_name", ")", "try", ":", "nics", "[", "interface_name", "]", "=", "{", "'ipv4'", ":", "addresses", "[", "AF_INET", "]", ",", "'link_layer'", ":", "addresses", "[", "AF_LINK", "]", ",", "'ipv6'", ":", "addresses", "[", "AF_INET6", "]", ",", "}", "except", "KeyError", ":", "pass", "return", "nics" ]
Get the local IP addresses.
[ "Get", "the", "local", "IP", "addresses", "." ]
python
train
tinybike/coinbridge
coinbridge/__init__.py
https://github.com/tinybike/coinbridge/blob/c9bde6f4196fecc09e8119f51dff8a26cfc1aee6/coinbridge/__init__.py#L384-L398
def listtransactions(self, user_id="", count=10, start_at=0):
    """List all transactions associated with this account.

    Args:
        user_id (str): this user's unique identifier
        count (int): number of transactions to return (default=10)
        start_at (int): start the list at this transaction (default=0)

    Returns:
        list [dict]: transactions associated with this user's account
    """
    txlist = self.rpc.call("listtransactions", user_id, count, start_at)
    self.logger.debug("Got transaction list for " + str(user_id))
    return txlist
[ "def", "listtransactions", "(", "self", ",", "user_id", "=", "\"\"", ",", "count", "=", "10", ",", "start_at", "=", "0", ")", ":", "txlist", "=", "self", ".", "rpc", ".", "call", "(", "\"listtransactions\"", ",", "user_id", ",", "count", ",", "start_at", ")", "self", ".", "logger", ".", "debug", "(", "\"Got transaction list for \"", "+", "str", "(", "user_id", ")", ")", "return", "txlist" ]
List all transactions associated with this account. Args: user_id (str): this user's unique identifier count (int): number of transactions to return (default=10) start_at (int): start the list at this transaction (default=0) Returns: list [dict]: transactions associated with this user's account
[ "List", "all", "transactions", "associated", "with", "this", "account", "." ]
python
train
cloud9ers/gurumate
environment/share/doc/ipython/examples/parallel/iopubwatcher.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/iopubwatcher.py#L27-L75
def main(connection_file):
    """watch iopub channel, and print messages"""
    ctx = zmq.Context.instance()
    with open(connection_file) as f:
        cfg = json.loads(f.read())
    location = cfg['location']
    reg_url = cfg['url']
    session = Session(key=str_to_bytes(cfg['exec_key']))

    query = ctx.socket(zmq.DEALER)
    query.connect(disambiguate_url(cfg['url'], location))
    session.send(query, "connection_request")
    idents, msg = session.recv(query, mode=0)
    c = msg['content']
    iopub_url = disambiguate_url(c['iopub'], location)
    sub = ctx.socket(zmq.SUB)
    # This will subscribe to all messages:
    sub.setsockopt(zmq.SUBSCRIBE, b'')
    # replace b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout
    # 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes
    # to everything from engine 1, but there is no way to subscribe to
    # just stdout from everyone.
    # multiple calls to subscribe will add subscriptions, e.g. to subscribe to
    # engine 1's stderr and engine 2's stdout:
    # sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.stderr')
    # sub.setsockopt(zmq.SUBSCRIBE, b'engine.2.stdout')
    sub.connect(iopub_url)
    while True:
        try:
            idents, msg = session.recv(sub, mode=0)
        except KeyboardInterrupt:
            return
        # ident always length 1 here
        topic = idents[0]
        if msg['msg_type'] == 'stream':
            # stdout/stderr
            # stream names are in msg['content']['name'], if you want to handle
            # them differently
            print("%s: %s" % (topic, msg['content']['data']))
        elif msg['msg_type'] == 'pyerr':
            # Python traceback
            c = msg['content']
            print(topic + ':')
            for line in c['traceback']:
                # indent lines
                print(' ' + line)
[ "def", "main", "(", "connection_file", ")", ":", "ctx", "=", "zmq", ".", "Context", ".", "instance", "(", ")", "with", "open", "(", "connection_file", ")", "as", "f", ":", "cfg", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "location", "=", "cfg", "[", "'location'", "]", "reg_url", "=", "cfg", "[", "'url'", "]", "session", "=", "Session", "(", "key", "=", "str_to_bytes", "(", "cfg", "[", "'exec_key'", "]", ")", ")", "query", "=", "ctx", ".", "socket", "(", "zmq", ".", "DEALER", ")", "query", ".", "connect", "(", "disambiguate_url", "(", "cfg", "[", "'url'", "]", ",", "location", ")", ")", "session", ".", "send", "(", "query", ",", "\"connection_request\"", ")", "idents", ",", "msg", "=", "session", ".", "recv", "(", "query", ",", "mode", "=", "0", ")", "c", "=", "msg", "[", "'content'", "]", "iopub_url", "=", "disambiguate_url", "(", "c", "[", "'iopub'", "]", ",", "location", ")", "sub", "=", "ctx", ".", "socket", "(", "zmq", ".", "SUB", ")", "# This will subscribe to all messages:", "sub", ".", "setsockopt", "(", "zmq", ".", "SUBSCRIBE", ",", "b''", ")", "# replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout", "# 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes", "# to everything from engine 1, but there is no way to subscribe to", "# just stdout from everyone.", "# multiple calls to subscribe will add subscriptions, e.g. to subscribe to", "# engine 1's stderr and engine 2's stdout:", "# sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.stderr')", "# sub.setsockopt(zmq.SUBSCRIBE, b'engine.2.stdout')", "sub", ".", "connect", "(", "iopub_url", ")", "while", "True", ":", "try", ":", "idents", ",", "msg", "=", "session", ".", "recv", "(", "sub", ",", "mode", "=", "0", ")", "except", "KeyboardInterrupt", ":", "return", "# ident always length 1 here", "topic", "=", "idents", "[", "0", "]", "if", "msg", "[", "'msg_type'", "]", "==", "'stream'", ":", "# stdout/stderr", "# stream names are in msg['content']['name'], if you want to handle", "# them differently", "print", "(", "\"%s: %s\"", "%", "(", "topic", ",", "msg", "[", "'content'", "]", "[", "'data'", "]", ")", ")", "elif", "msg", "[", "'msg_type'", "]", "==", "'pyerr'", ":", "# Python traceback", "c", "=", "msg", "[", "'content'", "]", "print", "(", "topic", "+", "':'", ")", "for", "line", "in", "c", "[", "'traceback'", "]", ":", "# indent lines", "print", "(", "' '", "+", "line", ")" ]
watch iopub channel, and print messages
[ "watch", "iopub", "channel", "and", "print", "messages" ]
python
test
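As the comments in the row above explain, 0MQ SUB filtering is a plain byte-prefix match on the topic. A minimal self-contained demonstration over the inproc transport, with no IPython cluster required:

import time
import zmq

ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
sub = ctx.socket(zmq.SUB)
pub.bind('inproc://iopub-demo')               # inproc: bind before connect
sub.connect('inproc://iopub-demo')
sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.')   # everything from engine 1

time.sleep(0.1)                               # let the subscription propagate
pub.send(b'engine.1.stdout hello')            # matches the prefix, delivered
pub.send(b'engine.2.stdout ignored')          # filtered out by the prefix
print(sub.recv())                             # b'engine.1.stdout hello'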
wtsi-hgi/python-baton-wrapper
baton/_baton/api.py
https://github.com/wtsi-hgi/python-baton-wrapper/blob/ae0c9e3630e2c4729a0614cc86f493688436b0b7/baton/_baton/api.py#L21-L29
def connect_to_irods_with_baton(baton_binaries_directory: str,
                                skip_baton_binaries_validation: bool=False) -> Connection:
    """
    Convenience method to create a pseudo connection to iRODS.

    :param baton_binaries_directory: see `Connection.__init__`
    :param skip_baton_binaries_validation: see `Connection.__init__`
    :return: pseudo connection to iRODS
    """
    return Connection(baton_binaries_directory, skip_baton_binaries_validation)
[ "def", "connect_to_irods_with_baton", "(", "baton_binaries_directory", ":", "str", ",", "skip_baton_binaries_validation", ":", "bool", "=", "False", ")", "->", "Connection", ":", "return", "Connection", "(", "baton_binaries_directory", ",", "skip_baton_binaries_validation", ")" ]
Convenience method to create a pseudo connection to iRODS. :param baton_binaries_directory: see `Connection.__init__` :param skip_baton_binaries_validation: see `Connection.__init__` :return: pseudo connection to iRODS
[ "Convenience", "method", "to", "create", "a", "pseudo", "connection", "to", "iRODS", ".", ":", "param", "baton_binaries_directory", ":", "see", "Connection", ".", "__init__", ":", "param", "skip_baton_binaries_validation", ":", "see", "Connection", ".", "__init__", ":", "return", ":", "pseudo", "connection", "to", "iRODS" ]
python
train
smartfile/python-librsync
librsync/__init__.py
https://github.com/smartfile/python-librsync/blob/1859a5f44317dce3e0997c740ac0f8675d77c4e3/librsync/__init__.py#L118-L152
def _execute(job, f, o=None):
    """
    Executes a librsync "job" by reading bytes from `f` and writing results to
    `o` if provided. If `o` is omitted, the output is ignored.
    """
    # Re-use the same buffer for output, we will read from it after each
    # iteration.
    out = ctypes.create_string_buffer(RS_JOB_BLOCKSIZE)
    while True:
        block = f.read(RS_JOB_BLOCKSIZE)
        buff = Buffer()
        # provide the data block via input buffer.
        buff.next_in = ctypes.c_char_p(block)
        buff.avail_in = ctypes.c_size_t(len(block))
        buff.eof_in = ctypes.c_int(not block)
        # Set up our buffer for output.
        buff.next_out = ctypes.cast(out, ctypes.c_char_p)
        buff.avail_out = ctypes.c_size_t(RS_JOB_BLOCKSIZE)
        r = _librsync.rs_job_iter(job, ctypes.byref(buff))
        if o:
            o.write(out.raw[:RS_JOB_BLOCKSIZE - buff.avail_out])
        if r == RS_DONE:
            break
        elif r != RS_BLOCKED:
            raise LibrsyncError(r)
        if buff.avail_in > 0:
            # There is data left in the input buffer, librsync did not consume
            # all of it. Rewind the file a bit so we include that data in our
            # next read. It would be better to simply tack data to the end of
            # this buffer, but that is very difficult in Python.
            f.seek(f.tell() - buff.avail_in)
    if o and callable(getattr(o, 'seek', None)):
        # As a matter of convenience, rewind the output file.
        o.seek(0)
    return o
[ "def", "_execute", "(", "job", ",", "f", ",", "o", "=", "None", ")", ":", "# Re-use the same buffer for output, we will read from it after each", "# iteration.", "out", "=", "ctypes", ".", "create_string_buffer", "(", "RS_JOB_BLOCKSIZE", ")", "while", "True", ":", "block", "=", "f", ".", "read", "(", "RS_JOB_BLOCKSIZE", ")", "buff", "=", "Buffer", "(", ")", "# provide the data block via input buffer.", "buff", ".", "next_in", "=", "ctypes", ".", "c_char_p", "(", "block", ")", "buff", ".", "avail_in", "=", "ctypes", ".", "c_size_t", "(", "len", "(", "block", ")", ")", "buff", ".", "eof_in", "=", "ctypes", ".", "c_int", "(", "not", "block", ")", "# Set up our buffer for output.", "buff", ".", "next_out", "=", "ctypes", ".", "cast", "(", "out", ",", "ctypes", ".", "c_char_p", ")", "buff", ".", "avail_out", "=", "ctypes", ".", "c_size_t", "(", "RS_JOB_BLOCKSIZE", ")", "r", "=", "_librsync", ".", "rs_job_iter", "(", "job", ",", "ctypes", ".", "byref", "(", "buff", ")", ")", "if", "o", ":", "o", ".", "write", "(", "out", ".", "raw", "[", ":", "RS_JOB_BLOCKSIZE", "-", "buff", ".", "avail_out", "]", ")", "if", "r", "==", "RS_DONE", ":", "break", "elif", "r", "!=", "RS_BLOCKED", ":", "raise", "LibrsyncError", "(", "r", ")", "if", "buff", ".", "avail_in", ">", "0", ":", "# There is data left in the input buffer, librsync did not consume", "# all of it. Rewind the file a bit so we include that data in our", "# next read. It would be better to simply tack data to the end of", "# this buffer, but that is very difficult in Python.", "f", ".", "seek", "(", "f", ".", "tell", "(", ")", "-", "buff", ".", "avail_in", ")", "if", "o", "and", "callable", "(", "getattr", "(", "o", ",", "'seek'", ",", "None", ")", ")", ":", "# As a matter of convenience, rewind the output file.", "o", ".", "seek", "(", "0", ")", "return", "o" ]
Executes a librsync "job" by reading bytes from `f` and writing results to `o` if provided. If `o` is omitted, the output is ignored.
[ "Executes", "a", "librsync", "job", "by", "reading", "bytes", "from", "f", "and", "writing", "results", "to", "o", "if", "provided", ".", "If", "o", "is", "omitted", "the", "output", "is", "ignored", "." ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/sound.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sound.py#L224-L249
def play_note(self, note, duration, volume=100, play_type=PLAY_WAIT_FOR_COMPLETE):
    """ Plays a note, given by its name as defined in ``_NOTE_FREQUENCIES``.

    :param string note: The note symbol with its octave number
    :param float duration: Tone duration, in seconds
    :param int volume: The play volume, in percent of maximum volume
    :param play_type: The behavior of ``play_note`` once playback has been initiated
    :type play_type: ``Sound.PLAY_WAIT_FOR_COMPLETE``, ``Sound.PLAY_NO_WAIT_FOR_COMPLETE`` or ``Sound.PLAY_LOOP``
    :return: When ``Sound.PLAY_NO_WAIT_FOR_COMPLETE`` is specified, returns the PID of the underlying beep command; ``None`` otherwise
    :raises ValueError: if a parameter is invalid (note, duration, ...)
    """
    self._validate_play_type(play_type)

    try:
        freq = self._NOTE_FREQUENCIES.get(note.upper(), self._NOTE_FREQUENCIES[note])
    except KeyError:
        raise ValueError('invalid note (%s)' % note)

    if duration <= 0:
        raise ValueError('invalid duration (%s)' % duration)
    if not 0 < volume <= 100:
        raise ValueError('invalid volume (%s)' % volume)

    return self.play_tone(freq, duration=duration, volume=volume, play_type=play_type)
[ "def", "play_note", "(", "self", ",", "note", ",", "duration", ",", "volume", "=", "100", ",", "play_type", "=", "PLAY_WAIT_FOR_COMPLETE", ")", ":", "self", ".", "_validate_play_type", "(", "play_type", ")", "try", ":", "freq", "=", "self", ".", "_NOTE_FREQUENCIES", ".", "get", "(", "note", ".", "upper", "(", ")", ",", "self", ".", "_NOTE_FREQUENCIES", "[", "note", "]", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "'invalid note (%s)'", "%", "note", ")", "if", "duration", "<=", "0", ":", "raise", "ValueError", "(", "'invalid duration (%s)'", "%", "duration", ")", "if", "not", "0", "<", "volume", "<=", "100", ":", "raise", "ValueError", "(", "'invalid volume (%s)'", "%", "volume", ")", "return", "self", ".", "play_tone", "(", "freq", ",", "duration", "=", "duration", ",", "volume", "=", "volume", ",", "play_type", "=", "play_type", ")" ]
Plays a note, given by its name as defined in ``_NOTE_FREQUENCIES``. :param string note: The note symbol with its octave number :param float duration: Tone duration, in seconds :param int volume: The play volume, in percent of maximum volume :param play_type: The behavior of ``play_note`` once playback has been initiated :type play_type: ``Sound.PLAY_WAIT_FOR_COMPLETE``, ``Sound.PLAY_NO_WAIT_FOR_COMPLETE`` or ``Sound.PLAY_LOOP`` :return: When ``Sound.PLAY_NO_WAIT_FOR_COMPLETE`` is specified, returns the PID of the underlying beep command; ``None`` otherwise :raises ValueError: if a parameter is invalid (note, duration, ...)
[ "Plays", "a", "note", "given", "by", "its", "name", "as", "defined", "in", "_NOTE_FREQUENCIES", "." ]
python
train
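A hypothetical usage sketch for the row above, assuming note names such as 'A4' and 'C5' are keys of Sound._NOTE_FREQUENCIES and that an EV3 speaker is available; this only runs on the device:

from ev3dev2.sound import Sound

spkr = Sound()
spkr.play_note('A4', 0.5)   # default play_type blocks for ~0.5 s
pid = spkr.play_note('C5', 2.0, volume=50,
                     play_type=Sound.PLAY_NO_WAIT_FOR_COMPLETE)
# pid holds the PID of the underlying beep command, per the docstring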
testingrequired/vom
vom/__init__.py
https://github.com/testingrequired/vom/blob/9e4da7d90c6ae6a01af3c7ec5c40ee4b08327188/vom/__init__.py#L269-L294
def find_elements(self, by, value, view_cls=None):
    # type: (By, Any, Value) -> List[View]
    """
    Find one or more elements matching condition

    :param by: Type of condition
    :param value: Condition value
    :param view_cls: Optional custom class to wrap returned elements
    :return: List of matching web elements wrapped in a view
    """
    if view_cls is None:
        view_cls = View

    def get_elements():
        results = []
        try:
            results = self.root.find_elements(by, value)
        except NoSuchElementException:
            pass
        finally:
            return results

    def get_element_at_index(i):
        return lambda: get_elements()[i]

    return [view_cls(get_element_at_index(i)) for i, element in enumerate(get_elements())]
[ "def", "find_elements", "(", "self", ",", "by", ",", "value", ",", "view_cls", "=", "None", ")", ":", "# type: (By, Any, Value) -> List[View]", "if", "view_cls", "is", "None", ":", "view_cls", "=", "View", "def", "get_elements", "(", ")", ":", "results", "=", "[", "]", "try", ":", "results", "=", "self", ".", "root", ".", "find_elements", "(", "by", ",", "value", ")", "except", "NoSuchElementException", ":", "pass", "finally", ":", "return", "results", "def", "get_element_at_index", "(", "i", ")", ":", "return", "lambda", ":", "get_elements", "(", ")", "[", "i", "]", "return", "[", "view_cls", "(", "get_element_at_index", "(", "i", ")", ")", "for", "i", ",", "element", "in", "enumerate", "(", "get_elements", "(", ")", ")", "]" ]
Find one or more elements matching condition :param by: Type of condition :param value: Condition value :param view_cls: Optional custom class to wrap returned elements :return: List of matching web elements wrapped in a view
[ "Find", "one", "or", "more", "elements", "matching", "condition", ":", "param", "by", ":", "Type", "of", "condition", ":", "param", "value", ":", "Condition", "value", ":", "param", "view_cls", ":", "Optional", "custom", "class", "to", "wrap", "returned", "elements", ":", "return", ":", "List", "of", "matching", "web", "elements", "wrapped", "in", "a", "view" ]
python
train
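The get_element_at_index helper in the row above exists to sidestep Python's late-binding closures: a lambda created in a loop sees the loop variable's final value unless the index is bound through an extra function call, as this standalone sketch shows.

# Lambdas close over variables, not values:
naive = [lambda: i for i in range(3)]
print([f() for f in naive])   # [2, 2, 2], all closures see the final i

def at_index(i):
    return lambda: i          # i is bound per call of at_index

fixed = [at_index(i) for i in range(3)]
print([f() for f in fixed])   # [0, 1, 2]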
drdoctr/doctr
doctr/travis.py
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L479-L534
def commit_docs(*, added, removed):
    """
    Commit the docs to the current branch

    Assumes that :func:`setup_GitHub_push`, which sets up the
    ``doctr_remote`` remote, has been run.

    Returns True if changes were committed and False if no changes were
    committed.
    """
    TRAVIS_BUILD_NUMBER = os.environ.get("TRAVIS_BUILD_NUMBER", "<unknown>")
    TRAVIS_BRANCH = os.environ.get("TRAVIS_BRANCH", "<unknown>")
    TRAVIS_COMMIT = os.environ.get("TRAVIS_COMMIT", "<unknown>")
    TRAVIS_REPO_SLUG = os.environ.get("TRAVIS_REPO_SLUG", "<unknown>")
    TRAVIS_JOB_WEB_URL = os.environ.get("TRAVIS_JOB_WEB_URL", "<unknown>")
    TRAVIS_TAG = os.environ.get("TRAVIS_TAG", "")
    branch = "tag" if TRAVIS_TAG else "branch"

    DOCTR_COMMAND = ' '.join(map(shlex.quote, sys.argv))

    if added:
        run(['git', 'add', *added])
    if removed:
        run(['git', 'rm', *removed])

    commit_message = """\
Update docs after building Travis build {TRAVIS_BUILD_NUMBER} of
{TRAVIS_REPO_SLUG}

The docs were built from the {branch} '{TRAVIS_BRANCH}' against the commit
{TRAVIS_COMMIT}.

The Travis build that generated this commit is at
{TRAVIS_JOB_WEB_URL}.

The doctr command that was run is

    {DOCTR_COMMAND}
""".format(
        branch=branch,
        TRAVIS_BUILD_NUMBER=TRAVIS_BUILD_NUMBER,
        TRAVIS_BRANCH=TRAVIS_BRANCH,
        TRAVIS_COMMIT=TRAVIS_COMMIT,
        TRAVIS_REPO_SLUG=TRAVIS_REPO_SLUG,
        TRAVIS_JOB_WEB_URL=TRAVIS_JOB_WEB_URL,
        DOCTR_COMMAND=DOCTR_COMMAND,
    )

    # Only commit if there were changes
    if run(['git', 'diff-index', '--exit-code', '--cached', '--quiet', 'HEAD', '--'], exit=False) != 0:
        print("Committing")
        run(['git', 'commit', '-am', commit_message])
        return True

    return False
[ "def", "commit_docs", "(", "*", ",", "added", ",", "removed", ")", ":", "TRAVIS_BUILD_NUMBER", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_BUILD_NUMBER\"", ",", "\"<unknown>\"", ")", "TRAVIS_BRANCH", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_BRANCH\"", ",", "\"<unknown>\"", ")", "TRAVIS_COMMIT", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_COMMIT\"", ",", "\"<unknown>\"", ")", "TRAVIS_REPO_SLUG", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_REPO_SLUG\"", ",", "\"<unknown>\"", ")", "TRAVIS_JOB_WEB_URL", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_JOB_WEB_URL\"", ",", "\"<unknown>\"", ")", "TRAVIS_TAG", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_TAG\"", ",", "\"\"", ")", "branch", "=", "\"tag\"", "if", "TRAVIS_TAG", "else", "\"branch\"", "DOCTR_COMMAND", "=", "' '", ".", "join", "(", "map", "(", "shlex", ".", "quote", ",", "sys", ".", "argv", ")", ")", "if", "added", ":", "run", "(", "[", "'git'", ",", "'add'", ",", "*", "added", "]", ")", "if", "removed", ":", "run", "(", "[", "'git'", ",", "'rm'", ",", "*", "removed", "]", ")", "commit_message", "=", "\"\"\"\\\nUpdate docs after building Travis build {TRAVIS_BUILD_NUMBER} of\n{TRAVIS_REPO_SLUG}\n\nThe docs were built from the {branch} '{TRAVIS_BRANCH}' against the commit\n{TRAVIS_COMMIT}.\n\nThe Travis build that generated this commit is at\n{TRAVIS_JOB_WEB_URL}.\n\nThe doctr command that was run is\n\n {DOCTR_COMMAND}\n\"\"\"", ".", "format", "(", "branch", "=", "branch", ",", "TRAVIS_BUILD_NUMBER", "=", "TRAVIS_BUILD_NUMBER", ",", "TRAVIS_BRANCH", "=", "TRAVIS_BRANCH", ",", "TRAVIS_COMMIT", "=", "TRAVIS_COMMIT", ",", "TRAVIS_REPO_SLUG", "=", "TRAVIS_REPO_SLUG", ",", "TRAVIS_JOB_WEB_URL", "=", "TRAVIS_JOB_WEB_URL", ",", "DOCTR_COMMAND", "=", "DOCTR_COMMAND", ",", ")", "# Only commit if there were changes", "if", "run", "(", "[", "'git'", ",", "'diff-index'", ",", "'--exit-code'", ",", "'--cached'", ",", "'--quiet'", ",", "'HEAD'", ",", "'--'", "]", ",", "exit", "=", "False", ")", "!=", "0", ":", "print", "(", "\"Committing\"", ")", "run", "(", "[", "'git'", ",", "'commit'", ",", "'-am'", ",", "commit_message", "]", ")", "return", "True", "return", "False" ]
Commit the docs to the current branch Assumes that :func:`setup_GitHub_push`, which sets up the ``doctr_remote`` remote, has been run. Returns True if changes were committed and False if no changes were committed.
[ "Commit", "the", "docs", "to", "the", "current", "branch" ]
python
train
ucsb-cs/submit
submit/helpers.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/helpers.py#L329-L342
def get_queue_func(request):
    """Establish the connection to rabbitmq."""
    def cleanup(request):
        conn.close()

    def queue_func(**kwargs):
        return conn.channel().basic_publish(
            exchange='', body=json.dumps(kwargs), routing_key=queue,
            properties=pika.BasicProperties(delivery_mode=2))

    server = request.registry.settings['queue_server']
    queue = request.registry.settings['queue_verification']
    conn = pika.BlockingConnection(pika.ConnectionParameters(host=server))
    request.add_finished_callback(cleanup)
    return queue_func
[ "def", "get_queue_func", "(", "request", ")", ":", "def", "cleanup", "(", "request", ")", ":", "conn", ".", "close", "(", ")", "def", "queue_func", "(", "*", "*", "kwargs", ")", ":", "return", "conn", ".", "channel", "(", ")", ".", "basic_publish", "(", "exchange", "=", "''", ",", "body", "=", "json", ".", "dumps", "(", "kwargs", ")", ",", "routing_key", "=", "queue", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "delivery_mode", "=", "2", ")", ")", "server", "=", "request", ".", "registry", ".", "settings", "[", "'queue_server'", "]", "queue", "=", "request", ".", "registry", ".", "settings", "[", "'queue_verification'", "]", "conn", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "server", ")", ")", "request", ".", "add_finished_callback", "(", "cleanup", ")", "return", "queue_func" ]
Establish the connection to rabbitmq.
[ "Establish", "the", "connection", "to", "rabbitmq", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/TC.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/TC.py#L1636-L1647
def text_to_data(self, text, elt, ps):
    '''convert text into typecode specific data. items in list are
    space separated.
    '''
    v = []
    items = text.split()
    for item in items:
        v.append(self.itemTypeCode.text_to_data(item, elt, ps))

    if self.pyclass is not None:
        return self.pyclass(v)
    return v
[ "def", "text_to_data", "(", "self", ",", "text", ",", "elt", ",", "ps", ")", ":", "v", "=", "[", "]", "items", "=", "text", ".", "split", "(", ")", "for", "item", "in", "items", ":", "v", ".", "append", "(", "self", ".", "itemTypeCode", ".", "text_to_data", "(", "item", ",", "elt", ",", "ps", ")", ")", "if", "self", ".", "pyclass", "is", "not", "None", ":", "return", "self", ".", "pyclass", "(", "v", ")", "return", "v" ]
convert text into typecode specific data. items in list are space separated.
[ "convert", "text", "into", "typecode", "specific", "data", ".", "items", "in", "list", "are", "space", "separated", "." ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3299-L3311
def getVideoStreamFrameBuffer(self, hTrackedCamera, eFrameType, pFrameBuffer, nFrameBufferSize, nFrameHeaderSize):
    """
    Copies the image frame into a caller's provided buffer. The image data is
    currently provided as RGBA data, 4 bytes per pixel. A caller can provide
    null for the framebuffer or frameheader if not desired. Requesting the
    frame header first, followed by the frame buffer allows the caller to
    determine if the frame has advanced per the frame header sequence. If
    there is no frame available yet, due to initial camera spinup or
    re-activation, the error will be VRTrackedCameraError_NoFrameAvailable.
    Ideally a caller should be polling at ~16ms intervals
    """
    fn = self.function_table.getVideoStreamFrameBuffer
    pFrameHeader = CameraVideoStreamFrameHeader_t()
    result = fn(hTrackedCamera, eFrameType, pFrameBuffer, nFrameBufferSize, byref(pFrameHeader), nFrameHeaderSize)
    return result, pFrameHeader
[ "def", "getVideoStreamFrameBuffer", "(", "self", ",", "hTrackedCamera", ",", "eFrameType", ",", "pFrameBuffer", ",", "nFrameBufferSize", ",", "nFrameHeaderSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getVideoStreamFrameBuffer", "pFrameHeader", "=", "CameraVideoStreamFrameHeader_t", "(", ")", "result", "=", "fn", "(", "hTrackedCamera", ",", "eFrameType", ",", "pFrameBuffer", ",", "nFrameBufferSize", ",", "byref", "(", "pFrameHeader", ")", ",", "nFrameHeaderSize", ")", "return", "result", ",", "pFrameHeader" ]
Copies the image frame into a caller's provided buffer. The image data is currently provided as RGBA data, 4 bytes per pixel. A caller can provide null for the framebuffer or frameheader if not desired. Requesting the frame header first, followed by the frame buffer allows the caller to determine if the frame has advanced per the frame header sequence. If there is no frame available yet, due to initial camera spinup or re-activation, the error will be VRTrackedCameraError_NoFrameAvailable. Ideally a caller should be polling at ~16ms intervals
[ "Copies", "the", "image", "frame", "into", "a", "caller", "s", "provided", "buffer", ".", "The", "image", "data", "is", "currently", "provided", "as", "RGBA", "data", "4", "bytes", "per", "pixel", ".", "A", "caller", "can", "provide", "null", "for", "the", "framebuffer", "or", "frameheader", "if", "not", "desired", ".", "Requesting", "the", "frame", "header", "first", "followed", "by", "the", "frame", "buffer", "allows", "the", "caller", "to", "determine", "if", "the", "frame", "as", "advanced", "per", "the", "frame", "header", "sequence", ".", "If", "there", "is", "no", "frame", "available", "yet", "due", "to", "initial", "camera", "spinup", "or", "re", "-", "activation", "the", "error", "will", "be", "VRTrackedCameraError_NoFrameAvailable", ".", "Ideally", "a", "caller", "should", "be", "polling", "at", "~16ms", "intervals" ]
python
train
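getVideoStreamFrameBuffer above follows the usual ctypes out-parameter pattern: allocate a Structure, pass it by reference, read its fields after the call. A self-contained sketch with a Python stand-in for the C entry point (the struct layout is illustrative, not the real CameraVideoStreamFrameHeader_t):

    import ctypes

    class FrameHeader(ctypes.Structure):
        # Illustrative single field; the real header carries more.
        _fields_ = [("frame_sequence", ctypes.c_uint32)]

    def stub_get_header(p_header):
        # Stand-in for a C function that writes through the pointer
        # and returns a status code.
        p_header.contents.frame_sequence = 42
        return 0

    header = FrameHeader()
    status = stub_get_header(ctypes.pointer(header))
    assert (status, header.frame_sequence) == (0, 42)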
agoragames/kairos
kairos/mongo_backend.py
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/mongo_backend.py#L152-L160
def _insert(self, name, value, timestamp, intervals, **kwargs): ''' Insert the new value. ''' # TODO: confirm that this is in fact using the indices correctly. for interval,config in self._intervals.items(): timestamps = self._normalize_timestamps(timestamp, intervals, config) for tstamp in timestamps: self._insert_data(name, value, tstamp, interval, config, **kwargs)
[ "def", "_insert", "(", "self", ",", "name", ",", "value", ",", "timestamp", ",", "intervals", ",", "*", "*", "kwargs", ")", ":", "# TODO: confirm that this is in fact using the indices correctly.", "for", "interval", ",", "config", "in", "self", ".", "_intervals", ".", "items", "(", ")", ":", "timestamps", "=", "self", ".", "_normalize_timestamps", "(", "timestamp", ",", "intervals", ",", "config", ")", "for", "tstamp", "in", "timestamps", ":", "self", ".", "_insert_data", "(", "name", ",", "value", ",", "tstamp", ",", "interval", ",", "config", ",", "*", "*", "kwargs", ")" ]
Insert the new value.
[ "Insert", "the", "new", "value", "." ]
python
train
thomasdelaet/python-velbus
velbus/messages/start_relay_blinking_timer.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/start_relay_blinking_timer.py#L40-L47
def data_to_binary(self): """ :return: bytes """ return bytes([ COMMAND_CODE, self.channels_to_byte(self.relay_channels)]) + \ struct.pack('>L', self.delay_time)[-3:]
[ "def", "data_to_binary", "(", "self", ")", ":", "return", "bytes", "(", "[", "COMMAND_CODE", ",", "self", ".", "channels_to_byte", "(", "self", ".", "relay_channels", ")", "]", ")", "+", "struct", ".", "pack", "(", "'>L'", ",", "self", ".", "delay_time", ")", "[", "-", "3", ":", "]" ]
:return: bytes
[ ":", "return", ":", "bytes" ]
python
train
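data_to_binary above encodes the delay as a 3-byte big-endian integer by packing four bytes ('>L') and keeping the last three. A minimal sketch of that trick in isolation:

    import struct

    def pack_uint24_be(value):
        # '>L' gives 4 big-endian bytes; [-3:] drops the high byte,
        # leaving a 24-bit value (assumes value < 2**24).
        return struct.pack('>L', value)[-3:]

    assert pack_uint24_be(0x010203) == b'\x01\x02\x03'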
milesrichardson/ParsePy
parse_rest/user.py
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/user.py#L21-L28
def login_required(func): '''decorator describing User methods that need to be logged in''' def ret(obj, *args, **kw): if not hasattr(obj, 'sessionToken'): message = '%s requires a logged-in session' % func.__name__ raise ResourceRequestLoginRequired(message) return func(obj, *args, **kw) return ret
[ "def", "login_required", "(", "func", ")", ":", "def", "ret", "(", "obj", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'sessionToken'", ")", ":", "message", "=", "'%s requires a logged-in session'", "%", "func", ".", "__name__", "raise", "ResourceRequestLoginRequired", "(", "message", ")", "return", "func", "(", "obj", ",", "*", "args", ",", "*", "*", "kw", ")", "return", "ret" ]
decorator describing User methods that need to be logged in
[ "decorator", "describing", "User", "methods", "that", "need", "to", "be", "logged", "in" ]
python
train
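login_required above is a plain guard decorator; a self-contained sketch of how such a decorator is applied (the User class and session attribute here are illustrative):

    class ResourceRequestLoginRequired(Exception):
        pass

    def login_required(func):
        def ret(obj, *args, **kw):
            if not hasattr(obj, 'sessionToken'):
                raise ResourceRequestLoginRequired(
                    '%s requires a logged-in session' % func.__name__)
            return func(obj, *args, **kw)
        return ret

    class User(object):
        @login_required
        def save(self):
            return 'saved'

    u = User()
    u.sessionToken = 'abc'  # simulate a logged-in session
    assert u.save() == 'saved'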
SeabornGames/RequestClient
seaborn/request_client/repr_wrapper.py
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/repr_wrapper.py#L153-L165
def repr_setup(self, name=None, col_names=None, col_types=None): """ This wasn't safe to pass into init because of the inheritance :param name: name of the api return type (ex. CAMERA_DATA_LIST) :param col_names: :param col_types: :return None: """ self._name = name or self._name self._col_names = col_names or self._col_names self._col_types = col_types or self._col_types if self._original: self._col_names = self._col_names[:len(self._original[0])]
[ "def", "repr_setup", "(", "self", ",", "name", "=", "None", ",", "col_names", "=", "None", ",", "col_types", "=", "None", ")", ":", "self", ".", "_name", "=", "name", "or", "self", ".", "_name", "self", ".", "_col_names", "=", "col_names", "or", "self", ".", "_col_names", "self", ".", "_col_types", "=", "col_types", "or", "self", ".", "_col_types", "if", "self", ".", "_original", ":", "self", ".", "_col_names", "=", "self", ".", "_col_names", "[", ":", "len", "(", "self", ".", "_original", "[", "0", "]", ")", "]" ]
This wasn't safe to pass into init because of the inheritance :param name: name of the api return type (ex. CAMERA_DATA_LIST) :param col_names: :param col_types: :return None:
[ "This", "wasn", "t", "safe", "to", "pass", "into", "init", "because", "of", "the", "inheritance", ":", "param", "name", ":", "name", "of", "the", "api", "return", "type", "(", "ex", ".", "CAMERA_DATA_LIST", ")", ":", "param", "col_names", ":", ":", "param", "col_types", ":", ":", "return", "None", ":" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L1195-L1215
def update_build_properties(self, document, project, build_id): """UpdateBuildProperties. [Preview API] Updates properties for a build. :param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.build.models.[JsonPatchOperation]>` document: A json-patch document describing the properties to update. :param str project: Project ID or project name :param int build_id: The ID of the build. :rtype: :class:`<object> <azure.devops.v5_0.build.models.object>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') content = self._serialize.body(document, '[JsonPatchOperation]') response = self._send(http_method='PATCH', location_id='0a6312e9-0627-49b7-8083-7d74a64849c9', version='5.0-preview.1', route_values=route_values, content=content, media_type='application/json-patch+json') return self._deserialize('object', response)
[ "def", "update_build_properties", "(", "self", ",", "document", ",", "project", ",", "build_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "build_id", "is", "not", "None", ":", "route_values", "[", "'buildId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'build_id'", ",", "build_id", ",", "'int'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "document", ",", "'[JsonPatchOperation]'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'0a6312e9-0627-49b7-8083-7d74a64849c9'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ",", "media_type", "=", "'application/json-patch+json'", ")", "return", "self", ".", "_deserialize", "(", "'object'", ",", "response", ")" ]
UpdateBuildProperties. [Preview API] Updates properties for a build. :param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.build.models.[JsonPatchOperation]>` document: A json-patch document describing the properties to update. :param str project: Project ID or project name :param int build_id: The ID of the build. :rtype: :class:`<object> <azure.devops.v5_0.build.models.object>`
[ "UpdateBuildProperties", ".", "[", "Preview", "API", "]", "Updates", "properties", "for", "a", "build", ".", ":", "param", ":", "class", ":", "<", "[", "JsonPatchOperation", "]", ">", "<azure", ".", "devops", ".", "v5_0", ".", "build", ".", "models", ".", "[", "JsonPatchOperation", "]", ">", "document", ":", "A", "json", "-", "patch", "document", "describing", "the", "properties", "to", "update", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "build_id", ":", "The", "ID", "of", "the", "build", ".", ":", "rtype", ":", ":", "class", ":", "<object", ">", "<azure", ".", "devops", ".", "v5_0", ".", "build", ".", "models", ".", "object", ">" ]
python
train
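update_build_properties above serializes a list of JsonPatchOperation objects and sends them as application/json-patch+json. A hedged sketch of the equivalent raw payload, using plain RFC 6902 dicts rather than the SDK's model classes (paths and values are illustrative):

    import json

    document = [
        {"op": "replace", "path": "/myProperty", "value": "new value"},
        {"op": "remove", "path": "/obsoleteProperty"},
    ]
    body = json.dumps(document)  # sent with Content-Type: application/json-patch+json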
DataDog/integrations-core
mapreduce/datadog_checks/mapreduce/mapreduce.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/mapreduce/datadog_checks/mapreduce/mapreduce.py#L456-L465
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None): """ Set a metric """ if metric_type == self.HISTOGRAM: self.histogram(metric_name, value, tags=tags, device_name=device_name) elif metric_type == self.INCREMENT: self.increment(metric_name, value, tags=tags, device_name=device_name) else: self.log.error('Metric type "{}" unknown'.format(metric_type))
[ "def", "_set_metric", "(", "self", ",", "metric_name", ",", "metric_type", ",", "value", ",", "tags", "=", "None", ",", "device_name", "=", "None", ")", ":", "if", "metric_type", "==", "self", ".", "HISTOGRAM", ":", "self", ".", "histogram", "(", "metric_name", ",", "value", ",", "tags", "=", "tags", ",", "device_name", "=", "device_name", ")", "elif", "metric_type", "==", "self", ".", "INCREMENT", ":", "self", ".", "increment", "(", "metric_name", ",", "value", ",", "tags", "=", "tags", ",", "device_name", "=", "device_name", ")", "else", ":", "self", ".", "log", ".", "error", "(", "'Metric type \"{}\" unknown'", ".", "format", "(", "metric_type", ")", ")" ]
Set a metric
[ "Set", "a", "metric" ]
python
train
pybel/pybel-tools
src/pybel_tools/analysis/stability.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/analysis/stability.py#L179-L185
def get_mutually_unstable_correlation_triples(graph: BELGraph) -> Iterable[NodeTriple]: """Yield triples of nodes (A, B, C) such that ``A neg B``, ``B neg C``, and ``C neg A``.""" cg = get_correlation_graph(graph) for a, b, c in get_correlation_triangles(cg): if all(NEGATIVE_CORRELATION in x for x in (cg[a][b], cg[b][c], cg[a][c])): yield a, b, c
[ "def", "get_mutually_unstable_correlation_triples", "(", "graph", ":", "BELGraph", ")", "->", "Iterable", "[", "NodeTriple", "]", ":", "cg", "=", "get_correlation_graph", "(", "graph", ")", "for", "a", ",", "b", ",", "c", "in", "get_correlation_triangles", "(", "cg", ")", ":", "if", "all", "(", "NEGATIVE_CORRELATION", "in", "x", "for", "x", "in", "(", "cg", "[", "a", "]", "[", "b", "]", ",", "cg", "[", "b", "]", "[", "c", "]", ",", "cg", "[", "a", "]", "[", "c", "]", ")", ")", ":", "yield", "a", ",", "b", ",", "c" ]
Yield triples of nodes (A, B, C) such that ``A neg B``, ``B neg C``, and ``C neg A``.
[ "Yield", "triples", "of", "nodes", "(", "A", "B", "C", ")", "such", "that", "A", "neg", "B", "B", "neg", "C", "and", "C", "neg", "A", "." ]
python
valid
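The record above scans correlation triangles for three mutually negative edges. A self-contained sketch of the same idea on a plain networkx graph (the edge attribute name 'relation' is illustrative; the brute-force scan is fine for small graphs):

    import itertools
    import networkx as nx

    g = nx.Graph()
    g.add_edge('a', 'b', relation='neg')
    g.add_edge('b', 'c', relation='neg')
    g.add_edge('a', 'c', relation='neg')

    def mutually_negative_triangles(graph, key='relation', neg='neg'):
        # Check every 3-node combination for three negative edges.
        for a, b, c in itertools.combinations(graph.nodes, 3):
            if all(graph.has_edge(u, v) and graph[u][v][key] == neg
                   for u, v in ((a, b), (b, c), (a, c))):
                yield a, b, c

    assert list(mutually_negative_triangles(g)) == [('a', 'b', 'c')]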
xapple/plumbing
plumbing/slurm/__init__.py
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/__init__.py#L10-L19
def count_processors(): """How many cores does the current computer have?""" if 'SLURM_NTASKS' in os.environ: return int(os.environ['SLURM_NTASKS']) elif 'SLURM_JOB_CPUS_PER_NODE' in os.environ: text = os.environ['SLURM_JOB_CPUS_PER_NODE'] if is_integer(text): return int(text) else: n, N = re.findall("([1-9]+)\(x([1-9]+)\)", text)[0] return int(n) * int(N) else: return multiprocessing.cpu_count()
[ "def", "count_processors", "(", ")", ":", "if", "'SLURM_NTASKS'", "in", "os", ".", "environ", ":", "return", "int", "(", "os", ".", "environ", "[", "'SLURM_NTASKS'", "]", ")", "elif", "'SLURM_JOB_CPUS_PER_NODE'", "in", "os", ".", "environ", ":", "text", "=", "os", ".", "environ", "[", "'SLURM_JOB_CPUS_PER_NODE'", "]", "if", "is_integer", "(", "text", ")", ":", "return", "int", "(", "text", ")", "else", ":", "n", ",", "N", "=", "re", ".", "findall", "(", "\"([1-9]+)\\(x([1-9]+)\\)\"", ",", "text", ")", "[", "0", "]", "return", "int", "(", "n", ")", "*", "int", "(", "N", ")", "else", ":", "return", "multiprocessing", ".", "cpu_count", "(", ")" ]
How many cores does the current computer have?
[ "How", "many", "cores", "does", "the", "current", "computer", "have", "?" ]
python
train
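count_processors above parses SLURM_JOB_CPUS_PER_NODE values such as "16" or "4(x2)" (4 CPUs on each of 2 nodes). Note that the record's character class [1-9] cannot match the digit 0, so a value like "10(x2)" would fail to parse; the sketch below uses \d instead:

    import re

    def parse_cpus_per_node(text):
        # Handles both the plain form "16" and the grouped form "4(x2)".
        if text.isdigit():
            return int(text)
        n, times = re.findall(r"(\d+)\(x(\d+)\)", text)[0]
        return int(n) * int(times)

    assert parse_cpus_per_node("16") == 16
    assert parse_cpus_per_node("4(x2)") == 8
    assert parse_cpus_per_node("10(x2)") == 20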
honmaple/flask-maple
example/manager.py
https://github.com/honmaple/flask-maple/blob/8124de55e5e531a5cb43477944168f98608dc08f/example/manager.py#L24-L31
def init_db(): """ Drops and re-creates the SQL schema """ db.drop_all() db.configure_mappers() db.create_all() db.session.commit()
[ "def", "init_db", "(", ")", ":", "db", ".", "drop_all", "(", ")", "db", ".", "configure_mappers", "(", ")", "db", ".", "create_all", "(", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Drops and re-creates the SQL schema
[ "Drops", "and", "re", "-", "creates", "the", "SQL", "schema" ]
python
train
splunk/splunk-sdk-python
examples/analytics/bottle.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L944-L950
def params(self): """ A combined :class:`MultiDict` with values from :attr:`forms` and :attr:`GET`. File-uploads are not included. """ params = MultiDict(self.GET) for key, value in self.forms.iterallitems(): params[key] = value return params
[ "def", "params", "(", "self", ")", ":", "params", "=", "MultiDict", "(", "self", ".", "GET", ")", "for", "key", ",", "value", "in", "self", ".", "forms", ".", "iterallitems", "(", ")", ":", "params", "[", "key", "]", "=", "value", "return", "params" ]
A combined :class:`MultiDict` with values from :attr:`forms` and :attr:`GET`. File-uploads are not included.
[ "A", "combined", ":", "class", ":", "MultiDict", "with", "values", "from", ":", "attr", ":", "forms", "and", ":", "attr", ":", "GET", ".", "File", "-", "uploads", "are", "not", "included", "." ]
python
train
matousc89/padasip
padasip/filters/__init__.py
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/__init__.py#L179-L228
def filter_data(d, x, model="lms", **kwargs): """ Function that filters data with selected adaptive filter. **Args:** * `d` : desired value (1 dimensional array) * `x` : input matrix (2-dimensional array). Rows are samples, columns are input arrays. **Kwargs:** * Any key argument that can be accepted with selected filter model. For more information see documentation of desired adaptive filter. **Returns:** * `y` : output value (1 dimensional array). The size corresponds with the desired value. * `e` : filter error for every sample (1 dimensional array). The size corresponds with the desired value. * `w` : history of all weights (2 dimensional array). Every row is set of the weights for given sample. """ # overwrite n with correct size kwargs["n"] = x.shape[1] # create filter according model if model in ["LMS", "lms"]: f = FilterLMS(**kwargs) elif model in ["NLMS", "nlms"]: f = FilterNLMS(**kwargs) elif model in ["RLS", "rls"]: f = FilterRLS(**kwargs) elif model in ["GNGD", "gngd"]: f = FilterGNGD(**kwargs) elif model in ["AP", "ap"]: f = FilterAP(**kwargs) elif model in ["LMF", "lmf"]: f = FilterLMF(**kwargs) elif model in ["NLMF", "nlmf"]: f = FilterNLMF(**kwargs) else: raise ValueError('Unknown model of filter {}'.format(model)) # calculate and return the values y, e, w = f.run(d, x) return y, e, w
[ "def", "filter_data", "(", "d", ",", "x", ",", "model", "=", "\"lms\"", ",", "*", "*", "kwargs", ")", ":", "# overwrite n with correct size", "kwargs", "[", "\"n\"", "]", "=", "x", ".", "shape", "[", "1", "]", "# create filter according model", "if", "model", "in", "[", "\"LMS\"", ",", "\"lms\"", "]", ":", "f", "=", "FilterLMS", "(", "*", "*", "kwargs", ")", "elif", "model", "in", "[", "\"NLMS\"", ",", "\"nlms\"", "]", ":", "f", "=", "FilterNLMS", "(", "*", "*", "kwargs", ")", "elif", "model", "in", "[", "\"RLS\"", ",", "\"rls\"", "]", ":", "f", "=", "FilterRLS", "(", "*", "*", "kwargs", ")", "elif", "model", "in", "[", "\"GNGD\"", ",", "\"gngd\"", "]", ":", "f", "=", "FilterGNGD", "(", "*", "*", "kwargs", ")", "elif", "model", "in", "[", "\"AP\"", ",", "\"ap\"", "]", ":", "f", "=", "FilterAP", "(", "*", "*", "kwargs", ")", "elif", "model", "in", "[", "\"LMF\"", ",", "\"lmf\"", "]", ":", "f", "=", "FilterLMF", "(", "*", "*", "kwargs", ")", "elif", "model", "in", "[", "\"NLMF\"", ",", "\"nlmf\"", "]", ":", "f", "=", "FilterNLMF", "(", "*", "*", "kwargs", ")", "else", ":", "raise", "ValueError", "(", "'Unknown model of filter {}'", ".", "format", "(", "model", ")", ")", "# calculate and return the values", "y", ",", "e", ",", "w", "=", "f", ".", "run", "(", "d", ",", "x", ")", "return", "y", ",", "e", ",", "w" ]
Function that filters data with selected adaptive filter. **Args:** * `d` : desired value (1 dimensional array) * `x` : input matrix (2-dimensional array). Rows are samples, columns are input arrays. **Kwargs:** * Any key argument that can be accepted with selected filter model. For more information see documentation of desired adaptive filter. **Returns:** * `y` : output value (1 dimensional array). The size corresponds with the desired value. * `e` : filter error for every sample (1 dimensional array). The size corresponds with the desired value. * `w` : history of all weights (2 dimensional array). Every row is set of the weights for given sample.
[ "Function", "that", "filter", "data", "with", "selected", "adaptive", "filter", ".", "**", "Args", ":", "**" ]
python
train
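A hedged usage sketch of the dispatcher above on a toy system-identification problem, assuming padasip is installed (pa.filters.filter_data is the function in this record):

    import numpy as np
    import padasip as pa

    np.random.seed(0)
    x = np.random.normal(size=(200, 4))          # 200 samples, 4 inputs
    w_true = np.array([0.5, -1.0, 2.0, 0.1])
    d = x.dot(w_true) + 0.01 * np.random.normal(size=200)

    y, e, w = pa.filters.filter_data(d, x, model="nlms", mu=0.5)
    print(e[-5:])  # error should shrink as the weights converge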
crazy-canux/arguspy
arguspy/ssh_paramiko.py
https://github.com/crazy-canux/arguspy/blob/e9486b5df61978a990d56bf43de35f3a4cdefcc3/arguspy/ssh_paramiko.py#L51-L82
def execute(self, command, timeout=None): """Execute a shell command.""" try: self.channel = self.ssh.get_transport().open_session() except paramiko.SSHException as e: self.unknown("Create channel error: %s" % e) try: self.channel.settimeout(self.args.timeout if not timeout else timeout) except socket.timeout as e: self.unknown("Settimeout for channel error: %s" % e) try: self.logger.debug("command: {}".format(command)) self.channel.exec_command(command) except paramiko.SSHException as e: self.unknown("Execute command error: %s" % e) try: self.stdin = self.channel.makefile('wb', -1) self.stderr = map(string.strip, self.channel.makefile_stderr('rb', -1).readlines()) self.stdout = map(string.strip, self.channel.makefile('rb', -1).readlines()) except Exception as e: self.unknown("Get result error: %s" % e) try: self.status = self.channel.recv_exit_status() except paramiko.SSHException as e: self.unknown("Get return code error: %s" % e) else: if self.status != 0: self.unknown("Return code: %d , stderr: %s" % (self.status, self.errors)) else: return self.stdout finally: self.logger.debug("Execute command finish.")
[ "def", "execute", "(", "self", ",", "command", ",", "timeout", "=", "None", ")", ":", "try", ":", "self", ".", "channel", "=", "self", ".", "ssh", ".", "get_transport", "(", ")", ".", "open_session", "(", ")", "except", "paramiko", ".", "SSHException", "as", "e", ":", "self", ".", "unknown", "(", "\"Create channel error: %s\"", "%", "e", ")", "try", ":", "self", ".", "channel", ".", "settimeout", "(", "self", ".", "args", ".", "timeout", "if", "not", "timeout", "else", "timeout", ")", "except", "socket", ".", "timeout", "as", "e", ":", "self", ".", "unknown", "(", "\"Settimeout for channel error: %s\"", "%", "e", ")", "try", ":", "self", ".", "logger", ".", "debug", "(", "\"command: {}\"", ".", "format", "(", "command", ")", ")", "self", ".", "channel", ".", "exec_command", "(", "command", ")", "except", "paramiko", ".", "SSHException", "as", "e", ":", "self", ".", "unknown", "(", "\"Execute command error: %s\"", "%", "e", ")", "try", ":", "self", ".", "stdin", "=", "self", ".", "channel", ".", "makefile", "(", "'wb'", ",", "-", "1", ")", "self", ".", "stderr", "=", "map", "(", "string", ".", "strip", ",", "self", ".", "channel", ".", "makefile_stderr", "(", "'rb'", ",", "-", "1", ")", ".", "readlines", "(", ")", ")", "self", ".", "stdout", "=", "map", "(", "string", ".", "strip", ",", "self", ".", "channel", ".", "makefile", "(", "'rb'", ",", "-", "1", ")", ".", "readlines", "(", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "unknown", "(", "\"Get result error: %s\"", "%", "e", ")", "try", ":", "self", ".", "status", "=", "self", ".", "channel", ".", "recv_exit_status", "(", ")", "except", "paramiko", ".", "SSHException", "as", "e", ":", "self", ".", "unknown", "(", "\"Get return code error: %s\"", "%", "e", ")", "else", ":", "if", "self", ".", "status", "!=", "0", ":", "self", ".", "unknown", "(", "\"Return code: %d , stderr: %s\"", "%", "(", "self", ".", "status", ",", "self", ".", "errors", ")", ")", "else", ":", "return", "self", ".", "stdout", "finally", ":", "self", ".", "logger", ".", "debug", "(", "\"Execute command finish.\"", ")" ]
Execute a shell command.
[ "Execute", "a", "shell", "command", "." ]
python
valid
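The execute record above wraps paramiko's channel API with per-step error handling. A minimal sketch of the same flow using the higher-level SSHClient interface (host and credentials are placeholders):

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("example.com", username="monitor", password="secret", timeout=10)
    stdin, stdout, stderr = client.exec_command("uptime", timeout=10)
    status = stdout.channel.recv_exit_status()  # blocks until the command exits
    output = stdout.read().decode().strip()
    client.close()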
mlperf/training
reinforcement/tensorflow/minigo/rl_loop/example_buffer.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/rl_loop/example_buffer.py#L100-L117
def update(self, new_games): """ new_games is a list of .tfrecord.zz new game records. """ new_games.sort(key=os.path.basename) first_new_game = None for idx, game in enumerate(new_games): timestamp = file_timestamp(game) if timestamp <= self.examples[-1][0]: continue elif first_new_game is None: first_new_game = idx num_new_games = len(new_games) - idx print("Found {}/{} new games".format( num_new_games, len(new_games))) self.total_updates += num_new_games self.examples.extend(self.func(game)) if first_new_game is None: print("No new games", file_timestamp( new_games[-1]), self.examples[-1][0])
[ "def", "update", "(", "self", ",", "new_games", ")", ":", "new_games", ".", "sort", "(", "key", "=", "os", ".", "path", ".", "basename", ")", "first_new_game", "=", "None", "for", "idx", ",", "game", "in", "enumerate", "(", "new_games", ")", ":", "timestamp", "=", "file_timestamp", "(", "game", ")", "if", "timestamp", "<=", "self", ".", "examples", "[", "-", "1", "]", "[", "0", "]", ":", "continue", "elif", "first_new_game", "is", "None", ":", "first_new_game", "=", "idx", "num_new_games", "=", "len", "(", "new_games", ")", "-", "idx", "print", "(", "\"Found {}/{} new games\"", ".", "format", "(", "num_new_games", ",", "len", "(", "new_games", ")", ")", ")", "self", ".", "total_updates", "+=", "num_new_games", "self", ".", "examples", ".", "extend", "(", "self", ".", "func", "(", "game", ")", ")", "if", "first_new_game", "is", "None", ":", "print", "(", "\"No new games\"", ",", "file_timestamp", "(", "new_games", "[", "-", "1", "]", ")", ",", "self", ".", "examples", "[", "-", "1", "]", "[", "0", "]", ")" ]
new_games is a list of .tfrecord.zz new game records.
[ "new_games", "is", "a", "list", "of", ".", "tfrecord", ".", "zz", "new", "game", "records", "." ]
python
train
libyal/dtfabric
dtfabric/runtime/data_maps.py
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L909-L1031
def _CompositeMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): """Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: ByteStreamTooSmallError: if the byte stream is too small. MappingError: if the data type definition cannot be mapped on the byte stream. """ elements_data_size = None elements_terminator = None number_of_elements = None if self._HasElementsDataSize(): elements_data_size = self._EvaluateElementsDataSize(context) element_byte_size = self._element_data_type_definition.GetByteSize() if element_byte_size is not None: number_of_elements, _ = divmod(elements_data_size, element_byte_size) else: elements_terminator = ( self._element_data_type_definition.elements_terminator) elif self._HasElementsTerminator(): elements_terminator = self._data_type_definition.elements_terminator elif self._HasNumberOfElements(): number_of_elements = self._EvaluateNumberOfElements(context) if elements_terminator is None and number_of_elements is None: raise errors.MappingError( 'Unable to determine element terminator or number of elements') context_state = getattr(context, 'state', {}) elements_data_offset = context_state.get('elements_data_offset', 0) element_index = context_state.get('element_index', 0) element_value = None mapped_values = context_state.get('mapped_values', []) size_hints = context_state.get('size_hints', {}) subcontext = context_state.get('context', None) if not subcontext: subcontext = DataTypeMapContext() try: while byte_stream[byte_offset:]: if (number_of_elements is not None and element_index == number_of_elements): break if (elements_data_size is not None and elements_data_offset >= elements_data_size): break element_value = self._element_data_type_map.MapByteStream( byte_stream, byte_offset=byte_offset, context=subcontext) byte_offset += subcontext.byte_size elements_data_offset += subcontext.byte_size element_index += 1 mapped_values.append(element_value) if (elements_terminator is not None and element_value == elements_terminator): break except errors.ByteStreamTooSmallError as exception: context_state['context'] = subcontext context_state['elements_data_offset'] = elements_data_offset context_state['element_index'] = element_index context_state['mapped_values'] = mapped_values raise errors.ByteStreamTooSmallError(exception) except Exception as exception: raise errors.MappingError(exception) if number_of_elements is not None and element_index != number_of_elements: context_state['context'] = subcontext context_state['elements_data_offset'] = elements_data_offset context_state['element_index'] = element_index context_state['mapped_values'] = mapped_values error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: missing element: {2:d}').format( self._data_type_definition.name, byte_offset, element_index - 1) raise errors.ByteStreamTooSmallError(error_string) if (elements_terminator is not None and element_value != elements_terminator and ( elements_data_size is None or elements_data_offset < elements_data_size)): byte_stream_size = len(byte_stream) size_hints[self._data_type_definition.name] = DataTypeMapSizeHint( byte_stream_size - byte_offset) context_state['context'] = subcontext context_state['elements_data_offset'] = elements_data_offset context_state['element_index'] = element_index context_state['mapped_values'] = mapped_values context_state['size_hints'] = size_hints error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: unable to find elements terminator').format( self._data_type_definition.name, byte_offset) raise errors.ByteStreamTooSmallError(error_string) if context: context.byte_size = elements_data_offset context.state = {} return tuple(mapped_values)
[ "def", "_CompositeMapByteStream", "(", "self", ",", "byte_stream", ",", "byte_offset", "=", "0", ",", "context", "=", "None", ",", "*", "*", "unused_kwargs", ")", ":", "elements_data_size", "=", "None", "elements_terminator", "=", "None", "number_of_elements", "=", "None", "if", "self", ".", "_HasElementsDataSize", "(", ")", ":", "elements_data_size", "=", "self", ".", "_EvaluateElementsDataSize", "(", "context", ")", "element_byte_size", "=", "self", ".", "_element_data_type_definition", ".", "GetByteSize", "(", ")", "if", "element_byte_size", "is", "not", "None", ":", "number_of_elements", ",", "_", "=", "divmod", "(", "elements_data_size", ",", "element_byte_size", ")", "else", ":", "elements_terminator", "=", "(", "self", ".", "_element_data_type_definition", ".", "elements_terminator", ")", "elif", "self", ".", "_HasElementsTerminator", "(", ")", ":", "elements_terminator", "=", "self", ".", "_data_type_definition", ".", "elements_terminator", "elif", "self", ".", "_HasNumberOfElements", "(", ")", ":", "number_of_elements", "=", "self", ".", "_EvaluateNumberOfElements", "(", "context", ")", "if", "elements_terminator", "is", "None", "and", "number_of_elements", "is", "None", ":", "raise", "errors", ".", "MappingError", "(", "'Unable to determine element terminator or number of elements'", ")", "context_state", "=", "getattr", "(", "context", ",", "'state'", ",", "{", "}", ")", "elements_data_offset", "=", "context_state", ".", "get", "(", "'elements_data_offset'", ",", "0", ")", "element_index", "=", "context_state", ".", "get", "(", "'element_index'", ",", "0", ")", "element_value", "=", "None", "mapped_values", "=", "context_state", ".", "get", "(", "'mapped_values'", ",", "[", "]", ")", "size_hints", "=", "context_state", ".", "get", "(", "'size_hints'", ",", "{", "}", ")", "subcontext", "=", "context_state", ".", "get", "(", "'context'", ",", "None", ")", "if", "not", "subcontext", ":", "subcontext", "=", "DataTypeMapContext", "(", ")", "try", ":", "while", "byte_stream", "[", "byte_offset", ":", "]", ":", "if", "(", "number_of_elements", "is", "not", "None", "and", "element_index", "==", "number_of_elements", ")", ":", "break", "if", "(", "elements_data_size", "is", "not", "None", "and", "elements_data_offset", ">=", "elements_data_size", ")", ":", "break", "element_value", "=", "self", ".", "_element_data_type_map", ".", "MapByteStream", "(", "byte_stream", ",", "byte_offset", "=", "byte_offset", ",", "context", "=", "subcontext", ")", "byte_offset", "+=", "subcontext", ".", "byte_size", "elements_data_offset", "+=", "subcontext", ".", "byte_size", "element_index", "+=", "1", "mapped_values", ".", "append", "(", "element_value", ")", "if", "(", "elements_terminator", "is", "not", "None", "and", "element_value", "==", "elements_terminator", ")", ":", "break", "except", "errors", ".", "ByteStreamTooSmallError", "as", "exception", ":", "context_state", "[", "'context'", "]", "=", "subcontext", "context_state", "[", "'elements_data_offset'", "]", "=", "elements_data_offset", "context_state", "[", "'element_index'", "]", "=", "element_index", "context_state", "[", "'mapped_values'", "]", "=", "mapped_values", "raise", "errors", ".", "ByteStreamTooSmallError", "(", "exception", ")", "except", "Exception", "as", "exception", ":", "raise", "errors", ".", "MappingError", "(", "exception", ")", "if", "number_of_elements", "is", "not", "None", "and", "element_index", "!=", "number_of_elements", ":", "context_state", "[", "'context'", "]", "=", "subcontext", "context_state", "[", 
"'elements_data_offset'", "]", "=", "elements_data_offset", "context_state", "[", "'element_index'", "]", "=", "element_index", "context_state", "[", "'mapped_values'", "]", "=", "mapped_values", "error_string", "=", "(", "'Unable to read: {0:s} from byte stream at offset: {1:d} '", "'with error: missing element: {2:d}'", ")", ".", "format", "(", "self", ".", "_data_type_definition", ".", "name", ",", "byte_offset", ",", "element_index", "-", "1", ")", "raise", "errors", ".", "ByteStreamTooSmallError", "(", "error_string", ")", "if", "(", "elements_terminator", "is", "not", "None", "and", "element_value", "!=", "elements_terminator", "and", "(", "elements_data_size", "is", "None", "or", "elements_data_offset", "<", "elements_data_size", ")", ")", ":", "byte_stream_size", "=", "len", "(", "byte_stream", ")", "size_hints", "[", "self", ".", "_data_type_definition", ".", "name", "]", "=", "DataTypeMapSizeHint", "(", "byte_stream_size", "-", "byte_offset", ")", "context_state", "[", "'context'", "]", "=", "subcontext", "context_state", "[", "'elements_data_offset'", "]", "=", "elements_data_offset", "context_state", "[", "'element_index'", "]", "=", "element_index", "context_state", "[", "'mapped_values'", "]", "=", "mapped_values", "context_state", "[", "'size_hints'", "]", "=", "size_hints", "error_string", "=", "(", "'Unable to read: {0:s} from byte stream at offset: {1:d} '", "'with error: unable to find elements terminator'", ")", ".", "format", "(", "self", ".", "_data_type_definition", ".", "name", ",", "byte_offset", ")", "raise", "errors", ".", "ByteStreamTooSmallError", "(", "error_string", ")", "if", "context", ":", "context", ".", "byte_size", "=", "elements_data_offset", "context", ".", "state", "=", "{", "}", "return", "tuple", "(", "mapped_values", ")" ]
Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: ByteStreamTooSmallError: if the byte stream is too small. MappingError: if the data type definition cannot be mapped on the byte stream.
[ "Maps", "a", "sequence", "of", "composite", "data", "types", "on", "a", "byte", "stream", "." ]
python
train
PyGithub/PyGithub
github/Team.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Team.py#L235-L262
def edit(self, name, description=github.GithubObject.NotSet, permission=github.GithubObject.NotSet, privacy=github.GithubObject.NotSet): """ :calls: `PATCH /teams/:id <http://developer.github.com/v3/orgs/teams>`_ :param name: string :param description: string :param permission: string :param privacy: string :rtype: None """ assert isinstance(name, (str, unicode)), name assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description assert permission is github.GithubObject.NotSet or isinstance(permission, (str, unicode)), permission assert privacy is github.GithubObject.NotSet or isinstance(privacy, (str, unicode)), privacy post_parameters = { "name": name, } if description is not github.GithubObject.NotSet: post_parameters["description"] = description if permission is not github.GithubObject.NotSet: post_parameters["permission"] = permission if privacy is not github.GithubObject.NotSet: post_parameters["privacy"] = privacy headers, data = self._requester.requestJsonAndCheck( "PATCH", self.url, input=post_parameters ) self._useAttributes(data)
[ "def", "edit", "(", "self", ",", "name", ",", "description", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "permission", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "privacy", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "isinstance", "(", "name", ",", "(", "str", ",", "unicode", ")", ")", ",", "name", "assert", "description", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "description", ",", "(", "str", ",", "unicode", ")", ")", ",", "description", "assert", "permission", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "permission", ",", "(", "str", ",", "unicode", ")", ")", ",", "permission", "assert", "privacy", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "privacy", ",", "(", "str", ",", "unicode", ")", ")", ",", "privacy", "post_parameters", "=", "{", "\"name\"", ":", "name", ",", "}", "if", "description", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"description\"", "]", "=", "description", "if", "permission", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"permission\"", "]", "=", "permission", "if", "privacy", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"privacy\"", "]", "=", "privacy", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"PATCH\"", ",", "self", ".", "url", ",", "input", "=", "post_parameters", ")", "self", ".", "_useAttributes", "(", "data", ")" ]
:calls: `PATCH /teams/:id <http://developer.github.com/v3/orgs/teams>`_ :param name: string :param description: string :param permission: string :param privacy: string :rtype: None
[ ":", "calls", ":", "PATCH", "/", "teams", "/", ":", "id", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "orgs", "/", "teams", ">", "_", ":", "param", "name", ":", "string", ":", "param", "description", ":", "string", ":", "param", "permission", ":", "string", ":", "param", "privacy", ":", "string", ":", "rtype", ":", "None" ]
python
train
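A hedged usage sketch of Team.edit above; the token, organization and team id are placeholders:

    from github import Github

    g = Github("access_token")
    org = g.get_organization("my-org")
    team = org.get_team(1234)
    team.edit("platform-team", description="Owns the build pipeline",
              privacy="closed")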
inasafe/inasafe
safe/gui/widgets/field_mapping_tab.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/field_mapping_tab.py#L311-L329
def get_parameter_value(self): """Get parameter of the tab. :returns: Dictionary of parameters by type in this format: {'fields': {}, 'values': {}}. :rtype: dict """ parameters = self.parameter_container.get_parameters(True) field_parameters = {} value_parameters = {} for parameter in parameters: if parameter.selected_option_type() in [SINGLE_DYNAMIC, STATIC]: value_parameters[parameter.guid] = parameter.value elif parameter.selected_option_type() == MULTIPLE_DYNAMIC: field_parameters[parameter.guid] = parameter.value return { 'fields': field_parameters, 'values': value_parameters }
[ "def", "get_parameter_value", "(", "self", ")", ":", "parameters", "=", "self", ".", "parameter_container", ".", "get_parameters", "(", "True", ")", "field_parameters", "=", "{", "}", "value_parameters", "=", "{", "}", "for", "parameter", "in", "parameters", ":", "if", "parameter", ".", "selected_option_type", "(", ")", "in", "[", "SINGLE_DYNAMIC", ",", "STATIC", "]", ":", "value_parameters", "[", "parameter", ".", "guid", "]", "=", "parameter", ".", "value", "elif", "parameter", ".", "selected_option_type", "(", ")", "==", "MULTIPLE_DYNAMIC", ":", "field_parameters", "[", "parameter", ".", "guid", "]", "=", "parameter", ".", "value", "return", "{", "'fields'", ":", "field_parameters", ",", "'values'", ":", "value_parameters", "}" ]
Get parameter of the tab. :returns: Dictionary of parameters by type in this format: {'fields': {}, 'values': {}}. :rtype: dict
[ "Get", "parameter", "of", "the", "tab", "." ]
python
train
nutechsoftware/alarmdecoder
alarmdecoder/devices/usb_device.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/devices/usb_device.py#L303-L314
def close(self): """ Closes the device. """ try: Device.close(self) # HACK: Probably should fork pyftdi and make this call in .close() self._device.usb_dev.attach_kernel_driver(self._device_number) except Exception: pass
[ "def", "close", "(", "self", ")", ":", "try", ":", "Device", ".", "close", "(", "self", ")", "# HACK: Probably should fork pyftdi and make this call in .close()", "self", ".", "_device", ".", "usb_dev", ".", "attach_kernel_driver", "(", "self", ".", "_device_number", ")", "except", "Exception", ":", "pass" ]
Closes the device.
[ "Closes", "the", "device", "." ]
python
train
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L488-L510
def dict_entry_has_key(line, key): """Return True if `line` is a dict entry that uses `key`. Return False for multiline cases where the line should not be removed by itself. """ if '#' in line: return False result = re.match(r'\s*(.*)\s*:\s*(.*),\s*$', line) if not result: return False try: candidate_key = ast.literal_eval(result.group(1)) except (SyntaxError, ValueError): return False if multiline_statement(result.group(2)): return False return candidate_key == key
[ "def", "dict_entry_has_key", "(", "line", ",", "key", ")", ":", "if", "'#'", "in", "line", ":", "return", "False", "result", "=", "re", ".", "match", "(", "r'\\s*(.*)\\s*:\\s*(.*),\\s*$'", ",", "line", ")", "if", "not", "result", ":", "return", "False", "try", ":", "candidate_key", "=", "ast", ".", "literal_eval", "(", "result", ".", "group", "(", "1", ")", ")", "except", "(", "SyntaxError", ",", "ValueError", ")", ":", "return", "False", "if", "multiline_statement", "(", "result", ".", "group", "(", "2", ")", ")", ":", "return", "False", "return", "candidate_key", "==", "key" ]
Return True if `line` is a dict entry that uses `key`. Return False for multiline cases where the line should not be removed by itself.
[ "Return", "True", "if", "line", "is", "a", "dict", "entry", "that", "uses", "key", "." ]
python
test
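dict_entry_has_key above combines a regex with ast.literal_eval so that only literal keys match. A simplified standalone version of the check (it drops the multiline_statement guard, which is defined elsewhere in autoflake):

    import ast
    import re

    def literal_dict_entry_key(line):
        # Return the evaluated key of a one-line dict entry, else None.
        if '#' in line:
            return None
        m = re.match(r'\s*(.*)\s*:\s*(.*),\s*$', line)
        if not m:
            return None
        try:
            return ast.literal_eval(m.group(1))
        except (SyntaxError, ValueError):
            return None

    assert literal_dict_entry_key("    'answer': 42,\n") == 'answer'
    assert literal_dict_entry_key("    value: 42,\n") is None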
idlesign/django-sitetree
sitetree/utils.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/utils.py#L189-L206
def get_model_class(settings_entry_name): """Returns a certain sitetree model as defined in the project settings. :param str|unicode settings_entry_name: :rtype: TreeItemBase|TreeBase """ app_name, model_name = get_app_n_model(settings_entry_name) try: model = apps_get_model(app_name, model_name) except (LookupError, ValueError): model = None if model is None: raise ImproperlyConfigured( '`SITETREE_%s` refers to model `%s` that has not been installed.' % (settings_entry_name, model_name)) return model
[ "def", "get_model_class", "(", "settings_entry_name", ")", ":", "app_name", ",", "model_name", "=", "get_app_n_model", "(", "settings_entry_name", ")", "try", ":", "model", "=", "apps_get_model", "(", "app_name", ",", "model_name", ")", "except", "(", "LookupError", ",", "ValueError", ")", ":", "model", "=", "None", "if", "model", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "'`SITETREE_%s` refers to model `%s` that has not been installed.'", "%", "(", "settings_entry_name", ",", "model_name", ")", ")", "return", "model" ]
Returns a certain sitetree model as defined in the project settings. :param str|unicode settings_entry_name: :rtype: TreeItemBase|TreeBase
[ "Returns", "a", "certain", "sitetree", "model", "as", "defined", "in", "the", "project", "settings", "." ]
python
test
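get_model_class above resolves an "app_label.ModelName" string through Django's app registry. A hedged sketch of the core lookup, runnable only inside a configured Django project (the dotted name is illustrative):

    from django.apps import apps
    from django.core.exceptions import ImproperlyConfigured

    def load_model(dotted):
        app_label, model_name = dotted.split('.', 1)
        try:
            return apps.get_model(app_label, model_name)
        except (LookupError, ValueError):
            raise ImproperlyConfigured('Model %r is not installed.' % dotted)

    # Tree = load_model('sitetree.Tree')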
mitsei/dlkit
dlkit/json_/assessment_authoring/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/sessions.py#L968-L1011
def get_assessment_part_form_for_update(self, assessment_part_id): """Gets the assessment part form for updating an existing assessment part. A new assessment part form should be requested for each update transaction. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` return: (osid.assessment.authoring.AssessmentPartForm) - the assessment part form raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ collection = JSONClientValidated('assessment_authoring', collection='AssessmentPart', runtime=self._runtime) if not isinstance(assessment_part_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') if (assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart' or assessment_part_id.get_authority() != self._authority): raise errors.InvalidArgument() result = collection.find_one({'_id': ObjectId(assessment_part_id.get_identifier())}) mdata = {} if not result['assessmentPartId']: pass else: parent_part_id = Id(result['assessmentPartId']) mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True) lookup_session = mgr.get_assessment_part_lookup_session_for_bank(self._catalog_id, proxy=self._proxy) if lookup_session.get_assessment_parts_for_assessment_part(parent_part_id).available() > 1: mdata['sequestered']['is_read_only'] = True mdata['sequestered']['is_required'] = True obj_form = objects.AssessmentPartForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy, mdata=mdata) self._forms[obj_form.get_id().get_identifier()] = not UPDATED return obj_form
[ "def", "get_assessment_part_form_for_update", "(", "self", ",", "assessment_part_id", ")", ":", "collection", "=", "JSONClientValidated", "(", "'assessment_authoring'", ",", "collection", "=", "'AssessmentPart'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "not", "isinstance", "(", "assessment_part_id", ",", "ABCId", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the argument is not a valid OSID Id'", ")", "if", "(", "assessment_part_id", ".", "get_identifier_namespace", "(", ")", "!=", "'assessment_authoring.AssessmentPart'", "or", "assessment_part_id", ".", "get_authority", "(", ")", "!=", "self", ".", "_authority", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "result", "=", "collection", ".", "find_one", "(", "{", "'_id'", ":", "ObjectId", "(", "assessment_part_id", ".", "get_identifier", "(", ")", ")", "}", ")", "mdata", "=", "{", "}", "if", "not", "result", "[", "'assessmentPartId'", "]", ":", "pass", "else", ":", "parent_part_id", "=", "Id", "(", "result", "[", "'assessmentPartId'", "]", ")", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT_AUTHORING'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_assessment_part_lookup_session_for_bank", "(", "self", ".", "_catalog_id", ",", "proxy", "=", "self", ".", "_proxy", ")", "if", "lookup_session", ".", "get_assessment_parts_for_assessment_part", "(", "parent_part_id", ")", ".", "available", "(", ")", ">", "1", ":", "mdata", "[", "'sequestered'", "]", "[", "'is_read_only'", "]", "=", "True", "mdata", "[", "'sequestered'", "]", "[", "'is_required'", "]", "=", "True", "obj_form", "=", "objects", ".", "AssessmentPartForm", "(", "osid_object_map", "=", "result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ",", "mdata", "=", "mdata", ")", "self", ".", "_forms", "[", "obj_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "UPDATED", "return", "obj_form" ]
Gets the assessment part form for updating an existing assessment part. A new assessment part form should be requested for each update transaction. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` return: (osid.assessment.authoring.AssessmentPartForm) - the assessment part form raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "assessment", "part", "form", "for", "updating", "an", "existing", "assessment", "part", "." ]
python
train
brian-rose/climlab
climlab/convection/akmaev_adjustment.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/convection/akmaev_adjustment.py#L58-L129
def Akmaev_adjustment(theta, q, beta, n_k, theta_k, s_k, t_k): '''Single column only.''' L = q.size # number of vertical levels # Akmaev step 1 k = 1 n_k[k-1] = 1 theta_k[k-1] = theta[k-1] l = 2 while True: # Akmaev step 2 n = 1 thistheta = theta[l-1] while True: # Akmaev step 3 if theta_k[k-1] <= thistheta: # Akmaev step 6 k += 1 break # to step 7 else: if n <= 1: s = q[l-1] t = s*thistheta # Akmaev step 4 if n_k[k-1] <= 1: # lower adjacent level is not an earlier-formed neutral layer s_k[k-1] = q[l-n-1] t_k[k-1] = s_k[k-1] * theta_k[k-1] # Akmaev step 5 # join current and underlying layers n += n_k[k-1] s += s_k[k-1] t += t_k[k-1] s_k[k-1] = s t_k[k-1] = t thistheta = t/s if k==1: # joint neutral layer is the first one break # to step 7 k -= 1 # back to step 3 # Akmaev step 7 if l == L: # the scan is over break # to step 8 l += 1 n_k[k-1] = n theta_k[k-1] = thistheta # back to step 2 # update the potential temperatures while True: while True: # Akmaev step 8 if n==1: # current model level was not included in any neutral layer break # to step 11 while True: # Akmaev step 9 theta[l-1] = thistheta if n==1: break # Akmaev step 10 l -= 1 n -= 1 # back to step 9 # Akmaev step 11 if k==1: break k -= 1 l -= 1 n = n_k[k-1] thistheta = theta_k[k-1] # back to step 8 return theta
[ "def", "Akmaev_adjustment", "(", "theta", ",", "q", ",", "beta", ",", "n_k", ",", "theta_k", ",", "s_k", ",", "t_k", ")", ":", "L", "=", "q", ".", "size", "# number of vertical levels", "# Akmaev step 1", "k", "=", "1", "n_k", "[", "k", "-", "1", "]", "=", "1", "theta_k", "[", "k", "-", "1", "]", "=", "theta", "[", "k", "-", "1", "]", "l", "=", "2", "while", "True", ":", "# Akmaev step 2", "n", "=", "1", "thistheta", "=", "theta", "[", "l", "-", "1", "]", "while", "True", ":", "# Akmaev step 3", "if", "theta_k", "[", "k", "-", "1", "]", "<=", "thistheta", ":", "# Akmaev step 6", "k", "+=", "1", "break", "# to step 7", "else", ":", "if", "n", "<=", "1", ":", "s", "=", "q", "[", "l", "-", "1", "]", "t", "=", "s", "*", "thistheta", "# Akmaev step 4", "if", "n_k", "[", "k", "-", "1", "]", "<=", "1", ":", "# lower adjacent level is not an earlier-formed neutral layer", "s_k", "[", "k", "-", "1", "]", "=", "q", "[", "l", "-", "n", "-", "1", "]", "t_k", "[", "k", "-", "1", "]", "=", "s_k", "[", "k", "-", "1", "]", "*", "theta_k", "[", "k", "-", "1", "]", "# Akmaev step 5", "# join current and underlying layers", "n", "+=", "n_k", "[", "k", "-", "1", "]", "s", "+=", "s_k", "[", "k", "-", "1", "]", "t", "+=", "t_k", "[", "k", "-", "1", "]", "s_k", "[", "k", "-", "1", "]", "=", "s", "t_k", "[", "k", "-", "1", "]", "=", "t", "thistheta", "=", "t", "/", "s", "if", "k", "==", "1", ":", "# joint neutral layer is the first one", "break", "# to step 7", "k", "-=", "1", "# back to step 3", "# Akmaev step 7", "if", "l", "==", "L", ":", "# the scan is over", "break", "# to step 8", "l", "+=", "1", "n_k", "[", "k", "-", "1", "]", "=", "n", "theta_k", "[", "k", "-", "1", "]", "=", "thistheta", "# back to step 2", "# update the potential temperatures", "while", "True", ":", "while", "True", ":", "# Akmaev step 8", "if", "n", "==", "1", ":", "# current model level was not included in any neutral layer", "break", "# to step 11", "while", "True", ":", "# Akmaev step 9", "theta", "[", "l", "-", "1", "]", "=", "thistheta", "if", "n", "==", "1", ":", "break", "# Akmaev step 10", "l", "-=", "1", "n", "-=", "1", "# back to step 9", "# Akmaev step 11", "if", "k", "==", "1", ":", "break", "k", "-=", "1", "l", "-=", "1", "n", "=", "n_k", "[", "k", "-", "1", "]", "thistheta", "=", "theta_k", "[", "k", "-", "1", "]", "# back to step 8", "return", "theta" ]
Single column only.
[ "Single", "column", "only", "." ]
python
train
poppy-project/pypot
pypot/vrep/io.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L162-L167
def set_motor_force(self, motor_name, force): """ Sets the maximum force or torque that a joint can exert. """ self.call_remote_api('simxSetJointForce', self.get_object_handle(motor_name), force, sending=True)
[ "def", "set_motor_force", "(", "self", ",", "motor_name", ",", "force", ")", ":", "self", ".", "call_remote_api", "(", "'simxSetJointForce'", ",", "self", ".", "get_object_handle", "(", "motor_name", ")", ",", "force", ",", "sending", "=", "True", ")" ]
Sets the maximum force or torque that a joint can exert.
[ "Sets", "the", "maximum", "force", "or", "torque", "that", "a", "joint", "can", "exert", "." ]
python
train
Mxit/python-mxit
mxit/services.py
https://github.com/Mxit/python-mxit/blob/6b18a54ef6fbfe1f9d94755ba3d4ad77743c8b0c/mxit/services.py#L403-L417
def get_gallery_file(self, file_id, output_file_path=None, scope='content/read'): """ Get a file in the Mxit user's gallery User authentication required with the following scope: 'content/read' """ data = _get( token=self.oauth.get_user_token(scope), uri='/user/media/content/' + urllib.quote(file_id) ) if output_file_path: with open(output_file_path, 'w') as f: f.write(data) else: return data
[ "def", "get_gallery_file", "(", "self", ",", "file_id", ",", "output_file_path", "=", "None", ",", "scope", "=", "'content/read'", ")", ":", "data", "=", "_get", "(", "token", "=", "self", ".", "oauth", ".", "get_user_token", "(", "scope", ")", ",", "uri", "=", "'/user/media/content/'", "+", "urllib", ".", "quote", "(", "file_id", ")", ")", "if", "output_file_path", ":", "with", "open", "(", "output_file_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")", "else", ":", "return", "data" ]
Get a file in the Mxit user's gallery User authentication required with the following scope: 'content/read'
[ "Get", "a", "file", "in", "the", "Mxit", "user", "s", "gallery", "User", "authentication", "required", "with", "the", "following", "scope", ":", "content", "/", "read" ]
python
train
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/cached_source_provider.py
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/cached_source_provider.py#L140-L146
def stop(self, stop_context): """ Perform any logic on solution stop """ for p in self._providers: p.stop(stop_context) if self._clear_stop: self.clear_cache()
[ "def", "stop", "(", "self", ",", "stop_context", ")", ":", "for", "p", "in", "self", ".", "_providers", ":", "p", ".", "stop", "(", "stop_context", ")", "if", "self", ".", "_clear_stop", ":", "self", ".", "clear_cache", "(", ")" ]
Perform any logic on solution stop
[ "Perform", "any", "logic", "on", "solution", "stop" ]
python
train
gabstopper/smc-python
smc/vpn/elements.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/vpn/elements.py#L396-L407
def enable_disable_force_nat_t(self): """ Enable or disable NAT-T on this endpoint. If enabled, it will be disabled and vice versa. :return: None """ if self.force_nat_t: self.data['force_nat_t'] = False else: self.data['force_nat_t'] = True self.update()
[ "def", "enable_disable_force_nat_t", "(", "self", ")", ":", "if", "self", ".", "force_nat_t", ":", "self", ".", "data", "[", "'force_nat_t'", "]", "=", "False", "else", ":", "self", ".", "data", "[", "'force_nat_t'", "]", "=", "True", "self", ".", "update", "(", ")" ]
Enable or disable NAT-T on this endpoint. If enabled, it will be disabled and vice versa. :return: None
[ "Enable", "or", "disable", "NAT", "-", "T", "on", "this", "endpoint", ".", "If", "enabled", "it", "will", "be", "disabled", "and", "vice", "versa", "." ]
python
train
hotdogee/gff3-py
gff3/gff3.py
https://github.com/hotdogee/gff3-py/blob/d239bc9ed1eb7014c174f5fbed754f0f02d6e1b9/gff3/gff3.py#L785-L803
def descendants(self, line_data): """ BFS graph algorithm :param line_data: line_data(dict) with line_data['line_index'] or line_index(int) :return: list of line_data(dict) """ # get start node try: start = line_data['line_index'] except TypeError: start = self.lines[line_data]['line_index'] visited_set, visited_list, queue = set(), [], [start] while queue: node = queue.pop(0) if node not in visited_set: visited_set.add(node) visited_list.append(self.lines[node]) queue.extend([ld['line_index'] for ld in self.lines[node]['children'] if ld['line_index'] not in visited_set]) return visited_list[1:]
[ "def", "descendants", "(", "self", ",", "line_data", ")", ":", "# get start node", "try", ":", "start", "=", "line_data", "[", "'line_index'", "]", "except", "TypeError", ":", "start", "=", "self", ".", "lines", "[", "line_data", "]", "[", "'line_index'", "]", "visited_set", ",", "visited_list", ",", "queue", "=", "set", "(", ")", ",", "[", "]", ",", "[", "start", "]", "while", "queue", ":", "node", "=", "queue", ".", "pop", "(", "0", ")", "if", "node", "not", "in", "visited_set", ":", "visited_set", ".", "add", "(", "node", ")", "visited_list", ".", "append", "(", "self", ".", "lines", "[", "node", "]", ")", "queue", ".", "extend", "(", "[", "ld", "[", "'line_index'", "]", "for", "ld", "in", "self", ".", "lines", "[", "node", "]", "[", "'children'", "]", "if", "ld", "[", "'line_index'", "]", "not", "in", "visited_set", "]", ")", "return", "visited_list", "[", "1", ":", "]" ]
BFS graph algorithm :param line_data: line_data(dict) with line_data['line_index'] or line_index(int) :return: list of line_data(dict)
[ "BFS", "graph", "algorithm", ":", "param", "line_data", ":", "line_data", "(", "dict", ")", "with", "line_data", "[", "line_index", "]", "or", "line_index", "(", "int", ")", ":", "return", ":", "list", "of", "line_data", "(", "dict", ")" ]
python
valid
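descendants above is a textbook breadth-first traversal over child links. A self-contained sketch of the same algorithm using collections.deque for O(1) pops (the toy tree mimics GFF3 gene/mRNA/exon nesting):

    from collections import deque

    def bfs_descendants(children, start):
        # children maps node -> list of child nodes.
        seen, order, queue = {start}, [], deque([start])
        while queue:
            node = queue.popleft()
            order.append(node)
            for child in children.get(node, []):
                if child not in seen:
                    seen.add(child)
                    queue.append(child)
        return order[1:]  # exclude the start node, like the record above

    tree = {'gene1': ['mRNA1', 'mRNA2'], 'mRNA1': ['exon1'], 'mRNA2': []}
    assert bfs_descendants(tree, 'gene1') == ['mRNA1', 'mRNA2', 'exon1']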
cs50/style50
style50/_api.py
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L159-L179
def run_score(self): """ Run checks on self.files, printing raw percentage to stdout. """ diffs = 0 lines = 0 for file in self.files: try: results = self._check(file) except Error as e: termcolor.cprint(e.msg, "yellow", file=sys.stderr) continue diffs += results.diffs lines += results.lines try: print(max(1 - diffs / lines, 0.0)) except ZeroDivisionError: print(0.0)
[ "def", "run_score", "(", "self", ")", ":", "diffs", "=", "0", "lines", "=", "0", "for", "file", "in", "self", ".", "files", ":", "try", ":", "results", "=", "self", ".", "_check", "(", "file", ")", "except", "Error", "as", "e", ":", "termcolor", ".", "cprint", "(", "e", ".", "msg", ",", "\"yellow\"", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "diffs", "+=", "results", ".", "diffs", "lines", "+=", "results", ".", "lines", "try", ":", "print", "(", "max", "(", "1", "-", "diffs", "/", "lines", ",", "0.0", ")", ")", "except", "ZeroDivisionError", ":", "print", "(", "0.0", ")" ]
Run checks on self.files, printing raw percentage to stdout.
[ "Run", "checks", "on", "self", ".", "files", "printing", "raw", "percentage", "to", "stdout", "." ]
python
train
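The score printed by `run_score` is simply `max(1 - diffs / lines, 0.0)` with a zero-division guard for empty input. The same rule in isolation (the function name is mine):

```python
def style_score(diffs, lines):
    # the snippet's scoring rule: max(1 - diffs / lines, 0.0),
    # with 0.0 for empty input
    try:
        return max(1 - diffs / lines, 0.0)
    except ZeroDivisionError:
        return 0.0

print(style_score(3, 60))  # 0.95
print(style_score(0, 0))   # 0.0
```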
senaite/senaite.core
bika/lims/content/analysisservice.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisservice.py#L51-L118
def getContainers(instance, minvol=None, allow_blank=True, show_container_types=True, show_containers=True): """ Containers vocabulary This is a separate class so that it can be called from ajax to filter the container list, as well as being used as the AT field vocabulary. Returns a tuple of tuples: ((object_uid, object_title), ()) If the partition is flagged 'Separate', only containers are displayed. If the Separate flag is false, displays container types. XXX bsc = self.portal.bika_setup_catalog XXX obj = bsc(getKeyword='Moist')[0].getObject() XXX u'Container Type: Canvas bag' in obj.getContainers().values() XXX True """ bsc = getToolByName(instance, 'bika_setup_catalog') items = [['', _('Any')]] if allow_blank else [] containers = [] for container in bsc(portal_type='Container', sort_on='sortable_title'): container = container.getObject() # verify container capacity is large enough for required sample volume. if minvol is not None: capacity = container.getCapacity() try: capacity = capacity.split(' ', 1) capacity = mg(float(capacity[0]), capacity[1]) if capacity < minvol: continue except (ValueError, TypeError): # if there's a unit conversion error, allow the container # to be displayed. pass containers.append(container) if show_containers: # containers with no containertype first for container in containers: if not container.getContainerType(): items.append([container.UID(), container.Title()]) ts = getToolByName(instance, 'translation_service').translate cat_str = _c(ts(_('Container Type'))) containertypes = [c.getContainerType() for c in containers] containertypes = dict([(ct.UID(), ct.Title()) for ct in containertypes if ct]) for ctype_uid, ctype_title in containertypes.items(): ctype_title = _c(ctype_title) if show_container_types: items.append([ctype_uid, "%s: %s" % (cat_str, ctype_title)]) if show_containers: for container in containers: ctype = container.getContainerType() if ctype and ctype.UID() == ctype_uid: items.append([container.UID(), container.Title()]) items = tuple(items) return items
[ "def", "getContainers", "(", "instance", ",", "minvol", "=", "None", ",", "allow_blank", "=", "True", ",", "show_container_types", "=", "True", ",", "show_containers", "=", "True", ")", ":", "bsc", "=", "getToolByName", "(", "instance", ",", "'bika_setup_catalog'", ")", "items", "=", "[", "[", "''", ",", "_", "(", "'Any'", ")", "]", "]", "if", "allow_blank", "else", "[", "]", "containers", "=", "[", "]", "for", "container", "in", "bsc", "(", "portal_type", "=", "'Container'", ",", "sort_on", "=", "'sortable_title'", ")", ":", "container", "=", "container", ".", "getObject", "(", ")", "# verify container capacity is large enough for required sample volume.", "if", "minvol", "is", "not", "None", ":", "capacity", "=", "container", ".", "getCapacity", "(", ")", "try", ":", "capacity", "=", "capacity", ".", "split", "(", "' '", ",", "1", ")", "capacity", "=", "mg", "(", "float", "(", "capacity", "[", "0", "]", ")", ",", "capacity", "[", "1", "]", ")", "if", "capacity", "<", "minvol", ":", "continue", "except", "(", "ValueError", ",", "TypeError", ")", ":", "# if there's a unit conversion error, allow the container", "# to be displayed.", "pass", "containers", ".", "append", "(", "container", ")", "if", "show_containers", ":", "# containers with no containertype first", "for", "container", "in", "containers", ":", "if", "not", "container", ".", "getContainerType", "(", ")", ":", "items", ".", "append", "(", "[", "container", ".", "UID", "(", ")", ",", "container", ".", "Title", "(", ")", "]", ")", "ts", "=", "getToolByName", "(", "instance", ",", "'translation_service'", ")", ".", "translate", "cat_str", "=", "_c", "(", "ts", "(", "_", "(", "'Container Type'", ")", ")", ")", "containertypes", "=", "[", "c", ".", "getContainerType", "(", ")", "for", "c", "in", "containers", "]", "containertypes", "=", "dict", "(", "[", "(", "ct", ".", "UID", "(", ")", ",", "ct", ".", "Title", "(", ")", ")", "for", "ct", "in", "containertypes", "if", "ct", "]", ")", "for", "ctype_uid", ",", "ctype_title", "in", "containertypes", ".", "items", "(", ")", ":", "ctype_title", "=", "_c", "(", "ctype_title", ")", "if", "show_container_types", ":", "items", ".", "append", "(", "[", "ctype_uid", ",", "\"%s: %s\"", "%", "(", "cat_str", ",", "ctype_title", ")", "]", ")", "if", "show_containers", ":", "for", "container", "in", "containers", ":", "ctype", "=", "container", ".", "getContainerType", "(", ")", "if", "ctype", "and", "ctype", ".", "UID", "(", ")", "==", "ctype_uid", ":", "items", ".", "append", "(", "[", "container", ".", "UID", "(", ")", ",", "container", ".", "Title", "(", ")", "]", ")", "items", "=", "tuple", "(", "items", ")", "return", "items" ]
Containers vocabulary This is a separate class so that it can be called from ajax to filter the container list, as well as being used as the AT field vocabulary. Returns a tuple of tuples: ((object_uid, object_title), ()) If the partition is flagged 'Separate', only containers are displayed. If the Separate flag is false, displays container types. XXX bsc = self.portal.bika_setup_catalog XXX obj = bsc(getKeyword='Moist')[0].getObject() XXX u'Container Type: Canvas bag' in obj.getContainers().values() XXX True
[ "Containers", "vocabulary" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/easy_install.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/easy_install.py#L1306-L1314
def create_home_path(self): """Create directories under ~.""" if not self.user: return home = convert_path(os.path.expanduser("~")) for name, path in self.config_vars.iteritems(): if path.startswith(home) and not os.path.isdir(path): self.debug_print("os.makedirs('%s', 0700)" % path) os.makedirs(path, 0700)
[ "def", "create_home_path", "(", "self", ")", ":", "if", "not", "self", ".", "user", ":", "return", "home", "=", "convert_path", "(", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", ")", "for", "name", ",", "path", "in", "self", ".", "config_vars", ".", "iteritems", "(", ")", ":", "if", "path", ".", "startswith", "(", "home", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "self", ".", "debug_print", "(", "\"os.makedirs('%s', 0700)\"", "%", "path", ")", "os", ".", "makedirs", "(", "path", ",", "0700", ")" ]
Create directories under ~.
[ "Create", "directories", "under", "~", "." ]
python
test
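Note the snippet above is Python 2 code (`iteritems`, the octal literal `0700`). A rough Python 3 rendering of the same loop, assuming `config_vars` is an ordinary dict of paths:

```python
import os

def create_home_path(config_vars):
    # Python 3 sketch of the loop above: create any configured
    # path under $HOME that does not exist yet, with mode 0o700
    home = os.path.expanduser("~")
    for path in config_vars.values():
        if path.startswith(home) and not os.path.isdir(path):
            os.makedirs(path, 0o700)

# create_home_path({"data": os.path.expanduser("~/.example_pkg")})
```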
chrislit/abydos
abydos/compression/_arithmetic.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/compression/_arithmetic.py#L86-L148
def train(self, text): r"""Generate a probability dict from the provided text. Text to 0-order probability statistics as a dict Parameters ---------- text : str The text data over which to calculate probability statistics. This must not contain the NUL (0x00) character because that is used to indicate the end of data. Example ------- >>> ac = Arithmetic() >>> ac.train('the quick brown fox jumped over the lazy dog') >>> ac.get_probs() {' ': (Fraction(0, 1), Fraction(8, 45)), 'o': (Fraction(8, 45), Fraction(4, 15)), 'e': (Fraction(4, 15), Fraction(16, 45)), 'u': (Fraction(16, 45), Fraction(2, 5)), 't': (Fraction(2, 5), Fraction(4, 9)), 'r': (Fraction(4, 9), Fraction(22, 45)), 'h': (Fraction(22, 45), Fraction(8, 15)), 'd': (Fraction(8, 15), Fraction(26, 45)), 'z': (Fraction(26, 45), Fraction(3, 5)), 'y': (Fraction(3, 5), Fraction(28, 45)), 'x': (Fraction(28, 45), Fraction(29, 45)), 'w': (Fraction(29, 45), Fraction(2, 3)), 'v': (Fraction(2, 3), Fraction(31, 45)), 'q': (Fraction(31, 45), Fraction(32, 45)), 'p': (Fraction(32, 45), Fraction(11, 15)), 'n': (Fraction(11, 15), Fraction(34, 45)), 'm': (Fraction(34, 45), Fraction(7, 9)), 'l': (Fraction(7, 9), Fraction(4, 5)), 'k': (Fraction(4, 5), Fraction(37, 45)), 'j': (Fraction(37, 45), Fraction(38, 45)), 'i': (Fraction(38, 45), Fraction(13, 15)), 'g': (Fraction(13, 15), Fraction(8, 9)), 'f': (Fraction(8, 9), Fraction(41, 45)), 'c': (Fraction(41, 45), Fraction(14, 15)), 'b': (Fraction(14, 15), Fraction(43, 45)), 'a': (Fraction(43, 45), Fraction(44, 45)), '\x00': (Fraction(44, 45), Fraction(1, 1))} """ text = text_type(text) if '\x00' in text: text = text.replace('\x00', ' ') counts = Counter(text) counts['\x00'] = 1 tot_letters = sum(counts.values()) tot = 0 self._probs = {} prev = Fraction(0) for char, count in sorted( counts.items(), key=lambda x: (x[1], x[0]), reverse=True ): follow = Fraction(tot + count, tot_letters) self._probs[char] = (prev, follow) prev = follow tot = tot + count
[ "def", "train", "(", "self", ",", "text", ")", ":", "text", "=", "text_type", "(", "text", ")", "if", "'\\x00'", "in", "text", ":", "text", "=", "text", ".", "replace", "(", "'\\x00'", ",", "' '", ")", "counts", "=", "Counter", "(", "text", ")", "counts", "[", "'\\x00'", "]", "=", "1", "tot_letters", "=", "sum", "(", "counts", ".", "values", "(", ")", ")", "tot", "=", "0", "self", ".", "_probs", "=", "{", "}", "prev", "=", "Fraction", "(", "0", ")", "for", "char", ",", "count", "in", "sorted", "(", "counts", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "(", "x", "[", "1", "]", ",", "x", "[", "0", "]", ")", ",", "reverse", "=", "True", ")", ":", "follow", "=", "Fraction", "(", "tot", "+", "count", ",", "tot_letters", ")", "self", ".", "_probs", "[", "char", "]", "=", "(", "prev", ",", "follow", ")", "prev", "=", "follow", "tot", "=", "tot", "+", "count" ]
r"""Generate a probability dict from the provided text. Text to 0-order probability statistics as a dict Parameters ---------- text : str The text data over which to calculate probability statistics. This must not contain the NUL (0x00) character because that is used to indicate the end of data. Example ------- >>> ac = Arithmetic() >>> ac.train('the quick brown fox jumped over the lazy dog') >>> ac.get_probs() {' ': (Fraction(0, 1), Fraction(8, 45)), 'o': (Fraction(8, 45), Fraction(4, 15)), 'e': (Fraction(4, 15), Fraction(16, 45)), 'u': (Fraction(16, 45), Fraction(2, 5)), 't': (Fraction(2, 5), Fraction(4, 9)), 'r': (Fraction(4, 9), Fraction(22, 45)), 'h': (Fraction(22, 45), Fraction(8, 15)), 'd': (Fraction(8, 15), Fraction(26, 45)), 'z': (Fraction(26, 45), Fraction(3, 5)), 'y': (Fraction(3, 5), Fraction(28, 45)), 'x': (Fraction(28, 45), Fraction(29, 45)), 'w': (Fraction(29, 45), Fraction(2, 3)), 'v': (Fraction(2, 3), Fraction(31, 45)), 'q': (Fraction(31, 45), Fraction(32, 45)), 'p': (Fraction(32, 45), Fraction(11, 15)), 'n': (Fraction(11, 15), Fraction(34, 45)), 'm': (Fraction(34, 45), Fraction(7, 9)), 'l': (Fraction(7, 9), Fraction(4, 5)), 'k': (Fraction(4, 5), Fraction(37, 45)), 'j': (Fraction(37, 45), Fraction(38, 45)), 'i': (Fraction(38, 45), Fraction(13, 15)), 'g': (Fraction(13, 15), Fraction(8, 9)), 'f': (Fraction(8, 9), Fraction(41, 45)), 'c': (Fraction(41, 45), Fraction(14, 15)), 'b': (Fraction(14, 15), Fraction(43, 45)), 'a': (Fraction(43, 45), Fraction(44, 45)), '\x00': (Fraction(44, 45), Fraction(1, 1))}
[ "r", "Generate", "a", "probability", "dict", "from", "the", "provided", "text", "." ]
python
valid
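The `train` method above assigns each symbol a half-open interval of exact `Fraction` probabilities; consecutive intervals tile [0, 1). A cut-down sketch of that interval construction (without the snippet's NUL end-of-data symbol):

```python
from collections import Counter
from fractions import Fraction

def interval_table(text):
    # same cumulative-interval construction as ``train`` above,
    # minus its NUL end-of-data symbol
    counts = Counter(text)
    total = sum(counts.values())
    probs, prev = {}, Fraction(0)
    for char, count in sorted(counts.items(),
                              key=lambda x: (x[1], x[0]), reverse=True):
        follow = prev + Fraction(count, total)
        probs[char] = (prev, follow)
        prev = follow
    return probs

print(interval_table("aab"))
# {'a': (Fraction(0, 1), Fraction(2, 3)), 'b': (Fraction(2, 3), Fraction(1, 1))}
```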
matthewdeanmartin/jiggle_version
build.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/build.py#L294-L303
def docs(): """ Build the HTML documentation. """ with safe_cd(SRC): with safe_cd("docs"): my_env = config_pythonpath() command = "{0} make html".format(PIPENV).strip() print(command) execute_with_environment(command, env=my_env)
[ "def", "docs", "(", ")", ":", "with", "safe_cd", "(", "SRC", ")", ":", "with", "safe_cd", "(", "\"docs\"", ")", ":", "my_env", "=", "config_pythonpath", "(", ")", "command", "=", "\"{0} make html\"", ".", "format", "(", "PIPENV", ")", ".", "strip", "(", ")", "print", "(", "command", ")", "execute_with_environment", "(", "command", ",", "env", "=", "my_env", ")" ]
Build the HTML documentation.
[ "Docs" ]
python
train
kytos/kytos-utils
kytos/utils/napps.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/napps.py#L565-L575
def reload(self, napps=None): """Reload a NApp or all NApps. Args: napps (list): NApp list to be reloaded. Raises: requests.HTTPError: When there's a server error. """ client = NAppsClient(self._config) client.reload_napps(napps)
[ "def", "reload", "(", "self", ",", "napps", "=", "None", ")", ":", "client", "=", "NAppsClient", "(", "self", ".", "_config", ")", "client", ".", "reload_napps", "(", "napps", ")" ]
Reload a NApp or all NApps. Args: napps (list): NApp list to be reloaded. Raises: requests.HTTPError: When there's a server error.
[ "Reload", "a", "NApp", "or", "all", "NApps", "." ]
python
train
riga/scinum
scinum.py
https://github.com/riga/scinum/blob/55eb6d8aa77beacee5a07443392954b8a0aad8cb/scinum.py#L1629-L1644
def infer_si_prefix(f): """ Infers the SI prefix of a value *f* and returns the string label and decimal magnitude in a 2-tuple. Example: .. code-block:: python infer_si_prefix(1) # -> ("", 0) infer_si_prefix(25) # -> ("", 0) infer_si_prefix(4320) # -> ("k", 3) """ if f == 0: return "", 0 else: mag = 3 * int(math.log10(abs(float(f))) // 3) return si_refixes[mag], mag
[ "def", "infer_si_prefix", "(", "f", ")", ":", "if", "f", "==", "0", ":", "return", "\"\"", ",", "0", "else", ":", "mag", "=", "3", "*", "int", "(", "math", ".", "log10", "(", "abs", "(", "float", "(", "f", ")", ")", ")", "//", "3", ")", "return", "si_refixes", "[", "mag", "]", ",", "mag" ]
Infers the SI prefix of a value *f* and returns the string label and decimal magnitude in a 2-tuple. Example: .. code-block:: python infer_si_prefix(1) # -> ("", 0) infer_si_prefix(25) # -> ("", 0) infer_si_prefix(4320) # -> ("k", 3)
[ "Infers", "the", "SI", "prefix", "of", "a", "value", "*", "f", "*", "and", "returns", "the", "string", "label", "and", "decimal", "magnitude", "in", "a", "2", "-", "tuple", ".", "Example", ":" ]
python
train
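The magnitude in `infer_si_prefix` is the largest multiple of 3 not exceeding `log10(|f|)`. A self-contained sketch; the module's own prefix table (referenced as `si_refixes` in the snippet) is not shown in this record, so this version defines a small mapping of its own:

```python
import math

# hypothetical prefix table; the module's own mapping (``si_refixes``)
# is not part of this record
PREFIXES = {-9: "n", -6: "u", -3: "m", 0: "", 3: "k", 6: "M", 9: "G"}

def si_prefix(f):
    if f == 0:
        return "", 0
    mag = 3 * int(math.log10(abs(float(f))) // 3)
    return PREFIXES[mag], mag

print(si_prefix(4320))   # ('k', 3)
print(si_prefix(0.004))  # ('m', -3)
```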
bookieio/breadability
breadability/readable.py
https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L55-L64
def ok_embedded_video(node): """Check if this embed/video is an ok one to count.""" good_keywords = ('youtube', 'blip.tv', 'vimeo') node_str = tounicode(node) for key in good_keywords: if key in node_str: return True return False
[ "def", "ok_embedded_video", "(", "node", ")", ":", "good_keywords", "=", "(", "'youtube'", ",", "'blip.tv'", ",", "'vimeo'", ")", "node_str", "=", "tounicode", "(", "node", ")", "for", "key", "in", "good_keywords", ":", "if", "key", "in", "node_str", ":", "return", "True", "return", "False" ]
Check if this embed/video is an ok one to count.
[ "Check", "if", "this", "embed", "/", "video", "is", "an", "ok", "one", "to", "count", "." ]
python
train
icgood/pymap
pymap/config.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/config.py#L110-L122
def from_args(cls: Type[ConfigT], args: Namespace) -> ConfigT: """Build and return a new :class:`IMAPConfig` using command-line arguments. Args: args: The arguments parsed from the command-line. """ parsed_args = cls.parse_args(args) return cls(args, host=args.host, port=args.port, debug=args.debug, reject_insecure_auth=not args.insecure_login, cert_file=args.cert, key_file=args.key, **parsed_args)
[ "def", "from_args", "(", "cls", ":", "Type", "[", "ConfigT", "]", ",", "args", ":", "Namespace", ")", "->", "ConfigT", ":", "parsed_args", "=", "cls", ".", "parse_args", "(", "args", ")", "return", "cls", "(", "args", ",", "host", "=", "args", ".", "host", ",", "port", "=", "args", ".", "port", ",", "debug", "=", "args", ".", "debug", ",", "reject_insecure_auth", "=", "not", "args", ".", "insecure_login", ",", "cert_file", "=", "args", ".", "cert", ",", "key_file", "=", "args", ".", "key", ",", "*", "*", "parsed_args", ")" ]
Build and return a new :class:`IMAPConfig` using command-line arguments. Args: args: The arguments parsed from the command-line.
[ "Build", "and", "return", "a", "new", ":", "class", ":", "IMAPConfig", "using", "command", "-", "line", "arguments", "." ]
python
train
pricingassistant/mrq
mrq/queue_raw.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L147-L159
def count_jobs_to_dequeue(self): """ Returns the number of jobs that can be dequeued right now from the queue. """ # timed ZSET if self.is_timed: return context.connections.redis.zcount( self.redis_key, "-inf", time.time()) # In all other cases, it's the same as .size() else: return self.size()
[ "def", "count_jobs_to_dequeue", "(", "self", ")", ":", "# timed ZSET", "if", "self", ".", "is_timed", ":", "return", "context", ".", "connections", ".", "redis", ".", "zcount", "(", "self", ".", "redis_key", ",", "\"-inf\"", ",", "time", ".", "time", "(", ")", ")", "# In all other cases, it's the same as .size()", "else", ":", "return", "self", ".", "size", "(", ")" ]
Returns the number of jobs that can be dequeued right now from the queue.
[ "Returns", "the", "number", "of", "jobs", "that", "can", "be", "dequeued", "right", "now", "from", "the", "queue", "." ]
python
train
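For the timed-ZSET branch, `zcount(key, "-inf", now)` counts members whose score (their ready timestamp) is at or below the current time. A pure-Python stand-in over `(job, ready_at)` pairs (names are mine, not mrq's API):

```python
import time

def count_dequeueable(jobs, now=None):
    # pure-Python stand-in for ZCOUNT(key, "-inf", now): count
    # members whose score (ready timestamp) is <= now
    now = time.time() if now is None else now
    return sum(1 for _, ready_at in jobs if ready_at <= now)

print(count_dequeueable([("job-a", 0), ("job-b", 10)], now=5))  # 1
```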
portfors-lab/sparkle
sparkle/run/calibration_runner.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/calibration_runner.py#L103-L109
def set_duration(self, dur): """See :meth:`AbstractCalibrationRunner<sparkle.run.calibration_runner.AbstractCalibrationRunner.set_duration>`""" # this may be set at any time, and is not checked before run, so set # all stim components for comp in self.stim_components: comp.setDuration(dur) self.reftone.setDuration(dur)
[ "def", "set_duration", "(", "self", ",", "dur", ")", ":", "# this may be set at any time, and is not checked before run, so set", "# all stim components", "for", "comp", "in", "self", ".", "stim_components", ":", "comp", ".", "setDuration", "(", "dur", ")", "self", ".", "reftone", ".", "setDuration", "(", "dur", ")" ]
See :meth:`AbstractCalibrationRunner<sparkle.run.calibration_runner.AbstractCalibrationRunner.set_duration>`
[ "See", ":", "meth", ":", "AbstractCalibrationRunner<sparkle", ".", "run", ".", "calibration_runner", ".", "AbstractCalibrationRunner", ".", "set_duration", ">" ]
python
train
hydraplatform/hydra-base
hydra_base/lib/units.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/units.py#L363-L376
def delete_dimension(dimension_id,**kwargs): """ Delete a dimension from the DB. Raises an exception if the dimension does not exist """ try: dimension = db.DBSession.query(Dimension).filter(Dimension.id==dimension_id).one() db.DBSession.query(Unit).filter(Unit.dimension_id==dimension.id).delete() db.DBSession.delete(dimension) db.DBSession.flush() return True except NoResultFound: raise ResourceNotFoundError("Dimension (dimension_id=%s) does not exist"%(dimension_id))
[ "def", "delete_dimension", "(", "dimension_id", ",", "*", "*", "kwargs", ")", ":", "try", ":", "dimension", "=", "db", ".", "DBSession", ".", "query", "(", "Dimension", ")", ".", "filter", "(", "Dimension", ".", "id", "==", "dimension_id", ")", ".", "one", "(", ")", "db", ".", "DBSession", ".", "query", "(", "Unit", ")", ".", "filter", "(", "Unit", ".", "dimension_id", "==", "dimension", ".", "id", ")", ".", "delete", "(", ")", "db", ".", "DBSession", ".", "delete", "(", "dimension", ")", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "True", "except", "NoResultFound", ":", "raise", "ResourceNotFoundError", "(", "\"Dimension (dimension_id=%s) does not exist\"", "%", "(", "dimension_id", ")", ")" ]
Delete a dimension from the DB. Raises an exception if the dimension does not exist
[ "Delete", "a", "dimension", "from", "the", "DB", ".", "Raises", "and", "exception", "if", "the", "dimension", "does", "not", "exist" ]
python
train
googlefonts/ufo2ft
Lib/ufo2ft/util.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/util.py#L221-L229
def closeGlyphsOverGSUB(gsub, glyphs): """ Use the FontTools subsetter to perform a closure over the GSUB table given the initial `glyphs` (set of glyph names, str). Update the set in-place adding all the glyph names that can be reached via GSUB substitutions from this initial set. """ subsetter = subset.Subsetter() subsetter.glyphs = glyphs gsub.closure_glyphs(subsetter)
[ "def", "closeGlyphsOverGSUB", "(", "gsub", ",", "glyphs", ")", ":", "subsetter", "=", "subset", ".", "Subsetter", "(", ")", "subsetter", ".", "glyphs", "=", "glyphs", "gsub", ".", "closure_glyphs", "(", "subsetter", ")" ]
Use the FontTools subsetter to perform a closure over the GSUB table given the initial `glyphs` (set of glyph names, str). Update the set in-place adding all the glyph names that can be reached via GSUB substitutions from this initial set.
[ "Use", "the", "FontTools", "subsetter", "to", "perform", "a", "closure", "over", "the", "GSUB", "table", "given", "the", "initial", "glyphs", "(", "set", "of", "glyph", "names", "str", ")", ".", "Update", "the", "set", "in", "-", "place", "adding", "all", "the", "glyph", "names", "that", "can", "be", "reached", "via", "GSUB", "substitutions", "from", "this", "initial", "set", "." ]
python
train
django-fluent/django-fluent-blogs
fluent_blogs/sitemaps.py
https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/sitemaps.py#L86-L89
def lastmod(self, tag): """Return the last modification of the entry.""" lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date') return lastitems[0].modification_date
[ "def", "lastmod", "(", "self", ",", "tag", ")", ":", "lastitems", "=", "EntryModel", ".", "objects", ".", "published", "(", ")", ".", "order_by", "(", "'-modification_date'", ")", ".", "filter", "(", "tags", "=", "tag", ")", ".", "only", "(", "'modification_date'", ")", "return", "lastitems", "[", "0", "]", ".", "modification_date" ]
Return the last modification of the entry.
[ "Return", "the", "last", "modification", "of", "the", "entry", "." ]
python
train
acsone/git-aggregator
git_aggregator/repo.py
https://github.com/acsone/git-aggregator/blob/8631b0e64f9e8ce1857b21adeddb890ebd8469a6/git_aggregator/repo.py#L153-L162
def log_call(self, cmd, callwith=subprocess.check_call, log_level=logging.DEBUG, **kw): """Wrap a subprocess call with logging :param callwith: the calling method to use. """ logger.log(log_level, "%s> call %r", self.cwd, cmd) ret = callwith(cmd, **kw) if callwith == subprocess.check_output: ret = console_to_str(ret) return ret
[ "def", "log_call", "(", "self", ",", "cmd", ",", "callwith", "=", "subprocess", ".", "check_call", ",", "log_level", "=", "logging", ".", "DEBUG", ",", "*", "*", "kw", ")", ":", "logger", ".", "log", "(", "log_level", ",", "\"%s> call %r\"", ",", "self", ".", "cwd", ",", "cmd", ")", "ret", "=", "callwith", "(", "cmd", ",", "*", "*", "kw", ")", "if", "callwith", "==", "subprocess", ".", "check_output", ":", "ret", "=", "console_to_str", "(", "ret", ")", "return", "ret" ]
Wrap a subprocess call with logging :param callwith: the calling method to use.
[ "Wrap", "a", "subprocess", "call", "with", "logging", ":", "param", "meth", ":", "the", "calling", "method", "to", "use", "." ]
python
train
JonathanRaiman/pytreebank
pytreebank/parse.py
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L163-L187
def load_sst(path=None, url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'): """ Download and read in the Stanford Sentiment Treebank dataset into a dictionary with a 'train', 'dev', and 'test' keys. The dictionary keys point to lists of LabeledTrees. Arguments: ---------- path : str, (optional defaults to ~/stanford_sentiment_treebank), directory where the corpus should be downloaded (and imported from). url : str, where the corpus should be downloaded from (defaults to nlp.stanford.edu address). Returns: -------- dict : loaded dataset """ if path is None: # find a good temporary path path = os.path.expanduser("~/stanford_sentiment_treebank/") makedirs(path, exist_ok=True) fnames = download_sst(path, url) return {key: import_tree_corpus(value) for key, value in fnames.items()}
[ "def", "load_sst", "(", "path", "=", "None", ",", "url", "=", "'http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'", ")", ":", "if", "path", "is", "None", ":", "# find a good temporary path", "path", "=", "os", ".", "path", ".", "expanduser", "(", "\"~/stanford_sentiment_treebank/\"", ")", "makedirs", "(", "path", ",", "exist_ok", "=", "True", ")", "fnames", "=", "download_sst", "(", "path", ",", "url", ")", "return", "{", "key", ":", "import_tree_corpus", "(", "value", ")", "for", "key", ",", "value", "in", "fnames", ".", "items", "(", ")", "}" ]
Download and read in the Stanford Sentiment Treebank dataset into a dictionary with a 'train', 'dev', and 'test' keys. The dictionary keys point to lists of LabeledTrees. Arguments: ---------- path : str, (optional defaults to ~/stanford_sentiment_treebank), directory where the corpus should be downloaded (and imported from). url : str, where the corpus should be downloaded from (defaults to nlp.stanford.edu address). Returns: -------- dict : loaded dataset
[ "Download", "and", "read", "in", "the", "Stanford", "Sentiment", "Treebank", "dataset", "into", "a", "dictionary", "with", "a", "train", "dev", "and", "test", "keys", ".", "The", "dictionary", "keys", "point", "to", "lists", "of", "LabeledTrees", "." ]
python
train
ClericPy/torequests
torequests/utils.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/utils.py#L347-L401
def timeago(seconds=0, accuracy=4, format=0, lang="en"): """Translate seconds into human-readable. :param seconds: seconds (float/int). :param accuracy: 4 by default (units[:accuracy]), determine the length of elements. :param format: index of [led, literal, dict]. :param lang: en or cn. :param units: day, hour, minute, second, ms. >>> timeago(93245732.0032424, 5) '1079 days, 05:35:32,003' >>> timeago(93245732.0032424, 4, 1) '1079 days 5 hours 35 minutes 32 seconds' >>> timeago(-389, 4, 1) '-6 minutes 29 seconds 0 ms' """ assert format in [0, 1, 2], ValueError("format arg should be one of 0, 1, 2") negative = "-" if seconds < 0 else "" seconds = abs(seconds) if lang == "en": units = ("day", "hour", "minute", "second", "ms") elif lang == "cn": units = (u"天", u"小时", u"分钟", u"秒", u"毫秒") times = split_seconds(seconds) if format == 2: return dict(zip(units, times)) day, hour, minute, second, ms = times if format == 0: day_str = ( "%d %s%s, " % (day, units[0], "s" if day > 1 and lang == "en" else "") if day else "" ) mid_str = ":".join(("%02d" % i for i in (hour, minute, second))) if accuracy > 4: mid_str += ",%03d" % ms return negative + day_str + mid_str elif format == 1: # find longest valid fields index (non-zero in front) valid_index = 0 for x, i in enumerate(times): if i > 0: valid_index = x break else: valid_index = x result_str = [ "%d %s%s" % (num, unit, "s" if lang == "en" and num > 1 and unit != "ms" else "") for num, unit in zip(times, units) ][valid_index:][:accuracy] result_str = " ".join(result_str) return negative + result_str
[ "def", "timeago", "(", "seconds", "=", "0", ",", "accuracy", "=", "4", ",", "format", "=", "0", ",", "lang", "=", "\"en\"", ")", ":", "assert", "format", "in", "[", "0", ",", "1", ",", "2", "]", ",", "ValueError", "(", "\"format arg should be one of 0, 1, 2\"", ")", "negative", "=", "\"-\"", "if", "seconds", "<", "0", "else", "\"\"", "seconds", "=", "abs", "(", "seconds", ")", "if", "lang", "==", "\"en\"", ":", "units", "=", "(", "\"day\"", ",", "\"hour\"", ",", "\"minute\"", ",", "\"second\"", ",", "\"ms\"", ")", "elif", "lang", "==", "\"cn\"", ":", "units", "=", "(", "u\"天\", ", "u", "小时\", u\"分钟", "\"", " u\"秒\", u\"", "毫", "\")", "", "", "", "times", "=", "split_seconds", "(", "seconds", ")", "if", "format", "==", "2", ":", "return", "dict", "(", "zip", "(", "units", ",", "times", ")", ")", "day", ",", "hour", ",", "minute", ",", "second", ",", "ms", "=", "times", "if", "format", "==", "0", ":", "day_str", "=", "(", "\"%d %s%s, \"", "%", "(", "day", ",", "units", "[", "0", "]", ",", "\"s\"", "if", "day", ">", "1", "and", "lang", "==", "\"en\"", "else", "\"\"", ")", "if", "day", "else", "\"\"", ")", "mid_str", "=", "\":\"", ".", "join", "(", "(", "\"%02d\"", "%", "i", "for", "i", "in", "(", "hour", ",", "minute", ",", "second", ")", ")", ")", "if", "accuracy", ">", "4", ":", "mid_str", "+=", "\",%03d\"", "%", "ms", "return", "negative", "+", "day_str", "+", "mid_str", "elif", "format", "==", "1", ":", "# find longest valid fields index (non-zero in front)", "valid_index", "=", "0", "for", "x", ",", "i", "in", "enumerate", "(", "times", ")", ":", "if", "i", ">", "0", ":", "valid_index", "=", "x", "break", "else", ":", "valid_index", "=", "x", "result_str", "=", "[", "\"%d %s%s\"", "%", "(", "num", ",", "unit", ",", "\"s\"", "if", "lang", "==", "\"en\"", "and", "num", ">", "1", "and", "unit", "!=", "\"ms\"", "else", "\"\"", ")", "for", "num", ",", "unit", "in", "zip", "(", "times", ",", "units", ")", "]", "[", "valid_index", ":", "]", "[", ":", "accuracy", "]", "result_str", "=", "\" \"", ".", "join", "(", "result_str", ")", "return", "negative", "+", "result_str" ]
Translate seconds into human-readable. :param seconds: seconds (float/int). :param accuracy: 4 by default (units[:accuracy]), determine the length of elements. :param format: index of [led, literal, dict]. :param lang: en or cn. :param units: day, hour, minute, second, ms. >>> timeago(93245732.0032424, 5) '1079 days, 05:35:32,003' >>> timeago(93245732.0032424, 4, 1) '1079 days 5 hours 35 minutes 32 seconds' >>> timeago(-389, 4, 1) '-6 minutes 29 seconds 0 ms'
[ "Translate", "seconds", "into", "human", "-", "readable", "." ]
python
train
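`timeago` delegates to a `split_seconds` helper that is not part of this record; presumably it divmods seconds into `(day, hour, minute, second, ms)`. A plausible stand-in plus the format-0 assembly, reproducing the docstring's first example:

```python
def split_seconds(seconds):
    # plausible stand-in for the helper the snippet calls (it is
    # not shown in this record): day, hour, minute, second, ms
    ms = round((seconds - int(seconds)) * 1000)
    seconds = int(seconds)
    minute, second = divmod(seconds, 60)
    hour, minute = divmod(minute, 60)
    day, hour = divmod(hour, 24)
    return day, hour, minute, second, ms

day, hour, minute, second, ms = split_seconds(93245732.0032424)
print("%d days, %02d:%02d:%02d,%03d" % (day, hour, minute, second, ms))
# 1079 days, 05:35:32,003
```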
PyAr/fades
fades/cache.py
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L200-L208
def _read_cache(self): """Read virtualenv metadata from cache.""" if os.path.exists(self.filepath): with open(self.filepath, 'rt', encoding='utf8') as fh: lines = [x.strip() for x in fh] else: logger.debug("Index not found, starting empty") lines = [] return lines
[ "def", "_read_cache", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "filepath", ")", ":", "with", "open", "(", "self", ".", "filepath", ",", "'rt'", ",", "encoding", "=", "'utf8'", ")", "as", "fh", ":", "lines", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "fh", "]", "else", ":", "logger", ".", "debug", "(", "\"Index not found, starting empty\"", ")", "lines", "=", "[", "]", "return", "lines" ]
Read virtualenv metadata from cache.
[ "Read", "virtualenv", "metadata", "from", "cache", "." ]
python
train
dw/mitogen
ansible_mitogen/connection.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L860-L869
def spawn_isolated_child(self): """ Fork or launch a new child off the target context. :returns: mitogen.core.Context of the new child. """ return self.get_chain(use_fork=True).call( ansible_mitogen.target.spawn_isolated_child )
[ "def", "spawn_isolated_child", "(", "self", ")", ":", "return", "self", ".", "get_chain", "(", "use_fork", "=", "True", ")", ".", "call", "(", "ansible_mitogen", ".", "target", ".", "spawn_isolated_child", ")" ]
Fork or launch a new child off the target context. :returns: mitogen.core.Context of the new child.
[ "Fork", "or", "launch", "a", "new", "child", "off", "the", "target", "context", "." ]
python
train
ucsb-cs/submit
submit/models.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L723-L725
def verify(self, base_path, update=False): """Verify the submission and return testables that can be executed.""" return self.project.verify_submission(base_path, self, update=update)
[ "def", "verify", "(", "self", ",", "base_path", ",", "update", "=", "False", ")", ":", "return", "self", ".", "project", ".", "verify_submission", "(", "base_path", ",", "self", ",", "update", "=", "update", ")" ]
Verify the submission and return testables that can be executed.
[ "Verify", "the", "submission", "and", "return", "testables", "that", "can", "be", "executed", "." ]
python
train
pystorm/pystorm
pystorm/component.py
https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L544-L550
def _exit(self, status_code): """Properly kill Python process including zombie threads.""" # If there are active threads still running infinite loops, sys.exit # won't kill them but os._exit will. os._exit skips calling cleanup # handlers, flushing stdio buffers, etc. exit_func = os._exit if threading.active_count() > 1 else sys.exit exit_func(status_code)
[ "def", "_exit", "(", "self", ",", "status_code", ")", ":", "# If there are active threads still running infinite loops, sys.exit", "# won't kill them but os._exit will. os._exit skips calling cleanup", "# handlers, flushing stdio buffers, etc.", "exit_func", "=", "os", ".", "_exit", "if", "threading", ".", "active_count", "(", ")", ">", "1", "else", "sys", ".", "exit", "exit_func", "(", "status_code", ")" ]
Properly kill Python process including zombie threads.
[ "Properly", "kill", "Python", "process", "including", "zombie", "threads", "." ]
python
train
bitlabstudio/django-development-fabfile
development_fabfile/fabfile/local.py
https://github.com/bitlabstudio/django-development-fabfile/blob/a135c6eb5bdd0b496a7eccfd271aca558dd99243/development_fabfile/fabfile/local.py#L42-L61
def check_coverage(): """Checks if the coverage is 100%.""" with lcd(settings.LOCAL_COVERAGE_PATH): total_line = local('grep -n Total index.html', capture=True) match = re.search(r'^(\d+):', total_line) total_line_number = int(match.groups()[0]) percentage_line_number = total_line_number + 5 percentage_line = local( 'awk NR=={0} index.html'.format(percentage_line_number), capture=True) match = re.search(r'(\d.+)%', percentage_line) try: percentage = float(match.groups()[0]) except ValueError: # If there's no dotting try another search match = re.search(r'(\d+)%', percentage_line) percentage = float(match.groups()[0]) if percentage < 100: abort(red('Coverage is {0}%'.format(percentage))) print(green('Coverage is {0}%'.format(percentage)))
[ "def", "check_coverage", "(", ")", ":", "with", "lcd", "(", "settings", ".", "LOCAL_COVERAGE_PATH", ")", ":", "total_line", "=", "local", "(", "'grep -n Total index.html'", ",", "capture", "=", "True", ")", "match", "=", "re", ".", "search", "(", "r'^(\\d+):'", ",", "total_line", ")", "total_line_number", "=", "int", "(", "match", ".", "groups", "(", ")", "[", "0", "]", ")", "percentage_line_number", "=", "total_line_number", "+", "5", "percentage_line", "=", "local", "(", "'awk NR=={0} index.html'", ".", "format", "(", "percentage_line_number", ")", ",", "capture", "=", "True", ")", "match", "=", "re", ".", "search", "(", "r'(\\d.+)%'", ",", "percentage_line", ")", "try", ":", "percentage", "=", "float", "(", "match", ".", "groups", "(", ")", "[", "0", "]", ")", "except", "ValueError", ":", "# If there's no dotting try another search", "match", "=", "re", ".", "search", "(", "r'(\\d+)%'", ",", "percentage_line", ")", "percentage", "=", "float", "(", "match", ".", "groups", "(", ")", "[", "0", "]", ")", "if", "percentage", "<", "100", ":", "abort", "(", "red", "(", "'Coverage is {0}%'", ".", "format", "(", "percentage", ")", ")", ")", "print", "(", "green", "(", "'Coverage is {0}%'", ".", "format", "(", "percentage", ")", ")", ")" ]
Checks if the coverage is 100%.
[ "Checks", "if", "the", "coverage", "is", "100%", "." ]
python
train
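The percentage extraction in `check_coverage` tries a decimal-friendly pattern first and falls back to an integer-only one when `float()` rejects the capture. The same two-step parse in isolation:

```python
import re

def parse_percentage(line):
    # the snippet's two-step parse: greedy decimal pattern first,
    # integer-only fallback when float() rejects the capture
    match = re.search(r'(\d.+)%', line)
    try:
        return float(match.groups()[0])
    except ValueError:
        match = re.search(r'(\d+)%', line)
        return float(match.groups()[0])

print(parse_percentage("Total 97.5%"))   # 97.5
print(parse_percentage("9 out of 10%"))  # 10.0, via the fallback
```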
pyvisa/pyvisa
pyvisa/resources/resource.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/resources/resource.py#L176-L183
def resource_info(self): """Get the extended information of this resource. :param resource_name: Unique symbolic name of a resource. :rtype: :class:`pyvisa.highlevel.ResourceInfo` """ return self.visalib.parse_resource_extended(self._resource_manager.session, self.resource_name)
[ "def", "resource_info", "(", "self", ")", ":", "return", "self", ".", "visalib", ".", "parse_resource_extended", "(", "self", ".", "_resource_manager", ".", "session", ",", "self", ".", "resource_name", ")" ]
Get the extended information of this resource. :param resource_name: Unique symbolic name of a resource. :rtype: :class:`pyvisa.highlevel.ResourceInfo`
[ "Get", "the", "extended", "information", "of", "this", "resource", "." ]
python
train
kcallin/mqtt-codec
mqtt_codec/io.py
https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L317-L339
def unpack_utf8(self): """Decode a utf-8 string encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by a utf-8 encoded string. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to decode the string. DecodeError When any code point in the utf-8 string is invalid. Returns ------- int Number of bytes consumed. str A string utf-8 decoded from the underlying stream. """ num_bytes_consumed, s = decode_utf8(self.__f) self.__num_bytes_consumed += num_bytes_consumed return num_bytes_consumed, s
[ "def", "unpack_utf8", "(", "self", ")", ":", "num_bytes_consumed", ",", "s", "=", "decode_utf8", "(", "self", ".", "__f", ")", "self", ".", "__num_bytes_consumed", "+=", "num_bytes_consumed", "return", "num_bytes_consumed", ",", "s" ]
Decode a utf-8 string encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by a utf-8 encoded string. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to decode the string. DecodeError When any code point in the utf-8 string is invalid. Returns ------- int Number of bytes consumed. str A string utf-8 decoded from the underlying stream.
[ "Decode", "a", "utf", "-", "8", "string", "encoded", "as", "described", "in", "MQTT", "Version", "3", ".", "1", ".", "1", "section", "1", ".", "5", ".", "3", "line", "177", ".", "This", "is", "a", "16", "-", "bit", "unsigned", "length", "followed", "by", "a", "utf", "-", "8", "encoded", "string", "." ]
python
train
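Per the docstring, the wire format is a 16-bit unsigned length followed by that many UTF-8 bytes, and MQTT encodes integers big-endian. A minimal sketch of such a decoder over a file-like object (names and error handling are mine, not the package's API):

```python
import io
import struct

def decode_utf8_string(f):
    # length-prefixed UTF-8 string: a 16-bit unsigned big-endian
    # length (MQTT's byte order), then that many UTF-8 bytes;
    # short reads and bad code points raise from struct/decode
    (length,) = struct.unpack(">H", f.read(2))
    data = f.read(length)
    return 2 + length, data.decode("utf-8")

print(decode_utf8_string(io.BytesIO(b"\x00\x04MQTT")))  # (6, 'MQTT')
```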
OSSOS/MOP
src/jjk/preproc/MOPplot.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/MOPplot.py#L758-L762
def set_pointing_label(self): """Set the label of the current pointing to the value in the plabel box""" self.pointings[self.current]['label']['text']=w.plabel.get() self.reset()
[ "def", "set_pointing_label", "(", "self", ")", ":", "self", ".", "pointings", "[", "self", ".", "current", "]", "[", "'label'", "]", "[", "'text'", "]", "=", "w", ".", "plabel", ".", "get", "(", ")", "self", ".", "reset", "(", ")" ]
Set the label of the current pointing to the value in the plabel box
[ "Let", "the", "label", "of", "the", "current", "pointing", "to", "the", "value", "in", "the", "plabel", "box" ]
python
train
Metatab/metapack
metapack/html.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/html.py#L477-L619
def display_context(doc): """Create a Jinja context for display""" from rowgenerators.exceptions import DownloadError context = {s.name.lower(): s.as_dict() for s in doc if s.name.lower() != 'schema'} #import json #print(json.dumps(context, indent=4)) mandatory_sections = ['documentation', 'contacts'] # Remove section names deletes = [] for k,v in context.items(): try: del v['@value'] except KeyError: pass # Doesn't have the value except TypeError: # Is actually completely empty, and has a scalar value. Delete and re-create deletes.append(k) if isinstance(v, str): # Shouldn't ever happen, but who knows ? deletes.append(k) for d in deletes: try: del context[d] except KeyError: # Fails in TravisCI, no idea why. pass for ms in mandatory_sections: if not ms in context: context[ms] = {} # Load inline documentation inline = '' for d in context.get('documentation',{}).get('documentation',[]): u = parse_app_url(d['url']) if u.target_format == 'md': # The README.md file inline = '' if u.proto == 'file': # File really ought to be relative t = doc.package_url.join_target(u).get_resource().get_target() else: try: t = u.get_resource().get_target() except DownloadError as e: raise e try: with open(t.fspath) as f: inline += f.read() except FileNotFoundError: pass del d['title'] # Will cause it to be ignored in next section # Strip off the leading title, if it exists, because it will be re-applied # by the templates import re lines = inline.strip().splitlines() if lines and lines[0].startswith('# '): lines = lines[1:] context['inline_doc'] = '\n'.join(lines) # Convert doc section doc_links = {} images = {} for term_name, terms in context['documentation'].items(): if term_name == 'note': context['notes'] = terms else: for i, term in enumerate(terms): try: if term_name == 'image': images[term['title']] = term else: doc_links[term['title']] = term except AttributeError: # A scalar pass # There should not be any scalars in the documentation section except KeyError: pass # ignore entries without titles except TypeError: pass # Also probably a ascalar context['doc_links'] = doc_links context['images'] = images del context['documentation'] # # Update contacts origin = None for term_name, terms in context['contacts'].items(): if isinstance(terms, dict): origin = terms # Origin is a scalar in roort, must be converted to sequence here else: for t in terms: try: t.update(process_contact(t)) except AttributeError: pass # Probably got a scalar if origin: origin.update(process_contact(origin)) context['contacts']['origin'] = [origin] # For resources and references, convert scalars into lists of dicts, which are the # default for Datafiles and References. for section in ('references', 'resources'): for term_key, term_vals in context.get(section,{}).items(): if isinstance(term_vals, dict): if '@value' in term_vals: term_vals['url'] = term_vals['@value'] del term_vals['@value'] new_term_vals = [term_vals] elif isinstance(term_vals, list): new_term_vals = None else: new_term_vals = [ {'url': term_vals, 'name': term_vals}] if new_term_vals: context[section][term_key] = new_term_vals context['distributions'] = {} for dist in doc.find('Root.Distribution'): context['distributions'][dist.type] = dist.value if doc.find('Root.Giturl'): context['distributions']['source'] = doc.get_value('Root.Giturl') return context
[ "def", "display_context", "(", "doc", ")", ":", "from", "rowgenerators", ".", "exceptions", "import", "DownloadError", "context", "=", "{", "s", ".", "name", ".", "lower", "(", ")", ":", "s", ".", "as_dict", "(", ")", "for", "s", "in", "doc", "if", "s", ".", "name", ".", "lower", "(", ")", "!=", "'schema'", "}", "#import json", "#print(json.dumps(context, indent=4))", "mandatory_sections", "=", "[", "'documentation'", ",", "'contacts'", "]", "# Remove section names", "deletes", "=", "[", "]", "for", "k", ",", "v", "in", "context", ".", "items", "(", ")", ":", "try", ":", "del", "v", "[", "'@value'", "]", "except", "KeyError", ":", "pass", "# Doesn't have the value", "except", "TypeError", ":", "# Is actually completely empty, and has a scalar value. Delete and re-create", "deletes", ".", "append", "(", "k", ")", "if", "isinstance", "(", "v", ",", "str", ")", ":", "# Shouldn't ever happen, but who knows ?", "deletes", ".", "append", "(", "k", ")", "for", "d", "in", "deletes", ":", "try", ":", "del", "context", "[", "d", "]", "except", "KeyError", ":", "# Fails in TravisCI, no idea why.", "pass", "for", "ms", "in", "mandatory_sections", ":", "if", "not", "ms", "in", "context", ":", "context", "[", "ms", "]", "=", "{", "}", "# Load inline documentation", "inline", "=", "''", "for", "d", "in", "context", ".", "get", "(", "'documentation'", ",", "{", "}", ")", ".", "get", "(", "'documentation'", ",", "[", "]", ")", ":", "u", "=", "parse_app_url", "(", "d", "[", "'url'", "]", ")", "if", "u", ".", "target_format", "==", "'md'", ":", "# The README.md file", "inline", "=", "''", "if", "u", ".", "proto", "==", "'file'", ":", "# File really ought to be relative", "t", "=", "doc", ".", "package_url", ".", "join_target", "(", "u", ")", ".", "get_resource", "(", ")", ".", "get_target", "(", ")", "else", ":", "try", ":", "t", "=", "u", ".", "get_resource", "(", ")", ".", "get_target", "(", ")", "except", "DownloadError", "as", "e", ":", "raise", "e", "try", ":", "with", "open", "(", "t", ".", "fspath", ")", "as", "f", ":", "inline", "+=", "f", ".", "read", "(", ")", "except", "FileNotFoundError", ":", "pass", "del", "d", "[", "'title'", "]", "# Will cause it to be ignored in next section", "# Strip off the leading title, if it exists, because it will be re-applied", "# by the templates", "import", "re", "lines", "=", "inline", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "if", "lines", "and", "lines", "[", "0", "]", ".", "startswith", "(", "'# '", ")", ":", "lines", "=", "lines", "[", "1", ":", "]", "context", "[", "'inline_doc'", "]", "=", "'\\n'", ".", "join", "(", "lines", ")", "# Convert doc section", "doc_links", "=", "{", "}", "images", "=", "{", "}", "for", "term_name", ",", "terms", "in", "context", "[", "'documentation'", "]", ".", "items", "(", ")", ":", "if", "term_name", "==", "'note'", ":", "context", "[", "'notes'", "]", "=", "terms", "else", ":", "for", "i", ",", "term", "in", "enumerate", "(", "terms", ")", ":", "try", ":", "if", "term_name", "==", "'image'", ":", "images", "[", "term", "[", "'title'", "]", "]", "=", "term", "else", ":", "doc_links", "[", "term", "[", "'title'", "]", "]", "=", "term", "except", "AttributeError", ":", "# A scalar", "pass", "# There should not be any scalars in the documentation section", "except", "KeyError", ":", "pass", "# ignore entries without titles", "except", "TypeError", ":", "pass", "# Also probably a ascalar", "context", "[", "'doc_links'", "]", "=", "doc_links", "context", "[", "'images'", "]", "=", "images", "del", "context", "[", 
"'documentation'", "]", "#", "# Update contacts", "origin", "=", "None", "for", "term_name", ",", "terms", "in", "context", "[", "'contacts'", "]", ".", "items", "(", ")", ":", "if", "isinstance", "(", "terms", ",", "dict", ")", ":", "origin", "=", "terms", "# Origin is a scalar in roort, must be converted to sequence here", "else", ":", "for", "t", "in", "terms", ":", "try", ":", "t", ".", "update", "(", "process_contact", "(", "t", ")", ")", "except", "AttributeError", ":", "pass", "# Probably got a scalar", "if", "origin", ":", "origin", ".", "update", "(", "process_contact", "(", "origin", ")", ")", "context", "[", "'contacts'", "]", "[", "'origin'", "]", "=", "[", "origin", "]", "# For resources and references, convert scalars into lists of dicts, which are the", "# default for Datafiles and References.", "for", "section", "in", "(", "'references'", ",", "'resources'", ")", ":", "for", "term_key", ",", "term_vals", "in", "context", ".", "get", "(", "section", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "isinstance", "(", "term_vals", ",", "dict", ")", ":", "if", "'@value'", "in", "term_vals", ":", "term_vals", "[", "'url'", "]", "=", "term_vals", "[", "'@value'", "]", "del", "term_vals", "[", "'@value'", "]", "new_term_vals", "=", "[", "term_vals", "]", "elif", "isinstance", "(", "term_vals", ",", "list", ")", ":", "new_term_vals", "=", "None", "else", ":", "new_term_vals", "=", "[", "{", "'url'", ":", "term_vals", ",", "'name'", ":", "term_vals", "}", "]", "if", "new_term_vals", ":", "context", "[", "section", "]", "[", "term_key", "]", "=", "new_term_vals", "context", "[", "'distributions'", "]", "=", "{", "}", "for", "dist", "in", "doc", ".", "find", "(", "'Root.Distribution'", ")", ":", "context", "[", "'distributions'", "]", "[", "dist", ".", "type", "]", "=", "dist", ".", "value", "if", "doc", ".", "find", "(", "'Root.Giturl'", ")", ":", "context", "[", "'distributions'", "]", "[", "'source'", "]", "=", "doc", ".", "get_value", "(", "'Root.Giturl'", ")", "return", "context" ]
Create a Jinja context for display
[ "Create", "a", "Jinja", "context", "for", "display" ]
python
train
tanghaibao/goatools
goatools/pvalcalc.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/pvalcalc.py#L90-L101
def _init_pval_obj(self): """Returns a Fisher object based on user-input.""" if self.pval_fnc_name in self.options.keys(): try: fisher_obj = self.options[self.pval_fnc_name](self.pval_fnc_name, self.log) except ImportError: print("fisher module not installed. Falling back on scipy.stats.fisher_exact") fisher_obj = self.options['fisher_scipy_stats']('fisher_scipy_stats', self.log) return fisher_obj raise Exception("PVALUE FUNCTION({FNC}) NOT FOUND".format(FNC=self.pval_fnc_name))
[ "def", "_init_pval_obj", "(", "self", ")", ":", "if", "self", ".", "pval_fnc_name", "in", "self", ".", "options", ".", "keys", "(", ")", ":", "try", ":", "fisher_obj", "=", "self", ".", "options", "[", "self", ".", "pval_fnc_name", "]", "(", "self", ".", "pval_fnc_name", ",", "self", ".", "log", ")", "except", "ImportError", ":", "print", "(", "\"fisher module not installed. Falling back on scipy.stats.fisher_exact\"", ")", "fisher_obj", "=", "self", ".", "options", "[", "'fisher_scipy_stats'", "]", "(", "'fisher_scipy_stats'", ",", "self", ".", "log", ")", "return", "fisher_obj", "raise", "Exception", "(", "\"PVALUE FUNCTION({FNC}) NOT FOUND\"", ".", "format", "(", "FNC", "=", "self", ".", "pval_fnc_name", ")", ")" ]
Returns a Fisher object based on user-input.
[ "Returns", "a", "Fisher", "object", "based", "on", "user", "-", "input", "." ]
python
train
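The fallback in `_init_pval_obj` is the usual optional-dependency pattern: try the fast extension, drop to SciPy on `ImportError`. The bare pattern in isolation (the `"fisher"` label mirrors the snippet's option key; `"fisher_scipy_stats"` is literal in the snippet):

```python
def pick_pval_backend():
    # the optional-dependency pattern from the snippet, in isolation:
    # prefer the compiled ``fisher`` package, fall back when absent
    try:
        import fisher  # noqa: F401
        return "fisher"
    except ImportError:
        print("fisher module not installed. "
              "Falling back on scipy.stats.fisher_exact")
        return "fisher_scipy_stats"
```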
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L102-L121
def makedoedict(str1): """Split a DOE string into '..'-terminated blocks and return [blockdict, belongsdict], keyed by block name.""" blocklist = str1.split('..') blocklist = blocklist[:-1]#remove empty item after last '..' blockdict = {} belongsdict = {} for num in range(0, len(blocklist)): blocklist[num] = blocklist[num].strip() linelist = blocklist[num].split(os.linesep) aline = linelist[0] alinelist = aline.split('=') name = alinelist[0].strip() aline = linelist[1] alinelist = aline.split('=') belongs = alinelist[-1].strip() theblock = blocklist[num] + os.linesep + '..' + os.linesep + os.linesep #put the '..' back in the block blockdict[name] = theblock belongsdict[name] = belongs return [blockdict, belongsdict]
[ "def", "makedoedict", "(", "str1", ")", ":", "blocklist", "=", "str1", ".", "split", "(", "'..'", ")", "blocklist", "=", "blocklist", "[", ":", "-", "1", "]", "#remove empty item after last '..'", "blockdict", "=", "{", "}", "belongsdict", "=", "{", "}", "for", "num", "in", "range", "(", "0", ",", "len", "(", "blocklist", ")", ")", ":", "blocklist", "[", "num", "]", "=", "blocklist", "[", "num", "]", ".", "strip", "(", ")", "linelist", "=", "blocklist", "[", "num", "]", ".", "split", "(", "os", ".", "linesep", ")", "aline", "=", "linelist", "[", "0", "]", "alinelist", "=", "aline", ".", "split", "(", "'='", ")", "name", "=", "alinelist", "[", "0", "]", ".", "strip", "(", ")", "aline", "=", "linelist", "[", "1", "]", "alinelist", "=", "aline", ".", "split", "(", "'='", ")", "belongs", "=", "alinelist", "[", "-", "1", "]", ".", "strip", "(", ")", "theblock", "=", "blocklist", "[", "num", "]", "+", "os", ".", "linesep", "+", "'..'", "+", "os", ".", "linesep", "+", "os", ".", "linesep", "#put the '..' back in the block", "blockdict", "[", "name", "]", "=", "theblock", "belongsdict", "[", "name", "]", "=", "belongs", "return", "[", "blockdict", ",", "belongsdict", "]" ]
Split a DOE string into '..'-terminated blocks and return [blockdict, belongsdict], keyed by block name.
[ "makedoedict" ]
python
train
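Feeding `makedoedict` a made-up two-line, '..'-terminated block shows the mechanics; the block content below is invented for illustration, not real DOE-2 input, and assumes the function above is in scope:

```python
import os

doe = (
    "Block-1 = SOMETYPE" + os.linesep
    + "  LIKE = Parent-1" + os.linesep
    + ".." + os.linesep
)
blockdict, belongsdict = makedoedict(doe)  # the function above
print(list(blockdict))         # ['Block-1']
print(belongsdict["Block-1"])  # 'Parent-1'
```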
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L469-L477
def sell(self, account_id, **params): """https://developers.coinbase.com/api/v2#sell-bitcoin""" if 'amount' not in params and 'total' not in params: raise ValueError("Missing required parameter: 'amount' or 'total'") for required in ['currency']: if required not in params: raise ValueError("Missing required parameter: %s" % required) response = self._post('v2', 'accounts', account_id, 'sells', data=params) return self._make_api_object(response, Sell)
[ "def", "sell", "(", "self", ",", "account_id", ",", "*", "*", "params", ")", ":", "if", "'amount'", "not", "in", "params", "and", "'total'", "not", "in", "params", ":", "raise", "ValueError", "(", "\"Missing required parameter: 'amount' or 'total'\"", ")", "for", "required", "in", "[", "'currency'", "]", ":", "if", "required", "not", "in", "params", ":", "raise", "ValueError", "(", "\"Missing required parameter: %s\"", "%", "required", ")", "response", "=", "self", ".", "_post", "(", "'v2'", ",", "'accounts'", ",", "account_id", ",", "'sells'", ",", "data", "=", "params", ")", "return", "self", ".", "_make_api_object", "(", "response", ",", "Sell", ")" ]
https://developers.coinbase.com/api/v2#sell-bitcoin
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#sell", "-", "bitcoin" ]
python
train
theislab/scanpy
scanpy/tools/_dpt.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_dpt.py#L182-L262
def detect_branchings(self): """Detect all branchings up to `n_branchings`. Writes Attributes ----------------- segs : np.ndarray List of integer index arrays. segs_tips : np.ndarray List of indices of the tips of segments. """ logg.m(' detect', self.n_branchings, 'branching' + ('' if self.n_branchings == 1 else 's')) # a segment is a subset of points of the data set (defined by the # indices of the points in the segment) # initialize the search for branchings with a single segment, # that is, get the indices of the whole data set indices_all = np.arange(self._adata.shape[0], dtype=int) # let's keep a list of segments, the first segment to add is the # whole data set segs = [indices_all] # a segment can as well be defined by the two points that have maximal # distance in the segment, the "tips" of the segment # # the rest of the points in the segment is then defined by demanding # them to "be close to the line segment that connects the tips", that # is, for such a point, the normalized added distance to both tips is # smaller than one: # (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1 # of course, this condition is fulfilled by the full cylindrical # subspace surrounding that line segment, where the radius of the # cylinder can be infinite # # if D denotes a euclidian distance matrix, a line segment is a linear # object, and the name "line" is justified. if we take the # diffusion-based distance matrix Dchosen, which approximates geodesic # distance, with "line", we mean the shortest path between two points, # which can be highly non-linear in the original space # # let us define the tips of the whole data set if False: # this is safe, but not compatible with on-the-fly computation tips_all = np.array(np.unravel_index(np.argmax(self.distances_dpt), self.distances_dpt.shape)) else: if self.iroot is not None: tip_0 = np.argmax(self.distances_dpt[self.iroot]) else: tip_0 = np.argmax(self.distances_dpt[0]) tips_all = np.array([tip_0, np.argmax(self.distances_dpt[tip_0])]) # we keep a list of the tips of each segment segs_tips = [tips_all] segs_connects = [[]] segs_undecided = [True] segs_adjacency = [[]] logg.m(' do not consider groups with less than {} points for splitting' .format(self.min_group_size)) for ibranch in range(self.n_branchings): iseg, tips3 = self.select_segment(segs, segs_tips, segs_undecided) if iseg == -1: logg.m(' partitioning converged') break logg.m(' branching {}:'.format(ibranch + 1), 'split group', iseg) # [third start end] # detect branching and update segs and segs_tips self.detect_branching(segs, segs_tips, segs_connects, segs_undecided, segs_adjacency, iseg, tips3) # store as class members self.segs = segs self.segs_tips = segs_tips self.segs_undecided = segs_undecided # the following is a bit too much, but this allows easy storage self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float) self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int) for i, seg_adjacency in enumerate(segs_adjacency): self.segs_connects[i, seg_adjacency] = segs_connects[i] for i in range(len(segs)): for j in range(len(segs)): self.segs_adjacency[i, j] = self.distances_dpt[self.segs_connects[i, j], self.segs_connects[j, i]] self.segs_adjacency = self.segs_adjacency.tocsr() self.segs_connects = self.segs_connects.tocsr()
[ "def", "detect_branchings", "(", "self", ")", ":", "logg", ".", "m", "(", "' detect'", ",", "self", ".", "n_branchings", ",", "'branching'", "+", "(", "''", "if", "self", ".", "n_branchings", "==", "1", "else", "'s'", ")", ")", "# a segment is a subset of points of the data set (defined by the", "# indices of the points in the segment)", "# initialize the search for branchings with a single segment,", "# that is, get the indices of the whole data set", "indices_all", "=", "np", ".", "arange", "(", "self", ".", "_adata", ".", "shape", "[", "0", "]", ",", "dtype", "=", "int", ")", "# let's keep a list of segments, the first segment to add is the", "# whole data set", "segs", "=", "[", "indices_all", "]", "# a segment can as well be defined by the two points that have maximal", "# distance in the segment, the \"tips\" of the segment", "#", "# the rest of the points in the segment is then defined by demanding", "# them to \"be close to the line segment that connects the tips\", that", "# is, for such a point, the normalized added distance to both tips is", "# smaller than one:", "# (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1", "# of course, this condition is fulfilled by the full cylindrical", "# subspace surrounding that line segment, where the radius of the", "# cylinder can be infinite", "#", "# if D denotes a euclidian distance matrix, a line segment is a linear", "# object, and the name \"line\" is justified. if we take the", "# diffusion-based distance matrix Dchosen, which approximates geodesic", "# distance, with \"line\", we mean the shortest path between two points,", "# which can be highly non-linear in the original space", "#", "# let us define the tips of the whole data set", "if", "False", ":", "# this is safe, but not compatible with on-the-fly computation", "tips_all", "=", "np", ".", "array", "(", "np", ".", "unravel_index", "(", "np", ".", "argmax", "(", "self", ".", "distances_dpt", ")", ",", "self", ".", "distances_dpt", ".", "shape", ")", ")", "else", ":", "if", "self", ".", "iroot", "is", "not", "None", ":", "tip_0", "=", "np", ".", "argmax", "(", "self", ".", "distances_dpt", "[", "self", ".", "iroot", "]", ")", "else", ":", "tip_0", "=", "np", ".", "argmax", "(", "self", ".", "distances_dpt", "[", "0", "]", ")", "tips_all", "=", "np", ".", "array", "(", "[", "tip_0", ",", "np", ".", "argmax", "(", "self", ".", "distances_dpt", "[", "tip_0", "]", ")", "]", ")", "# we keep a list of the tips of each segment", "segs_tips", "=", "[", "tips_all", "]", "segs_connects", "=", "[", "[", "]", "]", "segs_undecided", "=", "[", "True", "]", "segs_adjacency", "=", "[", "[", "]", "]", "logg", ".", "m", "(", "' do not consider groups with less than {} points for splitting'", ".", "format", "(", "self", ".", "min_group_size", ")", ")", "for", "ibranch", "in", "range", "(", "self", ".", "n_branchings", ")", ":", "iseg", ",", "tips3", "=", "self", ".", "select_segment", "(", "segs", ",", "segs_tips", ",", "segs_undecided", ")", "if", "iseg", "==", "-", "1", ":", "logg", ".", "m", "(", "' partitioning converged'", ")", "break", "logg", ".", "m", "(", "' branching {}:'", ".", "format", "(", "ibranch", "+", "1", ")", ",", "'split group'", ",", "iseg", ")", "# [third start end]", "# detect branching and update segs and segs_tips", "self", ".", "detect_branching", "(", "segs", ",", "segs_tips", ",", "segs_connects", ",", "segs_undecided", ",", "segs_adjacency", ",", "iseg", ",", "tips3", ")", "# store as class members", "self", ".", "segs", "=", "segs", "self", ".", "segs_tips", "=", 
"segs_tips", "self", ".", "segs_undecided", "=", "segs_undecided", "# the following is a bit too much, but this allows easy storage", "self", ".", "segs_adjacency", "=", "sp", ".", "sparse", ".", "lil_matrix", "(", "(", "len", "(", "segs", ")", ",", "len", "(", "segs", ")", ")", ",", "dtype", "=", "float", ")", "self", ".", "segs_connects", "=", "sp", ".", "sparse", ".", "lil_matrix", "(", "(", "len", "(", "segs", ")", ",", "len", "(", "segs", ")", ")", ",", "dtype", "=", "int", ")", "for", "i", ",", "seg_adjacency", "in", "enumerate", "(", "segs_adjacency", ")", ":", "self", ".", "segs_connects", "[", "i", ",", "seg_adjacency", "]", "=", "segs_connects", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "segs", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "segs", ")", ")", ":", "self", ".", "segs_adjacency", "[", "i", ",", "j", "]", "=", "self", ".", "distances_dpt", "[", "self", ".", "segs_connects", "[", "i", ",", "j", "]", ",", "self", ".", "segs_connects", "[", "j", ",", "i", "]", "]", "self", ".", "segs_adjacency", "=", "self", ".", "segs_adjacency", ".", "tocsr", "(", ")", "self", ".", "segs_connects", "=", "self", ".", "segs_connects", ".", "tocsr", "(", ")" ]
Detect all branchings up to `n_branchings`. Writes Attributes ----------------- segs : np.ndarray List of integer index arrays. segs_tips : np.ndarray List of indices of the tips of segments.
[ "Detect", "all", "branchings", "up", "to", "n_branchings", "." ]
python
train
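A hedged usage sketch for the record above: branching detection is normally reached through scanpy's public DPT entry point rather than by calling detect_branchings() directly. The dataset, preprocessing recipe, and root index below are illustrative assumptions, not prescribed by the source.

    import scanpy as sc

    adata = sc.datasets.paul15()        # example dataset (assumption)
    sc.pp.recipe_zheng17(adata)         # assumed preprocessing
    sc.pp.neighbors(adata, n_neighbors=10)
    sc.tl.diffmap(adata)
    adata.uns['iroot'] = 0              # arbitrary root cell
    sc.tl.dpt(adata, n_branchings=2)    # drives detect_branchings() internally
    print(adata.obs['dpt_groups'].value_counts())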
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/old.db/client.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/client.py#L95-L108
def get_processing_block_ids(self): """Get list of processing block ids from the processing block keys""" # Initialise empty list _processing_block_ids = [] # Pattern used to search processing block ids pattern = '*:processing_block:*' block_ids = self._db.get_ids(pattern) for block_id in block_ids: id_split = block_id.split(':')[-1] _processing_block_ids.append(id_split) return sorted(_processing_block_ids)
[ "def", "get_processing_block_ids", "(", "self", ")", ":", "# Initialise empty list", "_processing_block_ids", "=", "[", "]", "# Pattern used to search processing block ids", "pattern", "=", "'*:processing_block:*'", "block_ids", "=", "self", ".", "_db", ".", "get_ids", "(", "pattern", ")", "for", "block_id", "in", "block_ids", ":", "id_split", "=", "block_id", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "_processing_block_ids", ".", "append", "(", "id_split", ")", "return", "sorted", "(", "_processing_block_ids", ")" ]
Get list of processing block ids from the processing block keys
[ "Get", "list", "of", "processing", "block", "ids", "from", "the", "processing", "block", "keys" ]
python
train
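The id-extraction step in the record above can be reproduced standalone; the key strings below are hypothetical instances of the '*:processing_block:*' pattern.

    # Hypothetical keys matching the pattern used by get_processing_block_ids.
    block_ids = [
        'scheduling:processing_block:pb-002',
        'scheduling:processing_block:pb-001',
    ]
    processing_block_ids = sorted(key.split(':')[-1] for key in block_ids)
    print(processing_block_ids)  # ['pb-001', 'pb-002']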
sparklingpandas/sparklingpandas
sparklingpandas/prdd.py
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/prdd.py#L49-L53
def applymap(self, func, **kwargs): """Return a new PRDD by applying a function to each element of each pandas DataFrame.""" return self.from_rdd( self._rdd.map(lambda data: data.applymap(func), **kwargs))
[ "def", "applymap", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "from_rdd", "(", "self", ".", "_rdd", ".", "map", "(", "lambda", "data", ":", "data", ".", "applymap", "(", "func", ")", ",", "*", "*", "kwargs", ")", ")" ]
Return a new PRDD by applying a function to each element of each pandas DataFrame.
[ "Return", "a", "new", "PRDD", "by", "applying", "a", "function", "to", "each", "element", "of", "each", "pandas", "DataFrame", "." ]
python
train
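The wrapper above defers to pandas' element-wise applymap on every partition, so its per-partition effect can be previewed with plain pandas; the DataFrame contents are made up for illustration.

    import pandas as pd

    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    print(df.applymap(lambda x: x * 10))  # every element transformed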
automl/HpBandSter
hpbandster/core/base_iteration.py
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/core/base_iteration.py#L141-L176
def get_next_run(self): """ function to return the next configuration and budget to run. This function is called from HB_master, don't call this from your script. It returns None if this run of SH is finished or there are pending jobs that need to finish to progress to the next stage. If there are empty slots to be filled in the current SH stage (which never happens in the original SH version), a new configuration will be sampled and scheduled to run next. """ if self.is_finished: return(None) for k,v in self.data.items(): if v.status == 'QUEUED': assert v.budget == self.budgets[self.stage], 'Configuration budget does not align with current stage!' v.status = 'RUNNING' self.num_running += 1 return(k, v.config, v.budget) # check if there are still slots to fill in the current stage and return that if (self.actual_num_configs[self.stage] < self.num_configs[self.stage]): self.add_configuration() return(self.get_next_run()) if self.num_running == 0: # at this point a stage is completed self.process_results() return(self.get_next_run()) return(None)
[ "def", "get_next_run", "(", "self", ")", ":", "if", "self", ".", "is_finished", ":", "return", "(", "None", ")", "for", "k", ",", "v", "in", "self", ".", "data", ".", "items", "(", ")", ":", "if", "v", ".", "status", "==", "'QUEUED'", ":", "assert", "v", ".", "budget", "==", "self", ".", "budgets", "[", "self", ".", "stage", "]", ",", "'Configuration budget does not align with current stage!'", "v", ".", "status", "=", "'RUNNING'", "self", ".", "num_running", "+=", "1", "return", "(", "k", ",", "v", ".", "config", ",", "v", ".", "budget", ")", "# check if there are still slots to fill in the current stage and return that", "if", "(", "self", ".", "actual_num_configs", "[", "self", ".", "stage", "]", "<", "self", ".", "num_configs", "[", "self", ".", "stage", "]", ")", ":", "self", ".", "add_configuration", "(", ")", "return", "(", "self", ".", "get_next_run", "(", ")", ")", "if", "self", ".", "num_running", "==", "0", ":", "# at this point a stage is completed", "self", ".", "process_results", "(", ")", "return", "(", "self", ".", "get_next_run", "(", ")", ")", "return", "(", "None", ")" ]
function to return the next configuration and budget to run. This function is called from HB_master, don't call this from your script. It returns None if this run of SH is finished or there are pending jobs that need to finish to progress to the next stage. If there are empty slots to be filled in the current SH stage (which never happens in the original SH version), a new configuration will be sampled and scheduled to run next.
[ "function", "to", "return", "the", "next", "configuration", "and", "budget", "to", "run", "." ]
python
train
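A hedged sketch of the polling contract described above: keep requesting work until the iteration returns None, which signals either convergence or that progress is blocked on running jobs. Construction of the iteration object and result reporting are assumed from context and not shown.

    def drain_iteration(iteration, run_job):
        # Pull (config_id, config, budget) tuples until the iteration
        # reports None (finished, or waiting on running jobs).
        while True:
            job = iteration.get_next_run()
            if job is None:
                break
            config_id, config, budget = job
            run_job(config_id, config, budget)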
aliyun/aliyun-odps-python-sdk
odps/models/function.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/models/function.py#L97-L107
def update(self): """ Update this function. :return: None """ if self._owner_changed: self.update_owner(self.owner) self._resources = [res.name for res in self.resources] return self.parent.update(self)
[ "def", "update", "(", "self", ")", ":", "if", "self", ".", "_owner_changed", ":", "self", ".", "update_owner", "(", "self", ".", "owner", ")", "self", ".", "_resources", "=", "[", "res", ".", "name", "for", "res", "in", "self", ".", "resources", "]", "return", "self", ".", "parent", ".", "update", "(", "self", ")" ]
Update this function. :return: None
[ "Update", "this", "function", "." ]
python
train
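An assumed pyodps call pattern for the method above; the project and function names are placeholders.

    from odps import ODPS

    o = ODPS('<access-id>', '<secret-key>', project='my_project')
    func = o.get_function('my_udf')             # placeholder name
    func.owner = 'ALIYUN$new_owner@aliyun.com'  # triggers the owner update path
    func.update()                               # persists owner and resources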
guykisel/inline-plz
inlineplz/main.py
https://github.com/guykisel/inline-plz/blob/b5b1744e9156e31f68b519c0d8022feff79888ae/inlineplz/main.py#L102-L143
def load_config(args, config_path=".inlineplz.yml"): """Load inline-plz config from yaml config file with reasonable defaults.""" config = {} try: with open(config_path) as configfile: config = yaml.safe_load(configfile) or {} if config: print("Loaded config from {}".format(config_path)) pprint.pprint(config) except (IOError, OSError, yaml.parser.ParserError): traceback.print_exc() args = update_from_config(args, config) args.ignore_paths = args.__dict__.get("ignore_paths") or [ "node_modules", ".git", ".tox", "godeps", "vendor", "site-packages", "venv", ".env", "spec", "migrate", "bin", "fixtures", "cassettes", ".cache", ".idea", ".pytest_cache", "__pycache__", "dist", ] if config_path != ".inlineplz.yml": return args # fall back to config_dir inlineplz yaml if we didn't find one locally if args.config_dir and not config: new_config_path = os.path.join(args.config_dir, config_path) if os.path.exists(new_config_path): return load_config(args, new_config_path) return args
[ "def", "load_config", "(", "args", ",", "config_path", "=", "\".inlineplz.yml\"", ")", ":", "config", "=", "{", "}", "try", ":", "with", "open", "(", "config_path", ")", "as", "configfile", ":", "config", "=", "yaml", ".", "safe_load", "(", "configfile", ")", "or", "{", "}", "if", "config", ":", "print", "(", "\"Loaded config from {}\"", ".", "format", "(", "config_path", ")", ")", "pprint", ".", "pprint", "(", "config", ")", "except", "(", "IOError", ",", "OSError", ",", "yaml", ".", "parser", ".", "ParserError", ")", ":", "traceback", ".", "print_exc", "(", ")", "args", "=", "update_from_config", "(", "args", ",", "config", ")", "args", ".", "ignore_paths", "=", "args", ".", "__dict__", ".", "get", "(", "\"ignore_paths\"", ")", "or", "[", "\"node_modules\"", ",", "\".git\"", ",", "\".tox\"", ",", "\"godeps\"", ",", "\"vendor\"", ",", "\"site-packages\"", ",", "\"venv\"", ",", "\".env\"", ",", "\"spec\"", ",", "\"migrate\"", ",", "\"bin\"", ",", "\"fixtures\"", ",", "\"cassettes\"", ",", "\".cache\"", ",", "\".idea\"", ",", "\".pytest_cache\"", ",", "\"__pycache__\"", ",", "\"dist\"", ",", "]", "if", "config_path", "!=", "\".inlineplz.yml\"", ":", "return", "args", "# fall back to config_dir inlineplz yaml if we didn't find one locally", "if", "args", ".", "config_dir", "and", "not", "config", ":", "new_config_path", "=", "os", ".", "path", ".", "join", "(", "args", ".", "config_dir", ",", "config_path", ")", "if", "os", ".", "path", ".", "exists", "(", "new_config_path", ")", ":", "return", "load_config", "(", "args", ",", "new_config_path", ")", "return", "args" ]
Load inline-plz config from yaml config file with reasonable defaults.
[ "Load", "inline", "-", "plz", "config", "from", "yaml", "config", "file", "with", "reasonable", "defaults", "." ]
python
train
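A hedged round-trip for the loader above: write a minimal .inlineplz.yml and load it. How update_from_config copies keys onto the args namespace is assumed, since it is not shown in this record.

    import argparse, yaml

    with open('.inlineplz.yml', 'w') as f:
        yaml.safe_dump({'ignore_paths': ['build', 'third_party']}, f)

    args = argparse.Namespace(config_dir=None)
    args = load_config(args)   # load_config as defined above
    print(args.ignore_paths)   # config values, else the built-in defaults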
talkincode/toughlib
toughlib/permit.py
https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/permit.py#L105-L112
def check_opr_category(self, opr, category): """ Check whether the permission is under the specified category """ for path in self.routes: route = self.routes[path] if opr in route['oprs'] and route['category'] == category: return True return False
[ "def", "check_opr_category", "(", "self", ",", "opr", ",", "category", ")", ":", "for", "path", "in", "self", ".", "routes", ":", "route", "=", "self", ".", "routes", "[", "path", "]", "if", "opr", "in", "route", "[", "'oprs'", "]", "and", "route", "[", "'category'", "]", "==", "category", ":", "return", "True", "return", "False" ]
Check whether the permission is under the specified category
[ "Check", "whether", "the", "permission", "is", "under", "the", "specified", "category" ]
python
train
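A standalone re-creation of the lookup above; the route table is invented for illustration.

    routes = {
        '/ops/reboot': {'oprs': ['admin'], 'category': 'system'},
        '/ops/stats': {'oprs': ['admin', 'viewer'], 'category': 'report'},
    }

    def check_opr_category(opr, category):
        # same membership-and-category test as the method above
        return any(r['category'] == category and opr in r['oprs']
                   for r in routes.values())

    print(check_opr_category('viewer', 'report'))  # True
    print(check_opr_category('viewer', 'system'))  # False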
couchbase/couchbase-python-client
couchbase/connstr.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/connstr.py#L173-L181
def convert_1x_args(bucket, **kwargs): """ Converts arguments for 1.x constructors to their 2.x forms """ host = kwargs.pop('host', 'localhost') port = kwargs.pop('port', None) if not 'connstr' in kwargs and 'connection_string' not in kwargs: kwargs['connection_string'] = _build_connstr(host, port, bucket) return kwargs
[ "def", "convert_1x_args", "(", "bucket", ",", "*", "*", "kwargs", ")", ":", "host", "=", "kwargs", ".", "pop", "(", "'host'", ",", "'localhost'", ")", "port", "=", "kwargs", ".", "pop", "(", "'port'", ",", "None", ")", "if", "not", "'connstr'", "in", "kwargs", "and", "'connection_string'", "not", "in", "kwargs", ":", "kwargs", "[", "'connection_string'", "]", "=", "_build_connstr", "(", "host", ",", "port", ",", "bucket", ")", "return", "kwargs" ]
Converts arguments for 1.x constructors to their 2.x forms
[ "Converts", "arguments", "for", "1", ".", "x", "constructors", "to", "their", "2", ".", "x", "forms" ]
python
train
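A hedged call of the shim above. The exact connection-string layout comes from the private _build_connstr helper, so only the key movement is asserted here; host and port are examples.

    kwargs = convert_1x_args('default', host='db.example.com', port=8091,
                             password='s3cret')
    print('connection_string' in kwargs)  # True: host/port folded into it
    print(kwargs['password'])             # unrelated kwargs pass through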
sassoftware/saspy
saspy/sasets.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasets.py#L393-L441
def varmax(self, data: ['SASdata', str] = None, bound: str = None, by: [str, list] = None, causal: str = None, cointeg: str = None, condfore: str = None, garch: str = None, id: [str, list] = None, initial: str = None, model: str = None, nloptions: str = None, output: [str, bool, 'SASdata'] = None, restrict: str = None, test: str = None, procopts: str = None, stmtpassthrough: str = None, **kwargs: dict) -> 'SASresults': """ Python method to call the VARMAX procedure Documentation link: https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=etsug&docsetTarget=etsug_varmax_syntax.htm&locale=en :param data: SASdata object or string. This parameter is required. :param bound: The bound variable can only be a string type. :param by: The by variable can be a string or list type. :param causal: The causal variable can only be a string type. :param cointeg: The cointeg variable can only be a string type. :param condfore: The condfore variable can only be a string type. :param garch: The garch variable can only be a string type. :param id: The id variable can be a string or list type. :param initial: The initial variable can only be a string type. :param model: The model variable can only be a string type. :param nloptions: The nloptions variable can only be a string type. :param output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output". :param restrict: The restrict variable can only be a string type. :param test: The test variable can only be a string type. :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object """
[ "def", "varmax", "(", "self", ",", "data", ":", "[", "'SASdata'", ",", "str", "]", "=", "None", ",", "bound", ":", "str", "=", "None", ",", "by", ":", "[", "str", ",", "list", "]", "=", "None", ",", "causal", ":", "str", "=", "None", ",", "cointeg", ":", "str", "=", "None", ",", "condfore", ":", "str", "=", "None", ",", "garch", ":", "str", "=", "None", ",", "id", ":", "[", "str", ",", "list", "]", "=", "None", ",", "initial", ":", "str", "=", "None", ",", "model", ":", "str", "=", "None", ",", "nloptions", ":", "str", "=", "None", ",", "output", ":", "[", "str", ",", "bool", ",", "'SASdata'", "]", "=", "None", ",", "restrict", ":", "str", "=", "None", ",", "test", ":", "str", "=", "None", ",", "procopts", ":", "str", "=", "None", ",", "stmtpassthrough", ":", "str", "=", "None", ",", "*", "*", "kwargs", ":", "dict", ")", "->", "'SASresults'", ":" ]
Python method to call the VARMAX procedure Documentation link: https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=etsug&docsetTarget=etsug_varmax_syntax.htm&locale=en :param data: SASdata object or string. This parameter is required. :param bound: The bound variable can only be a string type. :param by: The by variable can be a string or list type. :param causal: The causal variable can only be a string type. :param cointeg: The cointeg variable can only be a string type. :param condfore: The condfore variable can only be a string type. :param garch: The garch variable can only be a string type. :param id: The id variable can be a string or list type. :param initial: The initial variable can only be a string type. :param model: The model variable can only be a string type. :param nloptions: The nloptions variable can only be a string type. :param output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output". :param restrict: The restrict variable can only be a string type. :param test: The test variable can only be a string type. :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object
[ "Python", "method", "to", "call", "the", "VARMAX", "procedure" ]
python
train
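An assumed saspy session flow for the wrapper above; the table name and the ID/MODEL statement texts are placeholders and have not been validated against a SAS installation.

    import saspy

    sas = saspy.SASsession()
    ets = sas.sasets()                       # exposes varmax() shown above
    tbl = sas.sasdata('mydata', libref='work')
    res = ets.varmax(data=tbl, id='date interval=month',
                     model='y1 y2 / p=2')    # statement text is illustrative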
Microsoft/ApplicationInsights-Python
applicationinsights/channel/contracts/DataPoint.py
https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/contracts/DataPoint.py#L193-L202
def std_dev(self, value): """The std_dev property. Args: value (float). the property value. """ if value == self._defaults['stdDev'] and 'stdDev' in self._values: del self._values['stdDev'] else: self._values['stdDev'] = value
[ "def", "std_dev", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'stdDev'", "]", "and", "'stdDev'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'stdDev'", "]", "else", ":", "self", ".", "_values", "[", "'stdDev'", "]", "=", "value" ]
The std_dev property. Args: value (float). the property value.
[ "The", "std_dev", "property", ".", "Args", ":", "value", "(", "float", ")", ".", "the", "property", "value", "." ]
python
train
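The setter above drops values equal to the default so they are not serialized; a hedged illustration of that round-trip, peeking at the private _values dict for demonstration only.

    from applicationinsights.channel.contracts import DataPoint

    point = DataPoint()
    point.std_dev = 2.5
    print('stdDev' in point._values)   # True: non-default value stored
    point.std_dev = point._defaults['stdDev']
    print('stdDev' in point._values)   # False: default removed again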
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_mode.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_mode.py#L50-L79
def cmd_guided(self, args): '''set GUIDED target''' if len(args) != 1 and len(args) != 3: print("Usage: guided ALTITUDE | guided LAT LON ALTITUDE") return if len(args) == 3: latitude = float(args[0]) longitude = float(args[1]) altitude = float(args[2]) latlon = (latitude, longitude) else: try: latlon = self.module('map').click_position except Exception: print("No map available") return if latlon is None: print("No map click position available") return altitude = float(args[0]) print("Guided %s %s" % (str(latlon), str(altitude))) self.master.mav.mission_item_send (self.settings.target_system, self.settings.target_component, 0, self.module('wp').get_default_frame(), mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 2, 0, 0, 0, 0, 0, latlon[0], latlon[1], altitude)
[ "def", "cmd_guided", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "1", "and", "len", "(", "args", ")", "!=", "3", ":", "print", "(", "\"Usage: guided ALTITUDE | guided LAT LON ALTITUDE\"", ")", "return", "if", "len", "(", "args", ")", "==", "3", ":", "latitude", "=", "float", "(", "args", "[", "0", "]", ")", "longitude", "=", "float", "(", "args", "[", "1", "]", ")", "altitude", "=", "float", "(", "args", "[", "2", "]", ")", "latlon", "=", "(", "latitude", ",", "longitude", ")", "else", ":", "try", ":", "latlon", "=", "self", ".", "module", "(", "'map'", ")", ".", "click_position", "except", "Exception", ":", "print", "(", "\"No map available\"", ")", "return", "if", "latlon", "is", "None", ":", "print", "(", "\"No map click position available\"", ")", "return", "altitude", "=", "float", "(", "args", "[", "0", "]", ")", "print", "(", "\"Guided %s %s\"", "%", "(", "str", "(", "latlon", ")", ",", "str", "(", "altitude", ")", ")", ")", "self", ".", "master", ".", "mav", ".", "mission_item_send", "(", "self", ".", "settings", ".", "target_system", ",", "self", ".", "settings", ".", "target_component", ",", "0", ",", "self", ".", "module", "(", "'wp'", ")", ".", "get_default_frame", "(", ")", ",", "mavutil", ".", "mavlink", ".", "MAV_CMD_NAV_WAYPOINT", ",", "2", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "latlon", "[", "0", "]", ",", "latlon", "[", "1", "]", ",", "altitude", ")" ]
set GUIDED target
[ "set", "GUIDED", "target" ]
python
train
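A standalone sketch of the argument handling above, separated from the MAVLink send; coordinates are arbitrary examples.

    def parse_guided(args, click_position=None):
        # 3 args: explicit LAT LON ALTITUDE; 1 arg: altitude plus map click.
        if len(args) == 3:
            lat, lon, alt = map(float, args)
            return (lat, lon), alt
        if len(args) == 1 and click_position is not None:
            return click_position, float(args[0])
        raise ValueError('Usage: guided ALTITUDE | guided LAT LON ALTITUDE')

    print(parse_guided(['-35.36', '149.17', '100']))
    print(parse_guided(['100'], click_position=(-35.36, 149.17)))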
morpframework/morpfw
morpfw/interfaces.py
https://github.com/morpframework/morpfw/blob/803fbf29714e6f29456482f1cfbdbd4922b020b0/morpfw/interfaces.py#L168-L172
def aggregate(self, query: Optional[dict] = None, group: Optional[dict] = None, order_by: Union[None, list, tuple] = None) -> list: """return aggregation result based on specified rulez query and group""" raise NotImplementedError
[ "def", "aggregate", "(", "self", ",", "query", ":", "Optional", "[", "dict", "]", "=", "None", ",", "group", ":", "Optional", "[", "dict", "]", "=", "None", ",", "order_by", ":", "Union", "[", "None", ",", "list", ",", "tuple", "]", "=", "None", ")", "->", "list", ":", "raise", "NotImplementedError" ]
return aggregation result based on specified rulez query and group
[ "return", "aggregation", "result", "based", "on", "specified", "rulez", "query", "and", "group" ]
python
train
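A hedged sketch of filling in the abstract hook above; morpfw's rulez query format is reduced here to a trivial equality filter, which is an assumption for illustration only.

    class InMemoryCollection:
        def __init__(self, rows):
            self.rows = rows

        def aggregate(self, query=None, group=None, order_by=None):
            # equality filter stands in for real rulez query evaluation
            rows = [r for r in self.rows
                    if not query or all(r.get(k) == v for k, v in query.items())]
            if order_by:
                rows = sorted(rows, key=lambda r: tuple(r[f] for f in order_by))
            return rows

    col = InMemoryCollection([{'k': 1, 'v': 'b'}, {'k': 1, 'v': 'a'}])
    print(col.aggregate(query={'k': 1}, order_by=('v',)))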
cloud9ers/gurumate
environment/share/doc/ipython/examples/parallel/options/mckernel.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/options/mckernel.py#L1-L43
def price_options(S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=10000): """ Price European and Asian options using a Monte Carlo method. Parameters ---------- S : float The initial price of the stock. K : float The strike price of the option. sigma : float The volatility of the stock. r : float The risk free interest rate. days : int The number of days until the option expires. paths : int The number of Monte Carlo paths used to price the option. Returns ------- A tuple of (E. call, E. put, A. call, A. put) option prices. """ import numpy as np from math import exp,sqrt h = 1.0/days const1 = exp((r-0.5*sigma**2)*h) const2 = sigma*sqrt(h) stock_price = S*np.ones(paths, dtype='float64') stock_price_sum = np.zeros(paths, dtype='float64') for j in range(days): growth_factor = const1*np.exp(const2*np.random.standard_normal(paths)) stock_price = stock_price*growth_factor stock_price_sum = stock_price_sum + stock_price stock_price_avg = stock_price_sum/days zeros = np.zeros(paths, dtype='float64') r_factor = exp(-r*h*days) euro_put = r_factor*np.mean(np.maximum(zeros, K-stock_price)) asian_put = r_factor*np.mean(np.maximum(zeros, K-stock_price_avg)) euro_call = r_factor*np.mean(np.maximum(zeros, stock_price-K)) asian_call = r_factor*np.mean(np.maximum(zeros, stock_price_avg-K)) return (euro_call, euro_put, asian_call, asian_put)
[ "def", "price_options", "(", "S", "=", "100.0", ",", "K", "=", "100.0", ",", "sigma", "=", "0.25", ",", "r", "=", "0.05", ",", "days", "=", "260", ",", "paths", "=", "10000", ")", ":", "import", "numpy", "as", "np", "from", "math", "import", "exp", ",", "sqrt", "h", "=", "1.0", "/", "days", "const1", "=", "exp", "(", "(", "r", "-", "0.5", "*", "sigma", "**", "2", ")", "*", "h", ")", "const2", "=", "sigma", "*", "sqrt", "(", "h", ")", "stock_price", "=", "S", "*", "np", ".", "ones", "(", "paths", ",", "dtype", "=", "'float64'", ")", "stock_price_sum", "=", "np", ".", "zeros", "(", "paths", ",", "dtype", "=", "'float64'", ")", "for", "j", "in", "range", "(", "days", ")", ":", "growth_factor", "=", "const1", "*", "np", ".", "exp", "(", "const2", "*", "np", ".", "random", ".", "standard_normal", "(", "paths", ")", ")", "stock_price", "=", "stock_price", "*", "growth_factor", "stock_price_sum", "=", "stock_price_sum", "+", "stock_price", "stock_price_avg", "=", "stock_price_sum", "/", "days", "zeros", "=", "np", ".", "zeros", "(", "paths", ",", "dtype", "=", "'float64'", ")", "r_factor", "=", "exp", "(", "-", "r", "*", "h", "*", "days", ")", "euro_put", "=", "r_factor", "*", "np", ".", "mean", "(", "np", ".", "maximum", "(", "zeros", ",", "K", "-", "stock_price", ")", ")", "asian_put", "=", "r_factor", "*", "np", ".", "mean", "(", "np", ".", "maximum", "(", "zeros", ",", "K", "-", "stock_price_avg", ")", ")", "euro_call", "=", "r_factor", "*", "np", ".", "mean", "(", "np", ".", "maximum", "(", "zeros", ",", "stock_price", "-", "K", ")", ")", "asian_call", "=", "r_factor", "*", "np", ".", "mean", "(", "np", ".", "maximum", "(", "zeros", ",", "stock_price_avg", "-", "K", ")", ")", "return", "(", "euro_call", ",", "euro_put", ",", "asian_call", ",", "asian_put", ")" ]
Price European and Asian options using a Monte Carlo method. Parameters ---------- S : float The initial price of the stock. K : float The strike price of the option. sigma : float The volatility of the stock. r : float The risk free interest rate. days : int The number of days until the option expires. paths : int The number of Monte Carlo paths used to price the option. Returns ------- A tuple of (E. call, E. put, A. call, A. put) option prices.
[ "Price", "European", "and", "Asian", "options", "using", "a", "Monte", "Carlo", "method", "." ]
python
test
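Calling the pricer above with its defaults but more paths; with these parameters the European call and put should land near their Black-Scholes values (roughly 12.3 and 7.5), up to Monte Carlo noise.

    euro_call, euro_put, asian_call, asian_put = price_options(
        S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=100000)
    print(euro_call, euro_put, asian_call, asian_put)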
ralphje/imagemounter
imagemounter/disk.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/disk.py#L89-L120
def _get_mount_methods(self, disk_type): """Finds which mount methods are suitable for the specified disk type. Returns a list of all suitable mount methods. """ if self.disk_mounter == 'auto': methods = [] def add_method_if_exists(method): if (method == 'avfs' and _util.command_exists('avfsd')) or \ _util.command_exists(method): methods.append(method) if self.read_write: add_method_if_exists('xmount') else: if disk_type == 'encase': add_method_if_exists('ewfmount') elif disk_type == 'vmdk': add_method_if_exists('vmware-mount') add_method_if_exists('affuse') elif disk_type == 'dd': add_method_if_exists('affuse') elif disk_type == 'compressed': add_method_if_exists('avfs') elif disk_type == 'qcow2': add_method_if_exists('qemu-nbd') elif disk_type == 'vdi': add_method_if_exists('qemu-nbd') add_method_if_exists('xmount') else: methods = [self.disk_mounter] return methods
[ "def", "_get_mount_methods", "(", "self", ",", "disk_type", ")", ":", "if", "self", ".", "disk_mounter", "==", "'auto'", ":", "methods", "=", "[", "]", "def", "add_method_if_exists", "(", "method", ")", ":", "if", "(", "method", "==", "'avfs'", "and", "_util", ".", "command_exists", "(", "'avfsd'", ")", ")", "or", "_util", ".", "command_exists", "(", "method", ")", ":", "methods", ".", "append", "(", "method", ")", "if", "self", ".", "read_write", ":", "add_method_if_exists", "(", "'xmount'", ")", "else", ":", "if", "disk_type", "==", "'encase'", ":", "add_method_if_exists", "(", "'ewfmount'", ")", "elif", "disk_type", "==", "'vmdk'", ":", "add_method_if_exists", "(", "'vmware-mount'", ")", "add_method_if_exists", "(", "'affuse'", ")", "elif", "disk_type", "==", "'dd'", ":", "add_method_if_exists", "(", "'affuse'", ")", "elif", "disk_type", "==", "'compressed'", ":", "add_method_if_exists", "(", "'avfs'", ")", "elif", "disk_type", "==", "'qcow2'", ":", "add_method_if_exists", "(", "'qemu-nbd'", ")", "elif", "disk_type", "==", "'vdi'", ":", "add_method_if_exists", "(", "'qemu-nbd'", ")", "add_method_if_exists", "(", "'xmount'", ")", "else", ":", "methods", "=", "[", "self", ".", "disk_mounter", "]", "return", "methods" ]
Finds which mount methods are suitable for the specified disk type. Returns a list of all suitable mount methods.
[ "Finds", "which", "mount", "methods", "are", "suitable", "for", "the", "specified", "disk", "type", ".", "Returns", "a", "list", "of", "all", "suitable", "mount", "methods", "." ]
python
train
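The command_exists checks above reduce to a PATH lookup; a standalone equivalent using only the standard library (the real _util helper may differ).

    import shutil

    def command_exists(command):
        return shutil.which(command) is not None

    candidates = ['ewfmount', 'affuse', 'xmount', 'qemu-nbd']
    print([c for c in candidates if command_exists(c)])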
jantman/pypi-download-stats
pypi_download_stats/dataquery.py
https://github.com/jantman/pypi-download-stats/blob/44a7a6bbcd61a9e7f02bd02c52584a98183f80c5/pypi_download_stats/dataquery.py#L529-L543
def _have_cache_for_date(self, dt): """ Return True if we have cached data for all projects for the specified datetime. Return False otherwise. :param dt: datetime to find cache for :type dt: datetime.datetime :return: True if we have cache for all projects for this date, False otherwise :rtype: bool """ for p in self.projects: if self.cache.get(p, dt) is None: return False return True
[ "def", "_have_cache_for_date", "(", "self", ",", "dt", ")", ":", "for", "p", "in", "self", ".", "projects", ":", "if", "self", ".", "cache", ".", "get", "(", "p", ",", "dt", ")", "is", "None", ":", "return", "False", "return", "True" ]
Return True if we have cached data for all projects for the specified datetime. Return False otherwise. :param dt: datetime to find cache for :type dt: datetime.datetime :return: True if we have cache for all projects for this date, False otherwise :rtype: bool
[ "Return", "True", "if", "we", "have", "cached", "data", "for", "all", "projects", "for", "the", "specified", "datetime", ".", "Return", "False", "otherwise", "." ]
python
train
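The loop above is an all() over the projects; an equivalent form with the instance attributes passed in explicitly.

    def have_cache_for_date(cache, projects, dt):
        # True only if every project has a cache entry for dt
        return all(cache.get(p, dt) is not None for p in projects)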
wright-group/WrightTools
WrightTools/_group.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/_group.py#L365-L409
def save(self, filepath=None, overwrite=False, verbose=True): """Save as root of a new file. Parameters ---------- filepath : Path-like object (optional) Filepath to write. If None, file is created using natural_name. overwrite : boolean (optional) Toggle overwrite behavior. Default is False. verbose : boolean (optional) Toggle talkback. Default is True Returns ------- str Written filepath. """ if filepath is None: filepath = pathlib.Path(".") / self.natural_name else: filepath = pathlib.Path(filepath) filepath = filepath.with_suffix(".wt5") filepath = filepath.absolute().expanduser() if filepath.exists(): if overwrite: filepath.unlink() else: raise wt_exceptions.FileExistsError(filepath) # copy to new file h5py.File(filepath) new = Group(filepath=filepath, edit_local=True) # attrs for k, v in self.attrs.items(): new.attrs[k] = v # children for k, v in self.items(): super().copy(v, new, name=v.natural_name) # finish new.flush() new.close() del new if verbose: print("file saved at", filepath) return str(filepath)
[ "def", "save", "(", "self", ",", "filepath", "=", "None", ",", "overwrite", "=", "False", ",", "verbose", "=", "True", ")", ":", "if", "filepath", "is", "None", ":", "filepath", "=", "pathlib", ".", "Path", "(", "\".\"", ")", "/", "self", ".", "natural_name", "else", ":", "filepath", "=", "pathlib", ".", "Path", "(", "filepath", ")", "filepath", "=", "filepath", ".", "with_suffix", "(", "\".wt5\"", ")", "filepath", "=", "filepath", ".", "absolute", "(", ")", ".", "expanduser", "(", ")", "if", "filepath", ".", "exists", "(", ")", ":", "if", "overwrite", ":", "filepath", ".", "unlink", "(", ")", "else", ":", "raise", "wt_exceptions", ".", "FileExistsError", "(", "filepath", ")", "# copy to new file", "h5py", ".", "File", "(", "filepath", ")", "new", "=", "Group", "(", "filepath", "=", "filepath", ",", "edit_local", "=", "True", ")", "# attrs", "for", "k", ",", "v", "in", "self", ".", "attrs", ".", "items", "(", ")", ":", "new", ".", "attrs", "[", "k", "]", "=", "v", "# children", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ":", "super", "(", ")", ".", "copy", "(", "v", ",", "new", ",", "name", "=", "v", ".", "natural_name", ")", "# finish", "new", ".", "flush", "(", ")", "new", ".", "close", "(", ")", "del", "new", "if", "verbose", ":", "print", "(", "\"file saved at\"", ",", "filepath", ")", "return", "str", "(", "filepath", ")" ]
Save as root of a new file. Parameters ---------- filepath : Path-like object (optional) Filepath to write. If None, file is created using natural_name. overwrite : boolean (optional) Toggle overwrite behavior. Default is False. verbose : boolean (optional) Toggle talkback. Default is True Returns ------- str Written filepath.
[ "Save", "as", "root", "of", "a", "new", "file", "." ]
python
train
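An assumed round-trip with the public wt5 API; the collection name and file path are placeholders.

    import WrightTools as wt

    root = wt.Collection(name='experiment')    # Group subclass with save()
    path = root.save('experiment.wt5', overwrite=True)
    print(path)                                # absolute path ending in .wt5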
dossier/dossier.models
dossier/models/features/basic.py
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/features/basic.py#L41-L44
def phones(text): '''Returns list of phone numbers without punctuation.''' return imap(lambda m: m.group(0).replace('-', ''), REGEX_PHONE.finditer(text))
[ "def", "phones", "(", "text", ")", ":", "return", "imap", "(", "lambda", "m", ":", "m", ".", "group", "(", "0", ")", ".", "replace", "(", "'-'", ",", "''", ")", ",", "REGEX_PHONE", ".", "finditer", "(", "text", ")", ")" ]
Returns list of phone numbers without punctuation.
[ "Returns", "list", "of", "phone", "numbers", "without", "punctuation", "." ]
python
train
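A self-contained re-creation of the helper above with an explicit pattern; the real REGEX_PHONE may differ, so this regex is an assumption, and a generator expression replaces the Python 2 imap.

    import re

    REGEX_PHONE = re.compile(r'\d{3}-\d{3}-\d{4}')  # assumed pattern

    def phones(text):
        return (m.group(0).replace('-', '') for m in REGEX_PHONE.finditer(text))

    print(list(phones('call 555-867-5309 or 555-123-4567')))
    # ['5558675309', '5551234567']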
thiagopbueno/pyrddl
pyrddl/parser.py
https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/parser.py#L450-L457
def p_state_cons_list(self, p): '''state_cons_list : state_cons_list state_cons_def | state_cons_def''' if len(p) == 3: p[1].append(p[2]) p[0] = p[1] elif len(p) == 2: p[0] = [p[1]]
[ "def", "p_state_cons_list", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "3", ":", "p", "[", "1", "]", ".", "append", "(", "p", "[", "2", "]", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]", "elif", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]" ]
state_cons_list : state_cons_list state_cons_def | state_cons_def
[ "state_cons_list", ":", "state_cons_list", "state_cons_def", "|", "state_cons_def" ]
python
train
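The rule above is PLY's standard left-recursive list idiom; sketched generically below (token and rule names are illustrative, not pyrddl's).

    def p_items(p):
        '''items : items item
                 | item'''
        if len(p) == 3:      # items item -> extend the accumulated list
            p[1].append(p[2])
            p[0] = p[1]
        else:                # single item -> start a new list
            p[0] = [p[1]]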