column             type            values
repo               stringlengths   7-54
path               stringlengths   4-192
url                stringlengths   87-284
code               stringlengths   78-104k
code_tokens        list
docstring          stringlengths   1-46.9k
docstring_tokens   list
language           stringclasses   1 value
partition          stringclasses   3 values
audreyr/cookiecutter
cookiecutter/cli.py
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/cli.py#L27-L32
def version_msg():
    """Return the Cookiecutter version, location and Python powering it."""
    python_version = sys.version[:3]
    location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    message = u'Cookiecutter %(version)s from {} (Python {})'
    return message.format(location, python_version)
[ "def", "version_msg", "(", ")", ":", "python_version", "=", "sys", ".", "version", "[", ":", "3", "]", "location", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ")", "message", "=", "u'Cookiecutter %(version)s from {} (Python {})'", "return", "message", ".", "format", "(", "location", ",", "python_version", ")" ]
Return the Cookiecutter version, location and Python powering it.
[ "Return", "the", "Cookiecutter", "version", "location", "and", "Python", "powering", "it", "." ]
python
train
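The format string in the row above mixes a %-style placeholder with str.format fields; only the {} fields are filled inside version_msg(), so the returned message still contains %(version)s. A minimal sketch of the resulting two-stage substitution, with an assumed caller and a made-up version number:

import os
import sys

def version_msg():
    """Return the Cookiecutter version, location and Python powering it."""
    python_version = sys.version[:3]
    location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    message = u'Cookiecutter %(version)s from {} (Python {})'
    return message.format(location, python_version)

# str.format fills the {} fields but leaves %(version)s untouched, so a caller
# can still apply old-style interpolation afterwards (version number made up):
print(version_msg() % {'version': '1.2.3'})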
ewels/MultiQC
multiqc/modules/rna_seqc/rna_seqc.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/rna_seqc/rna_seqc.py#L87-L118
def rnaseqc_general_stats (self):
    """ Add alignment rate to the general stats table """
    headers = OrderedDict()
    headers['Expression Profiling Efficiency'] = {
        'title': '% Expression Efficiency',
        'description': 'Expression Profiling Efficiency: Ratio of exon reads to total reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn',
        'modify': lambda x: float(x) * 100.0
    }
    headers['Genes Detected'] = {
        'title': '# Genes',
        'description': 'Number of genes detected with at least 5 reads.',
        'min': 0,
        'scale': 'Bu',
        'format': '{:,.0f}'
    }
    headers['rRNA rate'] = {
        'title': '% rRNA Alignment',
        'description': ' rRNA reads (non-duplicate and duplicate reads) per total reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'Reds',
        'modify': lambda x: float(x) * 100.0
    }
    self.general_stats_addcols(self.rna_seqc_metrics, headers)
[ "def", "rnaseqc_general_stats", "(", "self", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'Expression Profiling Efficiency'", "]", "=", "{", "'title'", ":", "'% Expression Efficiency'", ",", "'description'", ":", "'Expression Profiling Efficiency: Ratio of exon reads to total reads'", ",", "'max'", ":", "100", ",", "'min'", ":", "0", ",", "'suffix'", ":", "'%'", ",", "'scale'", ":", "'YlGn'", ",", "'modify'", ":", "lambda", "x", ":", "float", "(", "x", ")", "*", "100.0", "}", "headers", "[", "'Genes Detected'", "]", "=", "{", "'title'", ":", "'# Genes'", ",", "'description'", ":", "'Number of genes detected with at least 5 reads.'", ",", "'min'", ":", "0", ",", "'scale'", ":", "'Bu'", ",", "'format'", ":", "'{:,.0f}'", "}", "headers", "[", "'rRNA rate'", "]", "=", "{", "'title'", ":", "'% rRNA Alignment'", ",", "'description'", ":", "' rRNA reads (non-duplicate and duplicate reads) per total reads'", ",", "'max'", ":", "100", ",", "'min'", ":", "0", ",", "'suffix'", ":", "'%'", ",", "'scale'", ":", "'Reds'", ",", "'modify'", ":", "lambda", "x", ":", "float", "(", "x", ")", "*", "100.0", "}", "self", ".", "general_stats_addcols", "(", "self", ".", "rna_seqc_metrics", ",", "headers", ")" ]
Add alignment rate to the general stats table
[ "Add", "alignment", "rate", "to", "the", "general", "stats", "table" ]
python
train
dbcli/cli_helpers
cli_helpers/config.py
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L119-L122
def system_config_files(self):
    """Get a list of absolute paths to the system config files."""
    return [os.path.join(f, self.filename) for f in get_system_config_dirs(
        self.app_name, self.app_author)]
[ "def", "system_config_files", "(", "self", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "f", ",", "self", ".", "filename", ")", "for", "f", "in", "get_system_config_dirs", "(", "self", ".", "app_name", ",", "self", ".", "app_author", ")", "]" ]
Get a list of absolute paths to the system config files.
[ "Get", "a", "list", "of", "absolute", "paths", "to", "the", "system", "config", "files", "." ]
python
test
ArchiveTeam/wpull
wpull/application/tasks/shutdown.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/application/tasks/shutdown.py#L54-L60
def _update_exit_code_from_stats(cls, statistics: Statistics, app: Application):
    '''Set the current exit code based on the Statistics.'''
    for error_type in statistics.errors:
        exit_code = app.ERROR_CODE_MAP.get(error_type)
        if exit_code:
            app.update_exit_code(exit_code)
[ "def", "_update_exit_code_from_stats", "(", "cls", ",", "statistics", ":", "Statistics", ",", "app", ":", "Application", ")", ":", "for", "error_type", "in", "statistics", ".", "errors", ":", "exit_code", "=", "app", ".", "ERROR_CODE_MAP", ".", "get", "(", "error_type", ")", "if", "exit_code", ":", "app", ".", "update_exit_code", "(", "exit_code", ")" ]
Set the current exit code based on the Statistics.
[ "Set", "the", "current", "exit", "code", "based", "on", "the", "Statistics", "." ]
python
train
SBRG/ssbio
ssbio/protein/structure/properties/msms.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/msms.py#L69-L106
def get_msms_df_on_file(pdb_file, outfile=None, outdir=None, outext='_msms.df', force_rerun=False):
    """Run MSMS (using Biopython) on a PDB file.

    Saves a CSV file of:
        chain: chain ID
        resnum: residue number (PDB numbering)
        icode: residue insertion code
        res_depth: average depth of all atoms in a residue
        ca_depth: depth of the alpha carbon atom

    Depths are in units Angstroms. 1A = 10^-10 m = 1nm

    Args:
        pdb_file: Path to PDB file
        outfile: Optional name of output file (without extension)
        outdir: Optional output directory
        outext: Optional extension for the output file
        outext: Suffix appended to json results file
        force_rerun: Rerun MSMS even if results exist already

    Returns:
        Pandas DataFrame: ResidueDepth property_dict, reformatted
    """
    # Create the output file name
    outfile = ssbio.utils.outfile_maker(inname=pdb_file, outname=outfile, outdir=outdir, outext=outext)

    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        # Load the structure
        my_structure = StructureIO(pdb_file)
        model = my_structure.first_model
        df = get_msms_df(model, pdb_id=op.splitext(op.basename(pdb_file))[0],
                         outfile=outfile, outdir=outdir, outext=outext, force_rerun=force_rerun)
    else:
        log.debug('{}: already ran MSMS and force_rerun={}, loading results'.format(outfile, force_rerun))
        df = pd.read_csv(outfile, index_col=0)

    return df
[ "def", "get_msms_df_on_file", "(", "pdb_file", ",", "outfile", "=", "None", ",", "outdir", "=", "None", ",", "outext", "=", "'_msms.df'", ",", "force_rerun", "=", "False", ")", ":", "# Create the output file name", "outfile", "=", "ssbio", ".", "utils", ".", "outfile_maker", "(", "inname", "=", "pdb_file", ",", "outname", "=", "outfile", ",", "outdir", "=", "outdir", ",", "outext", "=", "outext", ")", "if", "ssbio", ".", "utils", ".", "force_rerun", "(", "flag", "=", "force_rerun", ",", "outfile", "=", "outfile", ")", ":", "# Load the structure", "my_structure", "=", "StructureIO", "(", "pdb_file", ")", "model", "=", "my_structure", ".", "first_model", "df", "=", "get_msms_df", "(", "model", ",", "pdb_id", "=", "op", ".", "splitext", "(", "op", ".", "basename", "(", "pdb_file", ")", ")", "[", "0", "]", ",", "outfile", "=", "outfile", ",", "outdir", "=", "outdir", ",", "outext", "=", "outext", ",", "force_rerun", "=", "force_rerun", ")", "else", ":", "log", ".", "debug", "(", "'{}: already ran MSMS and force_rerun={}, loading results'", ".", "format", "(", "outfile", ",", "force_rerun", ")", ")", "df", "=", "pd", ".", "read_csv", "(", "outfile", ",", "index_col", "=", "0", ")", "return", "df" ]
Run MSMS (using Biopython) on a PDB file. Saves a CSV file of: chain: chain ID resnum: residue number (PDB numbering) icode: residue insertion code res_depth: average depth of all atoms in a residue ca_depth: depth of the alpha carbon atom Depths are in units Angstroms. 1A = 10^-10 m = 1nm Args: pdb_file: Path to PDB file outfile: Optional name of output file (without extension) outdir: Optional output directory outext: Optional extension for the output file outext: Suffix appended to json results file force_rerun: Rerun MSMS even if results exist already Returns: Pandas DataFrame: ResidueDepth property_dict, reformatted
[ "Run", "MSMS", "(", "using", "Biopython", ")", "on", "a", "PDB", "file", "." ]
python
train
eyurtsev/fcsparser
fcsparser/api.py
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L295-L310
def read_analysis(self, file_handle):
    """Read the ANALYSIS segment of the FCS file and store it in self.analysis.

    Warning: This has never been tested with an actual fcs file that contains an
    analysis segment.

    Args:
        file_handle: buffer containing FCS data
    """
    start = self.annotation['__header__']['analysis start']
    end = self.annotation['__header__']['analysis end']
    if start != 0 and end != 0:
        file_handle.seek(start, 0)
        self._analysis = file_handle.read(end - start)
    else:
        self._analysis = None
[ "def", "read_analysis", "(", "self", ",", "file_handle", ")", ":", "start", "=", "self", ".", "annotation", "[", "'__header__'", "]", "[", "'analysis start'", "]", "end", "=", "self", ".", "annotation", "[", "'__header__'", "]", "[", "'analysis end'", "]", "if", "start", "!=", "0", "and", "end", "!=", "0", ":", "file_handle", ".", "seek", "(", "start", ",", "0", ")", "self", ".", "_analysis", "=", "file_handle", ".", "read", "(", "end", "-", "start", ")", "else", ":", "self", ".", "_analysis", "=", "None" ]
Read the ANALYSIS segment of the FCS file and store it in self.analysis. Warning: This has never been tested with an actual fcs file that contains an analysis segment. Args: file_handle: buffer containing FCS data
[ "Read", "the", "ANALYSIS", "segment", "of", "the", "FCS", "file", "and", "store", "it", "in", "self", ".", "analysis", "." ]
python
train
angr/angr
angr/analyses/ddg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/ddg.py#L885-L901
def _kill(self, variable, code_loc):  # pylint:disable=no-self-use
    """
    Kill previous defs. addr_list is a list of normalized addresses.
    """

    # Case 1: address perfectly match, we kill
    # Case 2: a is a subset of the original address
    # Case 3: a is a superset of the original address

    # the previous definition is killed. mark it in data graph.
    if variable in self._live_defs:
        for loc in self._live_defs.lookup_defs(variable):
            pv = ProgramVariable(variable, loc, arch=self.project.arch)
            self._data_graph_add_edge(pv, ProgramVariable(variable, code_loc, arch=self.project.arch), type='kill')

    self._live_defs.kill_def(variable, code_loc)
[ "def", "_kill", "(", "self", ",", "variable", ",", "code_loc", ")", ":", "# pylint:disable=no-self-use", "# Case 1: address perfectly match, we kill", "# Case 2: a is a subset of the original address", "# Case 3: a is a superset of the original address", "# the previous definition is killed. mark it in data graph.", "if", "variable", "in", "self", ".", "_live_defs", ":", "for", "loc", "in", "self", ".", "_live_defs", ".", "lookup_defs", "(", "variable", ")", ":", "pv", "=", "ProgramVariable", "(", "variable", ",", "loc", ",", "arch", "=", "self", ".", "project", ".", "arch", ")", "self", ".", "_data_graph_add_edge", "(", "pv", ",", "ProgramVariable", "(", "variable", ",", "code_loc", ",", "arch", "=", "self", ".", "project", ".", "arch", ")", ",", "type", "=", "'kill'", ")", "self", ".", "_live_defs", ".", "kill_def", "(", "variable", ",", "code_loc", ")" ]
Kill previous defs. addr_list is a list of normalized addresses.
[ "Kill", "previous", "defs", ".", "addr_list", "is", "a", "list", "of", "normalized", "addresses", "." ]
python
train
ff0000/scarlet
scarlet/scheduling/fields.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/scheduling/fields.py#L42-L46
def get_db_prep_value(self, value, connection, prepared=False):
    """Convert JSON object to a string"""
    if isinstance(value, basestring):
        return value
    return json.dumps(value, **self.dump_kwargs)
[ "def", "get_db_prep_value", "(", "self", ",", "value", ",", "connection", ",", "prepared", "=", "False", ")", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "return", "value", "return", "json", ".", "dumps", "(", "value", ",", "*", "*", "self", ".", "dump_kwargs", ")" ]
Convert JSON object to a string
[ "Convert", "JSON", "object", "to", "a", "string" ]
python
train
elifesciences/elife-article
elifearticle/utils.py
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/utils.py#L93-L105
def version_from_xml_filename(filename):
    "extract the numeric version from the xml filename"
    try:
        filename_parts = filename.split(os.sep)[-1].split('-')
    except AttributeError:
        return None
    if len(filename_parts) == 3:
        try:
            return int(filename_parts[-1].lstrip('v').rstrip('.xml'))
        except ValueError:
            return None
    else:
        return None
[ "def", "version_from_xml_filename", "(", "filename", ")", ":", "try", ":", "filename_parts", "=", "filename", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", ".", "split", "(", "'-'", ")", "except", "AttributeError", ":", "return", "None", "if", "len", "(", "filename_parts", ")", "==", "3", ":", "try", ":", "return", "int", "(", "filename_parts", "[", "-", "1", "]", ".", "lstrip", "(", "'v'", ")", ".", "rstrip", "(", "'.xml'", ")", ")", "except", "ValueError", ":", "return", "None", "else", ":", "return", "None" ]
extract the numeric version from the xml filename
[ "extract", "the", "numeric", "version", "from", "the", "xml", "filename" ]
python
train
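A quick illustration of the filename shape the function above expects: three dash-separated parts, with the version carried in the last one. The import path follows the row's path column and assumes the package is installed; the filenames are made up:

from elifearticle.utils import version_from_xml_filename

print(version_from_xml_filename('elife-02935-v2.xml'))  # -> 2
print(version_from_xml_filename('elife-02935.xml'))     # -> None: only two dash-separated parts
print(version_from_xml_filename(None))                  # -> None: non-strings have no .split()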
tBaxter/activity-monitor
activity_monitor/managers.py
https://github.com/tBaxter/activity-monitor/blob/be6c6edc7c6b4141923b47376502cde0f785eb68/activity_monitor/managers.py#L47-L58
def get_last_update_of_model(self, model, **kwargs):
    """
    Return the last time a given model's items were updated. Returns the
    epoch if the items were never updated.
    """
    qs = self.get_for_model(model)
    if kwargs:
        qs = qs.filter(**kwargs)
    try:
        return qs.order_by('-timestamp')[0].timestamp
    except IndexError:
        return datetime.datetime.fromtimestamp(0)
[ "def", "get_last_update_of_model", "(", "self", ",", "model", ",", "*", "*", "kwargs", ")", ":", "qs", "=", "self", ".", "get_for_model", "(", "model", ")", "if", "kwargs", ":", "qs", "=", "qs", ".", "filter", "(", "*", "*", "kwargs", ")", "try", ":", "return", "qs", ".", "order_by", "(", "'-timestamp'", ")", "[", "0", "]", ".", "timestamp", "except", "IndexError", ":", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "0", ")" ]
Return the last time a given model's items were updated. Returns the epoch if the items were never updated.
[ "Return", "the", "last", "time", "a", "given", "model", "s", "items", "were", "updated", ".", "Returns", "the", "epoch", "if", "the", "items", "were", "never", "updated", "." ]
python
train
googlefonts/fontmake
Lib/fontmake/font_project.py
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L943-L981
def run_from_ufos(self, ufos, output=(), **kwargs):
    """Run toolchain from UFO sources.

    Args:
        ufos: List of UFO sources, as either paths or opened objects.
        output: List of output formats to generate.
        kwargs: Arguments passed along to save_otfs.
    """
    if set(output) == {"ufo"}:
        return

    # the `ufos` parameter can be a list of UFO objects
    # or it can be a path (string) with a glob syntax
    ufo_paths = []
    if isinstance(ufos, basestring):
        ufo_paths = glob.glob(ufos)
        ufos = [Font(x) for x in ufo_paths]
    elif isinstance(ufos, list):
        # ufos can be either paths or open Font objects, so normalize them
        ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
        ufo_paths = [x.path for x in ufos]
    else:
        raise FontmakeError(
            "UFOs parameter is neither a defcon.Font object, a path or a glob, "
            "nor a list of any of these.",
            ufos,
        )

    need_reload = False
    if "otf" in output:
        self.build_otfs(ufos, **kwargs)
        need_reload = True

    if "ttf" in output:
        if need_reload:
            ufos = [Font(path) for path in ufo_paths]
        self.build_ttfs(ufos, **kwargs)
        need_reload = True
[ "def", "run_from_ufos", "(", "self", ",", "ufos", ",", "output", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "if", "set", "(", "output", ")", "==", "{", "\"ufo\"", "}", ":", "return", "# the `ufos` parameter can be a list of UFO objects", "# or it can be a path (string) with a glob syntax", "ufo_paths", "=", "[", "]", "if", "isinstance", "(", "ufos", ",", "basestring", ")", ":", "ufo_paths", "=", "glob", ".", "glob", "(", "ufos", ")", "ufos", "=", "[", "Font", "(", "x", ")", "for", "x", "in", "ufo_paths", "]", "elif", "isinstance", "(", "ufos", ",", "list", ")", ":", "# ufos can be either paths or open Font objects, so normalize them", "ufos", "=", "[", "Font", "(", "x", ")", "if", "isinstance", "(", "x", ",", "basestring", ")", "else", "x", "for", "x", "in", "ufos", "]", "ufo_paths", "=", "[", "x", ".", "path", "for", "x", "in", "ufos", "]", "else", ":", "raise", "FontmakeError", "(", "\"UFOs parameter is neither a defcon.Font object, a path or a glob, \"", "\"nor a list of any of these.\"", ",", "ufos", ",", ")", "need_reload", "=", "False", "if", "\"otf\"", "in", "output", ":", "self", ".", "build_otfs", "(", "ufos", ",", "*", "*", "kwargs", ")", "need_reload", "=", "True", "if", "\"ttf\"", "in", "output", ":", "if", "need_reload", ":", "ufos", "=", "[", "Font", "(", "path", ")", "for", "path", "in", "ufo_paths", "]", "self", ".", "build_ttfs", "(", "ufos", ",", "*", "*", "kwargs", ")", "need_reload", "=", "True" ]
Run toolchain from UFO sources. Args: ufos: List of UFO sources, as either paths or opened objects. output: List of output formats to generate. kwargs: Arguments passed along to save_otfs.
[ "Run", "toolchain", "from", "UFO", "sources", "." ]
python
train
nateshmbhat/pyttsx3
pyttsx3/engine.py
https://github.com/nateshmbhat/pyttsx3/blob/0f304bff4812d50937393f1e3d7f89c9862a1623/pyttsx3/engine.py#L37-L51
def _notify(self, topic, **kwargs):
    """
    Invokes callbacks for an event topic.

    @param topic: String event name
    @type topic: str
    @param kwargs: Values associated with the event
    @type kwargs: dict
    """
    for cb in self._connects.get(topic, []):
        try:
            cb(**kwargs)
        except Exception:
            if self._debug:
                traceback.print_exc()
[ "def", "_notify", "(", "self", ",", "topic", ",", "*", "*", "kwargs", ")", ":", "for", "cb", "in", "self", ".", "_connects", ".", "get", "(", "topic", ",", "[", "]", ")", ":", "try", ":", "cb", "(", "*", "*", "kwargs", ")", "except", "Exception", ":", "if", "self", ".", "_debug", ":", "traceback", ".", "print_exc", "(", ")" ]
Invokes callbacks for an event topic. @param topic: String event name @type topic: str @param kwargs: Values associated with the event @type kwargs: dict
[ "Invokes", "callbacks", "for", "an", "event", "topic", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/ssh_migrations.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/ssh_migrations.py#L89-L125
def ssh_known_host_key(host, application_name, user=None):
    """Return the first entry in known_hosts for host.

    :param host: hostname to lookup in file.
    :type host: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh asserts are for.
    :type user: str
    :returns: Host key
    :rtype: str or None
    """
    cmd = [
        'ssh-keygen',
        '-f', known_hosts(application_name, user),
        '-H',
        '-F',
        host]
    try:
        # The first line of output is like '# Host xx found: line 1 type RSA',
        # which should be excluded.
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # RC of 1 seems to be legitimate for most ssh-keygen -F calls.
        if e.returncode == 1:
            output = e.output
        else:
            raise
    output = output.strip()

    if output:
        # Bug #1500589 cmd has 0 rc on precise if entry not present
        lines = output.split('\n')
        if len(lines) >= 1:
            return lines[0]

    return None
[ "def", "ssh_known_host_key", "(", "host", ",", "application_name", ",", "user", "=", "None", ")", ":", "cmd", "=", "[", "'ssh-keygen'", ",", "'-f'", ",", "known_hosts", "(", "application_name", ",", "user", ")", ",", "'-H'", ",", "'-F'", ",", "host", "]", "try", ":", "# The first line of output is like '# Host xx found: line 1 type RSA',", "# which should be excluded.", "output", "=", "subprocess", ".", "check_output", "(", "cmd", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "# RC of 1 seems to be legitimate for most ssh-keygen -F calls.", "if", "e", ".", "returncode", "==", "1", ":", "output", "=", "e", ".", "output", "else", ":", "raise", "output", "=", "output", ".", "strip", "(", ")", "if", "output", ":", "# Bug #1500589 cmd has 0 rc on precise if entry not present", "lines", "=", "output", ".", "split", "(", "'\\n'", ")", "if", "len", "(", "lines", ")", ">=", "1", ":", "return", "lines", "[", "0", "]", "return", "None" ]
Return the first entry in known_hosts for host. :param host: hostname to lookup in file. :type host: str :param application_name: Name of application eg nova-compute-something :type application_name: str :param user: The user that the ssh asserts are for. :type user: str :returns: Host key :rtype: str or None
[ "Return", "the", "first", "entry", "in", "known_hosts", "for", "host", "." ]
python
train
alberanid/python-iplib
iplib.py
https://github.com/alberanid/python-iplib/blob/488b56fe57ad836b27feec9e76f51883db28faa6/iplib.py#L551-L554
def get_hex(self):
    """Return the hexadecimal notation of the address/netmask."""
    return _convert(self._ip_dec, notation=IP_HEX,
                    inotation=IP_DEC, _check=False, _isnm=self._isnm)
[ "def", "get_hex", "(", "self", ")", ":", "return", "_convert", "(", "self", ".", "_ip_dec", ",", "notation", "=", "IP_HEX", ",", "inotation", "=", "IP_DEC", ",", "_check", "=", "False", ",", "_isnm", "=", "self", ".", "_isnm", ")" ]
Return the hexadecimal notation of the address/netmask.
[ "Return", "the", "hexadecimal", "notation", "of", "the", "address", "/", "netmask", "." ]
python
valid
yatiml/yatiml
yatiml/helpers.py
https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/helpers.py#L310-L319
def dashes_to_unders_in_keys(self) -> None:
    """Replaces dashes with underscores in key names.

    For each attribute in a mapping, this replaces any dashes in \
    its keys with underscores. Handy because Python does not \
    accept dashes in identifiers, while some YAML-based file \
    formats use dashes in their keys.
    """
    for key_node, _ in self.yaml_node.value:
        key_node.value = key_node.value.replace('-', '_')
[ "def", "dashes_to_unders_in_keys", "(", "self", ")", "->", "None", ":", "for", "key_node", ",", "_", "in", "self", ".", "yaml_node", ".", "value", ":", "key_node", ".", "value", "=", "key_node", ".", "value", ".", "replace", "(", "'-'", ",", "'_'", ")" ]
Replaces dashes with underscores in key names. For each attribute in a mapping, this replaces any dashes in \ its keys with underscores. Handy because Python does not \ accept dashes in identifiers, while some YAML-based file \ formats use dashes in their keys.
[ "Replaces", "dashes", "with", "underscores", "in", "key", "names", "." ]
python
train
emory-libraries/eulfedora
eulfedora/models.py
https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/models.py#L451-L461
def get_chunked_content(self, chunksize=4096):
    '''Generator that returns the datastream content in chunks, so larger
    datastreams can be used without reading the entire contents into memory.'''
    # get the datastream dissemination, but return the actual http response
    r = self.obj.api.getDatastreamDissemination(self.obj.pid, self.id, stream=True,
                                                asOfDateTime=self.as_of_date)
    # read and yield the response in chunks
    for chunk in r.iter_content(chunksize):
        yield chunk
[ "def", "get_chunked_content", "(", "self", ",", "chunksize", "=", "4096", ")", ":", "# get the datastream dissemination, but return the actual http response", "r", "=", "self", ".", "obj", ".", "api", ".", "getDatastreamDissemination", "(", "self", ".", "obj", ".", "pid", ",", "self", ".", "id", ",", "stream", "=", "True", ",", "asOfDateTime", "=", "self", ".", "as_of_date", ")", "# read and yield the response in chunks", "for", "chunk", "in", "r", ".", "iter_content", "(", "chunksize", ")", ":", "yield", "chunk" ]
Generator that returns the datastream content in chunks, so larger datastreams can be used without reading the entire contents into memory.
[ "Generator", "that", "returns", "the", "datastream", "content", "in", "chunks", "so", "larger", "datastreams", "can", "be", "used", "without", "reading", "the", "entire", "contents", "into", "memory", "." ]
python
train
mfcloud/python-zvm-sdk
smtLayer/vmUtils.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/vmUtils.py#L724-L779
def isLoggedOn(rh, userid):
    """
    Determine whether a virtual machine is logged on.

    Input:
       Request Handle:
          userid being queried

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - 0: if we got status.  Otherwise, it is the error
                      return code from the commands issued.
          rs        - Based on rc value.  For rc==0, rs is:
                      0: if we determined it is logged on.
                      1: if we determined it is logged off.
    """

    rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid)

    results = {
        'overallRC': 0,
        'rc': 0,
        'rs': 0,
    }

    cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
    except CalledProcessError as e:
        search_pattern = '(^HCP\w\w\w045E|^HCP\w\w\w361E)'.encode()
        match = re.search(search_pattern, e.output)
        if match:
            # Not logged on
            results['rs'] = 1
        else:
            # Abnormal failure
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
    except Exception as e:
        # All other exceptions.
        results = msgs.msg['0421'][0]
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))

    rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " +
        str(results['overallRC']) + " rc: " + str(results['rc']) +
        " rs: " + str(results['rs']))
    return results
[ "def", "isLoggedOn", "(", "rh", ",", "userid", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter vmUtils.isLoggedOn, userid: \"", "+", "userid", ")", "results", "=", "{", "'overallRC'", ":", "0", ",", "'rc'", ":", "0", ",", "'rs'", ":", "0", ",", "}", "cmd", "=", "[", "\"sudo\"", ",", "\"/sbin/vmcp\"", ",", "\"query\"", ",", "\"user\"", ",", "userid", "]", "strCmd", "=", "' '", ".", "join", "(", "cmd", ")", "rh", ".", "printSysLog", "(", "\"Invoking: \"", "+", "strCmd", ")", "try", ":", "subprocess", ".", "check_output", "(", "cmd", ",", "close_fds", "=", "True", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "CalledProcessError", "as", "e", ":", "search_pattern", "=", "'(^HCP\\w\\w\\w045E|^HCP\\w\\w\\w361E)'", ".", "encode", "(", ")", "match", "=", "re", ".", "search", "(", "search_pattern", ",", "e", ".", "output", ")", "if", "match", ":", "# Not logged on", "results", "[", "'rs'", "]", "=", "1", "else", ":", "# Abnormal failure", "rh", ".", "printLn", "(", "\"ES\"", ",", "msgs", ".", "msg", "[", "'0415'", "]", "[", "1", "]", "%", "(", "modId", ",", "strCmd", ",", "e", ".", "returncode", ",", "e", ".", "output", ")", ")", "results", "=", "msgs", ".", "msg", "[", "'0415'", "]", "[", "0", "]", "results", "[", "'rs'", "]", "=", "e", ".", "returncode", "except", "Exception", "as", "e", ":", "# All other exceptions.", "results", "=", "msgs", ".", "msg", "[", "'0421'", "]", "[", "0", "]", "rh", ".", "printLn", "(", "\"ES\"", ",", "msgs", ".", "msg", "[", "'0421'", "]", "[", "1", "]", "%", "(", "modId", ",", "strCmd", ",", "type", "(", "e", ")", ".", "__name__", ",", "str", "(", "e", ")", ")", ")", "rh", ".", "printSysLog", "(", "\"Exit vmUtils.isLoggedOn, overallRC: \"", "+", "str", "(", "results", "[", "'overallRC'", "]", ")", "+", "\" rc: \"", "+", "str", "(", "results", "[", "'rc'", "]", ")", "+", "\" rs: \"", "+", "str", "(", "results", "[", "'rs'", "]", ")", ")", "return", "results" ]
Determine whether a virtual machine is logged on. Input: Request Handle: userid being queried Output: Dictionary containing the following: overallRC - overall return code, 0: success, non-zero: failure rc - 0: if we got status. Otherwise, it is the error return code from the commands issued. rs - Based on rc value. For rc==0, rs is: 0: if we determined it is logged on. 1: if we determined it is logged off.
[ "Determine", "whether", "a", "virtual", "machine", "is", "logged", "on", "." ]
python
train
mojaie/chorus
chorus/util/geometry.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/util/geometry.py#L130-L146
def t_seg(p1, p2, t, align=0):
    """ trim segment

    Args:
        p1, p2: point(x, y)
        t: scaling factor (1 - trimed segment / original segment)
        align: 1: trim p2, 2: trim p1, 0: both side
    Return:
        trimmed segment(p1, p2)
    """
    v = vector(p1, p2)
    result = {
        1: lambda a, b: (a, translate(b, scale(v, -t))),
        2: lambda a, b: (translate(a, scale(v, t)), b),
        0: lambda a, b: (translate(a, scale(v, t / 2)),
                         translate(b, scale(v, -t / 2)))
    }
    return result[align](p1, p2)
[ "def", "t_seg", "(", "p1", ",", "p2", ",", "t", ",", "align", "=", "0", ")", ":", "v", "=", "vector", "(", "p1", ",", "p2", ")", "result", "=", "{", "1", ":", "lambda", "a", ",", "b", ":", "(", "a", ",", "translate", "(", "b", ",", "scale", "(", "v", ",", "-", "t", ")", ")", ")", ",", "2", ":", "lambda", "a", ",", "b", ":", "(", "translate", "(", "a", ",", "scale", "(", "v", ",", "t", ")", ")", ",", "b", ")", ",", "0", ":", "lambda", "a", ",", "b", ":", "(", "translate", "(", "a", ",", "scale", "(", "v", ",", "t", "/", "2", ")", ")", ",", "translate", "(", "b", ",", "scale", "(", "v", ",", "-", "t", "/", "2", ")", ")", ")", "}", "return", "result", "[", "align", "]", "(", "p1", ",", "p2", ")" ]
trim segment Args: p1, p2: point(x, y) t: scaling factor (1 - trimed segment / original segment) align: 1: trim p2, 2: trim p1, 0: both side Return: trimmed segment(p1, p2)
[ "trim", "segment", "Args", ":", "p1", "p2", ":", "point", "(", "x", "y", ")", "t", ":", "scaling", "factor", "(", "1", "-", "trimed", "segment", "/", "original", "segment", ")", "align", ":", "1", ":", "trim", "p2", "2", ":", "trim", "p1", "0", ":", "both", "side", "Return", ":", "trimmed", "segment", "(", "p1", "p2", ")" ]
python
train
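The helpers vector, scale and translate are not part of this row. Assuming they behave as their names suggest (vector(p1, p2) = p2 - p1, scale multiplies by a factor, translate adds a vector), the following self-contained sketch with stand-in helpers shows what the three align modes do; the stand-ins are illustrative assumptions, not the library's implementation:

# stand-ins for chorus.util.geometry helpers, assumed semantics for illustration only
def vector(p1, p2):
    return (p2[0] - p1[0], p2[1] - p1[1])

def scale(v, f):
    return (v[0] * f, v[1] * f)

def translate(p, v):
    return (p[0] + v[0], p[1] + v[1])

def t_seg(p1, p2, t, align=0):
    # same dispatch as the row above
    v = vector(p1, p2)
    result = {
        1: lambda a, b: (a, translate(b, scale(v, -t))),
        2: lambda a, b: (translate(a, scale(v, t)), b),
        0: lambda a, b: (translate(a, scale(v, t / 2)),
                         translate(b, scale(v, -t / 2)))
    }
    return result[align](p1, p2)

print(t_seg((0, 0), (10, 0), 0.2, align=0))  # ((1.0, 0.0), (9.0, 0.0)) - both ends trimmed
print(t_seg((0, 0), (10, 0), 0.2, align=1))  # ((0, 0), (8.0, 0.0)) - only p2 trimmed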
Danielhiversen/pyMetno
metno/__init__.py
https://github.com/Danielhiversen/pyMetno/blob/7d200a495fdea0e1a9310069fdcd65f205d6e6f5/metno/__init__.py#L187-L254
async def update(self):
    """Update data."""
    if self._last_update is None or datetime.datetime.now() - self._last_update > datetime.timedelta(3600):
        try:
            with async_timeout.timeout(10):
                resp = await self._websession.get(self._api_url, params=self._urlparams)
                if resp.status != 200:
                    _LOGGER.error('%s returned %s', self._api_url, resp.status)
                    return False
                self._data = await resp.json()
        except (asyncio.TimeoutError, aiohttp.ClientError) as err:
            _LOGGER.error('%s returned %s', self._api_url, err)
            return False

        try:
            forecast_time = datetime.datetime.now(pytz.utc) + datetime.timedelta(hours=self._forecast)
            data = None
            min_dist = 24 * 3600
            for _data in self._data['data']['time']:
                valid_from = parse_datetime(_data['from'])
                valid_to = parse_datetime(_data['to'])
                if forecast_time >= valid_to:
                    # Has already passed. Never select this.
                    continue
                average_dist = (abs((valid_to - forecast_time).total_seconds())
                                + abs((valid_from - forecast_time).total_seconds()))
                if average_dist < min_dist:
                    min_dist = average_dist
                    data = _data
            if not data:
                return False
            self.data['aqi'] = data.get('variables', {}).get('AQI', {}).get('value')
            self.data['pm10_concentration'] = data.get('variables', {}).get('pm10_concentration', {}).get('value')
            self.data['o3_concentration'] = data.get('variables', {}).get('o3_concentration', {}).get('value')
            self.data['no2_concentration'] = data.get('variables', {}).get('no2_concentration', {}).get('value')
            self.data['pm25_concentration'] = data.get('variables', {}).get('pm25_concentration', {}).get('value')
            self.data['location'] = "{}, {}".format(self._data.get('meta', {}).get('location', {}).get('name'),
                                                    self._data.get('meta', {}).get('superlocation', {}).get('name'))
            state = data.get('variables', {}).get('AQI', {}).get('value')
            if state < 2:
                level = "low"
            elif state < 3:
                level = "medium"
            else:
                level = "high"
            self.data['level'] = level
            self.units['aqi'] = data.get('variables', {}).get('AQI', {}).get('units')
            self.units['pm10_concentration'] = data.get('variables', {}).get('pm10_concentration', {}).get('units')
            self.units['o3_concentration'] = data.get('variables', {}).get('o3_concentration', {}).get('units')
            self.units['no2_concentration'] = data.get('variables', {}).get('no2_concentration', {}).get('units')
            self.units['pm25_concentration'] = data.get('variables', {}).get('pm25_concentration', {}).get('units')
            self.units['aqi'] = data.get('variables', {}).get('AQI', {}).get('value')
        except IndexError as err:
            _LOGGER.error('%s returned %s', resp.url, err)
            return False
    return True
[ "async", "def", "update", "(", "self", ")", ":", "if", "self", ".", "_last_update", "is", "None", "or", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "self", ".", "_last_update", ">", "datetime", ".", "timedelta", "(", "3600", ")", ":", "try", ":", "with", "async_timeout", ".", "timeout", "(", "10", ")", ":", "resp", "=", "await", "self", ".", "_websession", ".", "get", "(", "self", ".", "_api_url", ",", "params", "=", "self", ".", "_urlparams", ")", "if", "resp", ".", "status", "!=", "200", ":", "_LOGGER", ".", "error", "(", "'%s returned %s'", ",", "self", ".", "_api_url", ",", "resp", ".", "status", ")", "return", "False", "self", ".", "_data", "=", "await", "resp", ".", "json", "(", ")", "except", "(", "asyncio", ".", "TimeoutError", ",", "aiohttp", ".", "ClientError", ")", "as", "err", ":", "_LOGGER", ".", "error", "(", "'%s returned %s'", ",", "self", ".", "_api_url", ",", "err", ")", "return", "False", "try", ":", "forecast_time", "=", "datetime", ".", "datetime", ".", "now", "(", "pytz", ".", "utc", ")", "+", "datetime", ".", "timedelta", "(", "hours", "=", "self", ".", "_forecast", ")", "data", "=", "None", "min_dist", "=", "24", "*", "3600", "for", "_data", "in", "self", ".", "_data", "[", "'data'", "]", "[", "'time'", "]", ":", "valid_from", "=", "parse_datetime", "(", "_data", "[", "'from'", "]", ")", "valid_to", "=", "parse_datetime", "(", "_data", "[", "'to'", "]", ")", "if", "forecast_time", ">=", "valid_to", ":", "# Has already passed. Never select this.", "continue", "average_dist", "=", "(", "abs", "(", "(", "valid_to", "-", "forecast_time", ")", ".", "total_seconds", "(", ")", ")", "+", "abs", "(", "(", "valid_from", "-", "forecast_time", ")", ".", "total_seconds", "(", ")", ")", ")", "if", "average_dist", "<", "min_dist", ":", "min_dist", "=", "average_dist", "data", "=", "_data", "if", "not", "data", ":", "return", "False", "self", ".", "data", "[", "'aqi'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'AQI'", ",", "{", "}", ")", ".", "get", "(", "'value'", ")", "self", ".", "data", "[", "'pm10_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'pm10_concentration'", ",", "{", "}", ")", ".", "get", "(", "'value'", ")", "self", ".", "data", "[", "'o3_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'o3_concentration'", ",", "{", "}", ")", ".", "get", "(", "'value'", ")", "self", ".", "data", "[", "'no2_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'no2_concentration'", ",", "{", "}", ")", ".", "get", "(", "'value'", ")", "self", ".", "data", "[", "'pm25_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'pm25_concentration'", ",", "{", "}", ")", ".", "get", "(", "'value'", ")", "self", ".", "data", "[", "'location'", "]", "=", "\"{}, {}\"", ".", "format", "(", "self", ".", "_data", ".", "get", "(", "'meta'", ",", "{", "}", ")", ".", "get", "(", "'location'", ",", "{", "}", ")", ".", "get", "(", "'name'", ")", ",", "self", ".", "_data", ".", "get", "(", "'meta'", ",", "{", "}", ")", ".", "get", "(", "'superlocation'", ",", "{", "}", ")", ".", "get", "(", "'name'", ")", ")", "state", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'AQI'", ",", "{", "}", ")", ".", "get", "(", "'value'", ")", "if", "state", "<", "2", ":", "level", "=", "\"low\"", "elif", "state", 
"<", "3", ":", "level", "=", "\"medium\"", "else", ":", "level", "=", "\"high\"", "self", ".", "data", "[", "'level'", "]", "=", "level", "self", ".", "units", "[", "'aqi'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'AQI'", ",", "{", "}", ")", ".", "get", "(", "'units'", ")", "self", ".", "units", "[", "'pm10_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'pm10_concentration'", ",", "{", "}", ")", ".", "get", "(", "'units'", ")", "self", ".", "units", "[", "'o3_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'o3_concentration'", ",", "{", "}", ")", ".", "get", "(", "'units'", ")", "self", ".", "units", "[", "'no2_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'no2_concentration'", ",", "{", "}", ")", ".", "get", "(", "'units'", ")", "self", ".", "units", "[", "'pm25_concentration'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'pm25_concentration'", ",", "{", "}", ")", ".", "get", "(", "'units'", ")", "self", ".", "units", "[", "'aqi'", "]", "=", "data", ".", "get", "(", "'variables'", ",", "{", "}", ")", ".", "get", "(", "'AQI'", ",", "{", "}", ")", ".", "get", "(", "'value'", ")", "except", "IndexError", "as", "err", ":", "_LOGGER", ".", "error", "(", "'%s returned %s'", ",", "resp", ".", "url", ",", "err", ")", "return", "False", "return", "True" ]
Update data.
[ "Update", "data", "." ]
python
train
tmontaigu/pylas
pylas/headers/rawheader.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L200-L203
def mins(self):
    """ Returns de minimum values of x, y, z as a numpy array """
    return np.array([self.x_min, self.y_min, self.z_min])
[ "def", "mins", "(", "self", ")", ":", "return", "np", ".", "array", "(", "[", "self", ".", "x_min", ",", "self", ".", "y_min", ",", "self", ".", "z_min", "]", ")" ]
Returns de minimum values of x, y, z as a numpy array
[ "Returns", "de", "minimum", "values", "of", "x", "y", "z", "as", "a", "numpy", "array" ]
python
test
rhjdjong/SlipLib
sliplib/slipsocket.py
https://github.com/rhjdjong/SlipLib/blob/8300dba3e512bca282380f234be34d75f4a73ce1/sliplib/slipsocket.py#L94-L110
def create_connection(cls, address, timeout=None, source_address=None):
    """Create a SlipSocket connection.

    This convenience method creates a connection to the the specified address
    using the :func:`socket.create_connection` function.
    The socket that is returned from that call is automatically wrapped in
    a :class:`SlipSocket` object.

    .. note::
        The :meth:`create_connection` method does not magically turn the
        socket at the remote address into a SlipSocket.
        For the connection to work properly, the remote socket must already
        have been configured to use the SLIP protocol.
    """
    sock = socket.create_connection(address, timeout, source_address)
    return cls(sock)
[ "def", "create_connection", "(", "cls", ",", "address", ",", "timeout", "=", "None", ",", "source_address", "=", "None", ")", ":", "sock", "=", "socket", ".", "create_connection", "(", "address", ",", "timeout", ",", "source_address", ")", "return", "cls", "(", "sock", ")" ]
Create a SlipSocket connection. This convenience method creates a connection to the the specified address using the :func:`socket.create_connection` function. The socket that is returned from that call is automatically wrapped in a :class:`SlipSocket` object. .. note:: The :meth:`create_connection` method does not magically turn the socket at the remote address into a SlipSocket. For the connection to work properly, the remote socket must already have been configured to use the SLIP protocol.
[ "Create", "a", "SlipSocket", "connection", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1624-L1646
def change_state_id(self, state_id=None):
    """
    Changes the id of the state to a new id. This functions replaces the old state_id with the new state_id
    in all data flows and transitions.

    :param state_id: The new state if of the state
    """
    old_state_id = self.state_id
    super(ContainerState, self).change_state_id(state_id)
    # Use private variables to change ids to prevent validity checks

    # change id in all transitions
    for transition in self.transitions.values():
        if transition.from_state == old_state_id:
            transition._from_state = self.state_id
        if transition.to_state == old_state_id:
            transition._to_state = self.state_id

    # change id in all data_flows
    for data_flow in self.data_flows.values():
        if data_flow.from_state == old_state_id:
            data_flow._from_state = self.state_id
        if data_flow.to_state == old_state_id:
            data_flow._to_state = self.state_id
[ "def", "change_state_id", "(", "self", ",", "state_id", "=", "None", ")", ":", "old_state_id", "=", "self", ".", "state_id", "super", "(", "ContainerState", ",", "self", ")", ".", "change_state_id", "(", "state_id", ")", "# Use private variables to change ids to prevent validity checks", "# change id in all transitions", "for", "transition", "in", "self", ".", "transitions", ".", "values", "(", ")", ":", "if", "transition", ".", "from_state", "==", "old_state_id", ":", "transition", ".", "_from_state", "=", "self", ".", "state_id", "if", "transition", ".", "to_state", "==", "old_state_id", ":", "transition", ".", "_to_state", "=", "self", ".", "state_id", "# change id in all data_flows", "for", "data_flow", "in", "self", ".", "data_flows", ".", "values", "(", ")", ":", "if", "data_flow", ".", "from_state", "==", "old_state_id", ":", "data_flow", ".", "_from_state", "=", "self", ".", "state_id", "if", "data_flow", ".", "to_state", "==", "old_state_id", ":", "data_flow", ".", "_to_state", "=", "self", ".", "state_id" ]
Changes the id of the state to a new id. This functions replaces the old state_id with the new state_id in all data flows and transitions. :param state_id: The new state if of the state
[ "Changes", "the", "id", "of", "the", "state", "to", "a", "new", "id", ".", "This", "functions", "replaces", "the", "old", "state_id", "with", "the", "new", "state_id", "in", "all", "data", "flows", "and", "transitions", "." ]
python
train
tslight/treepick
treepick/actions.py
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/actions.py#L70-L90
def nextparent(self, parent, depth):
    '''
    Add lines to current line by traversing the grandparent object again
    and once we reach our current line counting every line that is prefixed
    with the parent directory.
    '''
    if depth > 1:  # can't jump to parent of root node!
        pdir = os.path.dirname(self.name)
        line = 0
        for c, d in parent.traverse():
            if line > parent.curline and c.name.startswith(pdir):
                parent.curline += 1
            line += 1
    else:  # otherwise just skip to next directory
        line = -1  # skip hidden parent node
        for c, d in parent.traverse():
            if line > parent.curline:
                parent.curline += 1
                if os.path.isdir(c.name) and c.name in parent.children[0:]:
                    break
            line += 1
[ "def", "nextparent", "(", "self", ",", "parent", ",", "depth", ")", ":", "if", "depth", ">", "1", ":", "# can't jump to parent of root node!", "pdir", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "name", ")", "line", "=", "0", "for", "c", ",", "d", "in", "parent", ".", "traverse", "(", ")", ":", "if", "line", ">", "parent", ".", "curline", "and", "c", ".", "name", ".", "startswith", "(", "pdir", ")", ":", "parent", ".", "curline", "+=", "1", "line", "+=", "1", "else", ":", "# otherwise just skip to next directory", "line", "=", "-", "1", "# skip hidden parent node", "for", "c", ",", "d", "in", "parent", ".", "traverse", "(", ")", ":", "if", "line", ">", "parent", ".", "curline", ":", "parent", ".", "curline", "+=", "1", "if", "os", ".", "path", ".", "isdir", "(", "c", ".", "name", ")", "and", "c", ".", "name", "in", "parent", ".", "children", "[", "0", ":", "]", ":", "break", "line", "+=", "1" ]
Add lines to current line by traversing the grandparent object again and once we reach our current line counting every line that is prefixed with the parent directory.
[ "Add", "lines", "to", "current", "line", "by", "traversing", "the", "grandparent", "object", "again", "and", "once", "we", "reach", "our", "current", "line", "counting", "every", "line", "that", "is", "prefixed", "with", "the", "parent", "directory", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L427-L452
def mixed_list_file(cls, filename, values, bits):
    """
    Write a list of mixed values to a file.
    If a file of the same name exists, it's contents are replaced.

    See L{HexInput.mixed_list_file} for a description of the file format.

    @type  filename: str
    @param filename: Name of the file to write.

    @type  values: list( int )
    @param values: List of mixed values to write to the file.

    @type  bits: int
    @param bits:
        (Optional) Number of bits of the target architecture.
        The default is platform dependent. See: L{HexOutput.integer_size}
    """
    fd = open(filename, 'w')
    for original in values:
        try:
            parsed = cls.integer(original, bits)
        except TypeError:
            parsed = repr(original)
        print >> fd, parsed
    fd.close()
[ "def", "mixed_list_file", "(", "cls", ",", "filename", ",", "values", ",", "bits", ")", ":", "fd", "=", "open", "(", "filename", ",", "'w'", ")", "for", "original", "in", "values", ":", "try", ":", "parsed", "=", "cls", ".", "integer", "(", "original", ",", "bits", ")", "except", "TypeError", ":", "parsed", "=", "repr", "(", "original", ")", "print", ">>", "fd", ",", "parsed", "fd", ".", "close", "(", ")" ]
Write a list of mixed values to a file. If a file of the same name exists, it's contents are replaced. See L{HexInput.mixed_list_file} for a description of the file format. @type filename: str @param filename: Name of the file to write. @type values: list( int ) @param values: List of mixed values to write to the file. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexOutput.integer_size}
[ "Write", "a", "list", "of", "mixed", "values", "to", "a", "file", ".", "If", "a", "file", "of", "the", "same", "name", "exists", "it", "s", "contents", "are", "replaced", "." ]
python
train
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1571-L1578
def _init_map(self, record_types=None, **kwargs):
    """Initialize form map"""
    osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
    self._my_map['url'] = self._url_default
    self._my_map['data'] = self._data_default
    self._my_map['accessibilityTypeId'] = self._accessibility_type_default
    self._my_map['assignedRepositoryIds'] = [str(kwargs['repository_id'])]
    self._my_map['assetId'] = str(kwargs['asset_id'])
[ "def", "_init_map", "(", "self", ",", "record_types", "=", "None", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidObjectForm", ".", "_init_map", "(", "self", ",", "record_types", "=", "record_types", ")", "self", ".", "_my_map", "[", "'url'", "]", "=", "self", ".", "_url_default", "self", ".", "_my_map", "[", "'data'", "]", "=", "self", ".", "_data_default", "self", ".", "_my_map", "[", "'accessibilityTypeId'", "]", "=", "self", ".", "_accessibility_type_default", "self", ".", "_my_map", "[", "'assignedRepositoryIds'", "]", "=", "[", "str", "(", "kwargs", "[", "'repository_id'", "]", ")", "]", "self", ".", "_my_map", "[", "'assetId'", "]", "=", "str", "(", "kwargs", "[", "'asset_id'", "]", ")" ]
Initialize form map
[ "Initialize", "form", "map" ]
python
train
IrvKalb/pygwidgets
pygwidgets/pygwidgets.py
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2212-L2217
def flipVertical(self):
    """ flips an image object vertically """
    self.flipV = not self.flipV
    self._transmogrophy(self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV)
[ "def", "flipVertical", "(", "self", ")", ":", "self", ".", "flipV", "=", "not", "self", ".", "flipV", "self", ".", "_transmogrophy", "(", "self", ".", "angle", ",", "self", ".", "percent", ",", "self", ".", "scaleFromCenter", ",", "self", ".", "flipH", ",", "self", ".", "flipV", ")" ]
flips an image object vertically
[ "flips", "an", "image", "object", "vertically" ]
python
train
bwhite/hadoopy
hadoopy/_hdfs.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_hdfs.py#L229-L251
def writetb(path, kvs, java_mem_mb=256):
    """Write typedbytes sequence file to HDFS given an iterator of KeyValue pairs

    :param path: HDFS path (string)
    :param kvs: Iterator of (key, value)
    :param java_mem_mb: Integer of java heap size in MB (default 256)
    :raises: IOError: An error occurred while saving the data.
    """
    read_fd, write_fd = os.pipe()
    read_fp = os.fdopen(read_fd, 'r')
    hstreaming = _find_hstreaming()
    cmd = 'hadoop jar %s loadtb %s' % (hstreaming, path)
    p = _hadoop_fs_command(cmd, stdin=read_fp, java_mem_mb=java_mem_mb)
    read_fp.close()
    with hadoopy.TypedBytesFile(write_fd=write_fd) as tb_fp:
        for kv in kvs:
            if p.poll() is not None:
                raise IOError('writetb: Hadoop process quit while we were sending it data. Hadoop output below...\nstdout\n%s\nstderr\n%s' % p.communicate())
            tb_fp.write(kv)
        tb_fp.flush()
    p.wait()
    if p.returncode is not 0:
        raise IOError('writetb: Hadoop process returned [%d]. Hadoop output below...\nstderr\n%s' % (p.returncode, p.stderr.read()))
[ "def", "writetb", "(", "path", ",", "kvs", ",", "java_mem_mb", "=", "256", ")", ":", "read_fd", ",", "write_fd", "=", "os", ".", "pipe", "(", ")", "read_fp", "=", "os", ".", "fdopen", "(", "read_fd", ",", "'r'", ")", "hstreaming", "=", "_find_hstreaming", "(", ")", "cmd", "=", "'hadoop jar %s loadtb %s'", "%", "(", "hstreaming", ",", "path", ")", "p", "=", "_hadoop_fs_command", "(", "cmd", ",", "stdin", "=", "read_fp", ",", "java_mem_mb", "=", "java_mem_mb", ")", "read_fp", ".", "close", "(", ")", "with", "hadoopy", ".", "TypedBytesFile", "(", "write_fd", "=", "write_fd", ")", "as", "tb_fp", ":", "for", "kv", "in", "kvs", ":", "if", "p", ".", "poll", "(", ")", "is", "not", "None", ":", "raise", "IOError", "(", "'writetb: Hadoop process quit while we were sending it data. Hadoop output below...\\nstdout\\n%s\\nstderr\\n%s'", "%", "p", ".", "communicate", "(", ")", ")", "tb_fp", ".", "write", "(", "kv", ")", "tb_fp", ".", "flush", "(", ")", "p", ".", "wait", "(", ")", "if", "p", ".", "returncode", "is", "not", "0", ":", "raise", "IOError", "(", "'writetb: Hadoop process returned [%d]. Hadoop output below...\\nstderr\\n%s'", "%", "(", "p", ".", "returncode", ",", "p", ".", "stderr", ".", "read", "(", ")", ")", ")" ]
Write typedbytes sequence file to HDFS given an iterator of KeyValue pairs :param path: HDFS path (string) :param kvs: Iterator of (key, value) :param java_mem_mb: Integer of java heap size in MB (default 256) :raises: IOError: An error occurred while saving the data.
[ "Write", "typedbytes", "sequence", "file", "to", "HDFS", "given", "an", "iterator", "of", "KeyValue", "pairs" ]
python
train
riccardocagnasso/useless
src/useless/common/__init__.py
https://github.com/riccardocagnasso/useless/blob/5167aab82958f653148e3689c9a7e548d4fa2cba/src/useless/common/__init__.py#L29-L48
def parse_cstring(stream, offset):
    """
    parse_cstring will parse a null-terminated string in a bytestream.

    The string will be decoded with UTF-8 decoder, of course since we are
    doing this byte-a-byte, it won't really work for all Unicode strings.

    TODO: add proper Unicode support
    """
    stream.seek(offset)

    string = ""

    while True:
        char = struct.unpack('c', stream.read(1))[0]
        if char == b'\x00':
            return string
        else:
            string += char.decode()
[ "def", "parse_cstring", "(", "stream", ",", "offset", ")", ":", "stream", ".", "seek", "(", "offset", ")", "string", "=", "\"\"", "while", "True", ":", "char", "=", "struct", ".", "unpack", "(", "'c'", ",", "stream", ".", "read", "(", "1", ")", ")", "[", "0", "]", "if", "char", "==", "b'\\x00'", ":", "return", "string", "else", ":", "string", "+=", "char", ".", "decode", "(", ")" ]
parse_cstring will parse a null-terminated string in a bytestream. The string will be decoded with UTF-8 decoder, of course since we are doing this byte-a-byte, it won't really work for all Unicode strings. TODO: add proper Unicode support
[ "parse_cstring", "will", "parse", "a", "null", "-", "terminated", "string", "in", "a", "bytestream", "." ]
python
train
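A minimal check of the parsing loop above on an in-memory stream; the function body is copied from the row (docstring omitted) so the snippet runs on its own, and the byte string is made up for illustration:

import io
import struct

def parse_cstring(stream, offset):
    # copied from the row above: read one byte at a time until a NUL terminator
    stream.seek(offset)
    string = ""
    while True:
        char = struct.unpack('c', stream.read(1))[0]
        if char == b'\x00':
            return string
        else:
            string += char.decode()

# two NUL-terminated strings back to back in a fake bytestream
buf = io.BytesIO(b"hello\x00world\x00")
print(parse_cstring(buf, 0))  # -> 'hello'
print(parse_cstring(buf, 6))  # -> 'world'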
nrcharles/caelum
caelum/tools.py
https://github.com/nrcharles/caelum/blob/9a8e65806385978556d7bb2e6870f003ff82023e/caelum/tools.py#L20-L29
def download(url, filename):
    """download and extract file."""
    logger.info("Downloading %s", url)
    request = urllib2.Request(url)
    request.add_header('User-Agent',
                       'caelum/0.1 +https://github.com/nrcharles/caelum')
    opener = urllib2.build_opener()
    local_file = open(filename, 'w')
    local_file.write(opener.open(request).read())
    local_file.close()
[ "def", "download", "(", "url", ",", "filename", ")", ":", "logger", ".", "info", "(", "\"Downloading %s\"", ",", "url", ")", "request", "=", "urllib2", ".", "Request", "(", "url", ")", "request", ".", "add_header", "(", "'User-Agent'", ",", "'caelum/0.1 +https://github.com/nrcharles/caelum'", ")", "opener", "=", "urllib2", ".", "build_opener", "(", ")", "local_file", "=", "open", "(", "filename", ",", "'w'", ")", "local_file", ".", "write", "(", "opener", ".", "open", "(", "request", ")", ".", "read", "(", ")", ")", "local_file", ".", "close", "(", ")" ]
download and extract file.
[ "download", "and", "extract", "file", "." ]
python
train
angr/angr
angr/analyses/vfg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/vfg.py#L1187-L1213
def _set_return_address(self, state, ret_addr):
    """
    Set the return address of the current state to a specific address. We assume we are at the beginning of a
    function, or in other words, we are about to execute the very first instruction of the function.

    :param SimState state: The program state
    :param int ret_addr: The return address
    :return: None
    """

    # TODO: the following code is totally untested other than X86 and AMD64. Don't freak out if you find bugs :)
    # TODO: Test it

    ret_bvv = state.solver.BVV(ret_addr, self.project.arch.bits)

    if self.project.arch.name in ('X86', 'AMD64'):
        state.stack_push(ret_bvv)
    elif is_arm_arch(self.project.arch):
        state.regs.lr = ret_bvv
    elif self.project.arch.name in ('MIPS32', 'MIPS64'):
        state.regs.ra = ret_bvv
    elif self.project.arch.name in ('PPC32', 'PPC64'):
        state.regs.lr = ret_bvv
    else:
        l.warning('Return address cannot be set for architecture %s. Please add corresponding logic to '
                  'VFG._set_return_address().', self.project.arch.name
                  )
[ "def", "_set_return_address", "(", "self", ",", "state", ",", "ret_addr", ")", ":", "# TODO: the following code is totally untested other than X86 and AMD64. Don't freak out if you find bugs :)", "# TODO: Test it", "ret_bvv", "=", "state", ".", "solver", ".", "BVV", "(", "ret_addr", ",", "self", ".", "project", ".", "arch", ".", "bits", ")", "if", "self", ".", "project", ".", "arch", ".", "name", "in", "(", "'X86'", ",", "'AMD64'", ")", ":", "state", ".", "stack_push", "(", "ret_bvv", ")", "elif", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", ":", "state", ".", "regs", ".", "lr", "=", "ret_bvv", "elif", "self", ".", "project", ".", "arch", ".", "name", "in", "(", "'MIPS32'", ",", "'MIPS64'", ")", ":", "state", ".", "regs", ".", "ra", "=", "ret_bvv", "elif", "self", ".", "project", ".", "arch", ".", "name", "in", "(", "'PPC32'", ",", "'PPC64'", ")", ":", "state", ".", "regs", ".", "lr", "=", "ret_bvv", "else", ":", "l", ".", "warning", "(", "'Return address cannot be set for architecture %s. Please add corresponding logic to '", "'VFG._set_return_address().'", ",", "self", ".", "project", ".", "arch", ".", "name", ")" ]
Set the return address of the current state to a specific address. We assume we are at the beginning of a function, or in other words, we are about to execute the very first instruction of the function. :param SimState state: The program state :param int ret_addr: The return address :return: None
[ "Set", "the", "return", "address", "of", "the", "current", "state", "to", "a", "specific", "address", ".", "We", "assume", "we", "are", "at", "the", "beginning", "of", "a", "function", "or", "in", "other", "words", "we", "are", "about", "to", "execute", "the", "very", "first", "instruction", "of", "the", "function", "." ]
python
train
pycontribs/python-crowd
crowd.py
https://github.com/pycontribs/python-crowd/blob/a075e45774dd5baecf0217843cda747084268e32/crowd.py#L589-L609
def get_nested_group_users(self, groupname):
    """Retrieves a list of all users that directly or indirectly belong
    to the given groupname.

    Args:
        groupname: The group name.

    Returns:
        list:
            A list of strings of user names.
    """

    response = self._get(self.rest_url + "/group/user/nested",
                         params={"groupname": groupname,
                                 "start-index": 0,
                                 "max-results": 99999})

    if not response.ok:
        return None

    return [u['name'] for u in response.json()['users']]
[ "def", "get_nested_group_users", "(", "self", ",", "groupname", ")", ":", "response", "=", "self", ".", "_get", "(", "self", ".", "rest_url", "+", "\"/group/user/nested\"", ",", "params", "=", "{", "\"groupname\"", ":", "groupname", ",", "\"start-index\"", ":", "0", ",", "\"max-results\"", ":", "99999", "}", ")", "if", "not", "response", ".", "ok", ":", "return", "None", "return", "[", "u", "[", "'name'", "]", "for", "u", "in", "response", ".", "json", "(", ")", "[", "'users'", "]", "]" ]
Retrieves a list of all users that directly or indirectly belong to the given groupname. Args: groupname: The group name. Returns: list: A list of strings of user names.
[ "Retrieves", "a", "list", "of", "all", "users", "that", "directly", "or", "indirectly", "belong", "to", "the", "given", "groupname", "." ]
python
train
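A hedged usage sketch for the record above: it assumes python-crowd's CrowdServer client and a reachable Crowd instance, and the URL, application credentials, and group name are placeholders.

from crowd import CrowdServer

cs = CrowdServer('http://crowd.example.com:8095/crowd', 'app-name', 'app-password')
users = cs.get_nested_group_users('developers')
if users is None:
    print('lookup failed (non-OK response)')   # the method returns None on errors
else:
    print('%d users in group' % len(users))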
bitshares/uptick
uptick/proposal.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/proposal.py#L21-L24
def disapproveproposal(ctx, proposal, account):
    """ Disapprove a proposal """
    print_tx(ctx.bitshares.disapproveproposal(proposal, account=account))
[ "def", "disapproveproposal", "(", "ctx", ",", "proposal", ",", "account", ")", ":", "print_tx", "(", "ctx", ".", "bitshares", ".", "disapproveproposal", "(", "proposal", ",", "account", "=", "account", ")", ")" ]
Disapprove a proposal
[ "Disapprove", "a", "proposal" ]
python
train
davebridges/mousedb
mousedb/animal/views.py
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/animal/views.py#L514-L525
def date_archive_year(request):
    """This view will generate a table of the number of mice born on an annual basis.

    This view is associated with the url name archive-home, and returns an dictionary of a date and a animal count."""
    oldest_animal = Animal.objects.filter(Born__isnull=False).order_by('Born')[0]
    archive_dict = {}
    tested_year = oldest_animal.Born.year
    while tested_year <= datetime.date.today().year:
        archive_dict[tested_year] = Animal.objects.filter(Born__year=tested_year).count()
        tested_year = tested_year + 1
    return render(request, 'animal_archive.html', {"archive_dict": archive_dict})
[ "def", "date_archive_year", "(", "request", ")", ":", "oldest_animal", "=", "Animal", ".", "objects", ".", "filter", "(", "Born__isnull", "=", "False", ")", ".", "order_by", "(", "'Born'", ")", "[", "0", "]", "archive_dict", "=", "{", "}", "tested_year", "=", "oldest_animal", ".", "Born", ".", "year", "while", "tested_year", "<=", "datetime", ".", "date", ".", "today", "(", ")", ".", "year", ":", "archive_dict", "[", "tested_year", "]", "=", "Animal", ".", "objects", ".", "filter", "(", "Born__year", "=", "tested_year", ")", ".", "count", "(", ")", "tested_year", "=", "tested_year", "+", "1", "return", "render", "(", "request", ",", "'animal_archive.html'", ",", "{", "\"archive_dict\"", ":", "archive_dict", "}", ")" ]
This view will generate a table of the number of mice born on an annual basis. This view is associated with the url name archive-home, and returns an dictionary of a date and a animal count.
[ "This", "view", "will", "generate", "a", "table", "of", "the", "number", "of", "mice", "born", "on", "an", "annual", "basis", ".", "This", "view", "is", "associated", "with", "the", "url", "name", "archive", "-", "home", "and", "returns", "an", "dictionary", "of", "a", "date", "and", "a", "animal", "count", "." ]
python
train
pallets/werkzeug
src/werkzeug/_reloader.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/_reloader.py#L43-L60
def _find_observable_paths(extra_files=None):
    """Finds all paths that should be observed."""
    rv = set(
        os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
        for x in sys.path
    )

    for filename in extra_files or ():
        rv.add(os.path.dirname(os.path.abspath(filename)))

    for module in list(sys.modules.values()):
        fn = getattr(module, "__file__", None)
        if fn is None:
            continue
        fn = os.path.abspath(fn)
        rv.add(os.path.dirname(fn))

    return _find_common_roots(rv)
[ "def", "_find_observable_paths", "(", "extra_files", "=", "None", ")", ":", "rv", "=", "set", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "x", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "x", ")", "else", "os", ".", "path", ".", "abspath", "(", "x", ")", "for", "x", "in", "sys", ".", "path", ")", "for", "filename", "in", "extra_files", "or", "(", ")", ":", "rv", ".", "add", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", ")", "for", "module", "in", "list", "(", "sys", ".", "modules", ".", "values", "(", ")", ")", ":", "fn", "=", "getattr", "(", "module", ",", "\"__file__\"", ",", "None", ")", "if", "fn", "is", "None", ":", "continue", "fn", "=", "os", ".", "path", ".", "abspath", "(", "fn", ")", "rv", ".", "add", "(", "os", ".", "path", ".", "dirname", "(", "fn", ")", ")", "return", "_find_common_roots", "(", "rv", ")" ]
Finds all paths that should be observed.
[ "Finds", "all", "paths", "that", "should", "be", "observed", "." ]
python
train
pytroll/satpy
satpy/multiscene.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/multiscene.py#L234-L244
def _generate_scene_func(self, gen, func_name, create_new_scene, *args, **kwargs):
    """Abstract method for running a Scene method on each Scene.

    Additionally, modifies current MultiScene or creates a new one if needed.
    """
    new_gen = self._call_scene_func(gen, func_name, create_new_scene, *args, **kwargs)
    new_gen = new_gen if self.is_generator else list(new_gen)
    if create_new_scene:
        return self.__class__(new_gen)
    self._scene_gen = _SceneGenerator(new_gen)
    self._scenes = iter(self._scene_gen)
[ "def", "_generate_scene_func", "(", "self", ",", "gen", ",", "func_name", ",", "create_new_scene", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "new_gen", "=", "self", ".", "_call_scene_func", "(", "gen", ",", "func_name", ",", "create_new_scene", ",", "*", "args", ",", "*", "*", "kwargs", ")", "new_gen", "=", "new_gen", "if", "self", ".", "is_generator", "else", "list", "(", "new_gen", ")", "if", "create_new_scene", ":", "return", "self", ".", "__class__", "(", "new_gen", ")", "self", ".", "_scene_gen", "=", "_SceneGenerator", "(", "new_gen", ")", "self", ".", "_scenes", "=", "iter", "(", "self", ".", "_scene_gen", ")" ]
Abstract method for running a Scene method on each Scene. Additionally, modifies current MultiScene or creates a new one if needed.
[ "Abstract", "method", "for", "running", "a", "Scene", "method", "on", "each", "Scene", "." ]
python
train
tempodb/tempodb-python
tempodb/client.py
https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L545-L568
def write_data(self, key, data, tags=[], attrs={}):
    """Write a set a datapoints into a series by its key.  For now,
    the tags and attributes arguments are ignored.

    :param string key: the series to write data into
    :param list data: a list of DataPoints to write
    :rtype: :class:`tempodb.response.Response` object"""

    url = make_series_url(key)
    url = urlparse.urljoin(url + '/', 'data')

    #revisit later if there are server changes to take these into
    #account
    #params = {
    #    'tag': tag,
    #    'attr': attr,
    #}
    #url_args = endpoint.make_url_args(params)
    #url = '?'.join([url, url_args])

    dlist = [d.to_dictionary() for d in data]
    body = json.dumps(dlist)
    resp = self.session.post(url, body)
    return resp
[ "def", "write_data", "(", "self", ",", "key", ",", "data", ",", "tags", "=", "[", "]", ",", "attrs", "=", "{", "}", ")", ":", "url", "=", "make_series_url", "(", "key", ")", "url", "=", "urlparse", ".", "urljoin", "(", "url", "+", "'/'", ",", "'data'", ")", "#revisit later if there are server changes to take these into", "#account", "#params = {", "# 'tag': tag,", "# 'attr': attr,", "#}", "#url_args = endpoint.make_url_args(params)", "#url = '?'.join([url, url_args])", "dlist", "=", "[", "d", ".", "to_dictionary", "(", ")", "for", "d", "in", "data", "]", "body", "=", "json", ".", "dumps", "(", "dlist", ")", "resp", "=", "self", ".", "session", ".", "post", "(", "url", ",", "body", ")", "return", "resp" ]
Write a set of datapoints into a series by its key. For now, the tags and attributes arguments are ignored. :param string key: the series to write data into :param list data: a list of DataPoints to write :rtype: :class:`tempodb.response.Response` object
[ "Write", "a", "set", "a", "datapoints", "into", "a", "series", "by", "its", "key", ".", "For", "now", "the", "tags", "and", "attributes", "arguments", "are", "ignored", "." ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2649-L2663
def get_comments_of_credit_note_per_page(self, credit_note_id, per_page=1000, page=1):
    """
    Get comments of credit note per page

    :param credit_note_id: the credit note id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    return self._get_resource_per_page(
        resource=CREDIT_NOTE_COMMENTS,
        per_page=per_page,
        page=page,
        params={'credit_note_id': credit_note_id},
    )
[ "def", "get_comments_of_credit_note_per_page", "(", "self", ",", "credit_note_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "CREDIT_NOTE_COMMENTS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'credit_note_id'", ":", "credit_note_id", "}", ",", ")" ]
Get comments of credit note per page :param credit_note_id: the credit note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "comments", "of", "credit", "note", "per", "page" ]
python
train
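A usage sketch for the method above; the Billomapy constructor arguments and the credit note id are placeholders, so treat this as an assumed call shape rather than verified client setup.

from billomapy import Billomapy

client = Billomapy('billomat-id', 'api-key', 'app-id', 'app-secret')  # assumed credential order
# Fetch the first page of up to 100 comments for a (hypothetical) credit note id.
comments = client.get_comments_of_credit_note_per_page(credit_note_id=12345, per_page=100, page=1)
print(len(comments))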
iwanbk/nyamuk
nyamuk/nyamuk.py
https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/nyamuk.py#L497-L509
def handle_pubcomp(self):
    """Handle incoming PUBCOMP packet."""
    self.logger.info("PUBCOMP received")

    ret, mid = self.in_packet.read_uint16()
    if ret != NC.ERR_SUCCESS:
        return ret

    evt = event.EventPubcomp(mid)
    self.push_event(evt)

    return NC.ERR_SUCCESS
[ "def", "handle_pubcomp", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"PUBCOMP received\"", ")", "ret", ",", "mid", "=", "self", ".", "in_packet", ".", "read_uint16", "(", ")", "if", "ret", "!=", "NC", ".", "ERR_SUCCESS", ":", "return", "ret", "evt", "=", "event", ".", "EventPubcomp", "(", "mid", ")", "self", ".", "push_event", "(", "evt", ")", "return", "NC", ".", "ERR_SUCCESS" ]
Handle incoming PUBCOMP packet.
[ "Handle", "incoming", "PUBCOMP", "packet", "." ]
python
train
prompt-toolkit/pymux
pymux/commands/commands.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L595-L599
def display_message(pymux, variables):
    " Display a message. "
    message = variables['<message>']
    client_state = pymux.get_client_state()
    client_state.message = message
[ "def", "display_message", "(", "pymux", ",", "variables", ")", ":", "message", "=", "variables", "[", "'<message>'", "]", "client_state", "=", "pymux", ".", "get_client_state", "(", ")", "client_state", ".", "message", "=", "message" ]
Display a message.
[ "Display", "a", "message", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17r_2_00/igmp_snooping_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/igmp_snooping_state/__init__.py#L190-L213
def _set_debug_igmp(self, v, load=False):
    """
    Setter method for debug_igmp, mapped from YANG variable /igmp_snooping_state/debug_igmp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_debug_igmp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_debug_igmp() directly.

    YANG Description: Debug info for IGMP Snooping
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=debug_igmp.debug_igmp, is_container='container', presence=False, yang_name="debug-igmp", rest_name="debug-igmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-show-debug', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """debug_igmp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=debug_igmp.debug_igmp, is_container='container', presence=False, yang_name="debug-igmp", rest_name="debug-igmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-show-debug', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=False)""",
        })

    self.__debug_igmp = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_debug_igmp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "debug_igmp", ".", "debug_igmp", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"debug-igmp\"", ",", "rest_name", "=", "\"debug-igmp\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'mc-hms-igmp-show-debug'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mc-hms-operational'", ",", "defining_module", "=", "'brocade-mc-hms-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"debug_igmp must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=debug_igmp.debug_igmp, is_container='container', presence=False, yang_name=\"debug-igmp\", rest_name=\"debug-igmp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-show-debug', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__debug_igmp", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for debug_igmp, mapped from YANG variable /igmp_snooping_state/debug_igmp (container) If this variable is read-only (config: false) in the source YANG file, then _set_debug_igmp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_debug_igmp() directly. YANG Description: Debug info for IGMP Snooping
[ "Setter", "method", "for", "debug_igmp", "mapped", "from", "YANG", "variable", "/", "igmp_snooping_state", "/", "debug_igmp", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_debug_igmp", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_debug_igmp", "()", "directly", "." ]
python
train
mkaz/termgraph
termgraph/termgraph.py
https://github.com/mkaz/termgraph/blob/c40b86454d380d685785b98834364b111734c163/termgraph/termgraph.py#L175-L203
def normalize(data, width):
    """Normalize the data and return it."""
    min_dat = find_min(data)
    # We offset by the minimum if there's a negative.
    off_data = []
    if min_dat < 0:
        min_dat = abs(min_dat)
        for dat in data:
            off_data.append([_d + min_dat for _d in dat])
    else:
        off_data = data
    min_dat = find_min(off_data)
    max_dat = find_max(off_data)

    if max_dat < width:
        # Don't need to normalize if the max value
        # is less than the width we allow.
        return off_data

    # max_dat / width is the value for a single tick. norm_factor is the
    # inverse of this value
    # If you divide a number to the value of single tick, you will find how
    # many ticks it does contain basically.
    norm_factor = width / float(max_dat)
    normal_dat = []
    for dat in off_data:
        normal_dat.append([_v * norm_factor for _v in dat])

    return normal_dat
[ "def", "normalize", "(", "data", ",", "width", ")", ":", "min_dat", "=", "find_min", "(", "data", ")", "# We offset by the minimum if there's a negative.", "off_data", "=", "[", "]", "if", "min_dat", "<", "0", ":", "min_dat", "=", "abs", "(", "min_dat", ")", "for", "dat", "in", "data", ":", "off_data", ".", "append", "(", "[", "_d", "+", "min_dat", "for", "_d", "in", "dat", "]", ")", "else", ":", "off_data", "=", "data", "min_dat", "=", "find_min", "(", "off_data", ")", "max_dat", "=", "find_max", "(", "off_data", ")", "if", "max_dat", "<", "width", ":", "# Don't need to normalize if the max value", "# is less than the width we allow.", "return", "off_data", "# max_dat / width is the value for a single tick. norm_factor is the", "# inverse of this value", "# If you divide a number to the value of single tick, you will find how", "# many ticks it does contain basically.", "norm_factor", "=", "width", "/", "float", "(", "max_dat", ")", "normal_dat", "=", "[", "]", "for", "dat", "in", "off_data", ":", "normal_dat", ".", "append", "(", "[", "_v", "*", "norm_factor", "for", "_v", "in", "dat", "]", ")", "return", "normal_dat" ]
Normalize the data and return it.
[ "Normalize", "the", "data", "and", "return", "it", "." ]
python
train
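The arithmetic in normalize() above reduces to scaling every value by width / max(data) once the data exceed the chart width; here is a self-contained check of that factor that does not import termgraph.

data = [[2.0], [4.0], [8.0]]   # one value per category
width = 50
max_dat = max(v for row in data for v in row)
norm_factor = width / float(max_dat)            # 50 / 8 = 6.25 "ticks" per unit
print([[v * norm_factor for v in row] for row in data])
# -> [[12.5], [25.0], [50.0]]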
PyPSA/PyPSA
pypsa/io.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L372-L392
def import_from_csv_folder(network, csv_folder_name, encoding=None, skip_time=False):
    """
    Import network data from CSVs in a folder.

    The CSVs must follow the standard form, see pypsa/examples.

    Parameters
    ----------
    csv_folder_name : string
        Name of folder
    encoding : str, default None
        Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python
        standard encodings
        <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
    skip_time : bool, default False
        Skip reading in time dependent attributes
    """

    basename = os.path.basename(csv_folder_name)
    with ImporterCSV(csv_folder_name, encoding=encoding) as importer:
        _import_from_importer(network, importer, basename=basename,
                              skip_time=skip_time)
[ "def", "import_from_csv_folder", "(", "network", ",", "csv_folder_name", ",", "encoding", "=", "None", ",", "skip_time", "=", "False", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "csv_folder_name", ")", "with", "ImporterCSV", "(", "csv_folder_name", ",", "encoding", "=", "encoding", ")", "as", "importer", ":", "_import_from_importer", "(", "network", ",", "importer", ",", "basename", "=", "basename", ",", "skip_time", "=", "skip_time", ")" ]
Import network data from CSVs in a folder. The CSVs must follow the standard form, see pypsa/examples. Parameters ---------- csv_folder_name : string Name of folder encoding : str, default None Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ skip_time : bool, default False Skip reading in time dependent attributes
[ "Import", "network", "data", "from", "CSVs", "in", "a", "folder", "." ]
python
train
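In practice this io-level function is usually reached through the Network method of the same name; in the sketch below the folder name is hypothetical and must contain the standard CSVs (buses.csv, lines.csv, ...).

import pypsa

network = pypsa.Network()
network.import_from_csv_folder('my_network_csv')   # delegates to the function above
print(network.buses.head())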
Kortemme-Lab/klab
klab/google/gcalendar.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/google/gcalendar.py#L321-L362
def get_events(self, start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = True, restrict_to_calendars = []):
    '''A wrapper for events().list. Returns the events from the calendar within the specified times. Some of the interesting fields are:
           description, end, htmlLink, location, organizer, start, summary

       Note: "Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False."
    '''
    es = []
    calendar_ids = restrict_to_calendars or self.calendar_ids
    for calendar_id in calendar_ids:
        now = datetime.now(tz = self.timezone)
        events = []
        page_token = None
        while True:
            events = self.service.events().list(pageToken=page_token, maxResults = 250, calendarId = self.configured_calendar_ids[calendar_id], timeMin = start_time, timeMax = end_time, showDeleted = False).execute()
            for event in events['items']:
                dt = None
                nb = DeepNonStrictNestedBunch(event)
                assert(not(nb._event))
                nb._event = event # keep the original event as returned in case we want to reuse it e.g. insert it into another calendar
                if (not ignore_cancelled) or (nb.status != 'cancelled'): # Ignore cancelled events
                    if nb.recurrence:
                        if get_recurring_events_as_instances:
                            # Retrieve all occurrences of the recurring event within the timeframe
                            es += self.get_recurring_events(calendar_id, nb.id, start_time, end_time)
                        else:
                            es.append(nb)
                    elif nb.start.dateTime:
                        dt = dateutil.parser.parse(nb.start.dateTime)
                    elif nb.start.date:
                        dt = dateutil.parser.parse(nb.start.date)
                        dt = datetime(year = dt.year, month = dt.month, day = dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone)
                    if dt:
                        nb.datetime_o = dt
                        nb.calendar_id = calendar_id
                        es.append(nb)
            page_token = events.get('nextPageToken')
            if not page_token:
                break
    es.sort(key=lambda x: x.datetime_o)
    return es
[ "def", "get_events", "(", "self", ",", "start_time", ",", "end_time", ",", "ignore_cancelled", "=", "True", ",", "get_recurring_events_as_instances", "=", "True", ",", "restrict_to_calendars", "=", "[", "]", ")", ":", "es", "=", "[", "]", "calendar_ids", "=", "restrict_to_calendars", "or", "self", ".", "calendar_ids", "for", "calendar_id", "in", "calendar_ids", ":", "now", "=", "datetime", ".", "now", "(", "tz", "=", "self", ".", "timezone", ")", "events", "=", "[", "]", "page_token", "=", "None", "while", "True", ":", "events", "=", "self", ".", "service", ".", "events", "(", ")", ".", "list", "(", "pageToken", "=", "page_token", ",", "maxResults", "=", "250", ",", "calendarId", "=", "self", ".", "configured_calendar_ids", "[", "calendar_id", "]", ",", "timeMin", "=", "start_time", ",", "timeMax", "=", "end_time", ",", "showDeleted", "=", "False", ")", ".", "execute", "(", ")", "for", "event", "in", "events", "[", "'items'", "]", ":", "dt", "=", "None", "nb", "=", "DeepNonStrictNestedBunch", "(", "event", ")", "assert", "(", "not", "(", "nb", ".", "_event", ")", ")", "nb", ".", "_event", "=", "event", "# keep the original event as returned in case we want to reuse it e.g. insert it into another calendar", "if", "(", "not", "ignore_cancelled", ")", "or", "(", "nb", ".", "status", "!=", "'cancelled'", ")", ":", "# Ignore cancelled events", "if", "nb", ".", "recurrence", ":", "if", "get_recurring_events_as_instances", ":", "# Retrieve all occurrences of the recurring event within the timeframe", "es", "+=", "self", ".", "get_recurring_events", "(", "calendar_id", ",", "nb", ".", "id", ",", "start_time", ",", "end_time", ")", "else", ":", "es", ".", "append", "(", "nb", ")", "elif", "nb", ".", "start", ".", "dateTime", ":", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "nb", ".", "start", ".", "dateTime", ")", "elif", "nb", ".", "start", ".", "date", ":", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "nb", ".", "start", ".", "date", ")", "dt", "=", "datetime", "(", "year", "=", "dt", ".", "year", ",", "month", "=", "dt", ".", "month", ",", "day", "=", "dt", ".", "day", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "tzinfo", "=", "self", ".", "timezone", ")", "if", "dt", ":", "nb", ".", "datetime_o", "=", "dt", "nb", ".", "calendar_id", "=", "calendar_id", "es", ".", "append", "(", "nb", ")", "page_token", "=", "events", ".", "get", "(", "'nextPageToken'", ")", "if", "not", "page_token", ":", "break", "es", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "datetime_o", ")", "return", "es" ]
A wrapper for events().list. Returns the events from the calendar within the specified times. Some of the interesting fields are: description, end, htmlLink, location, organizer, start, summary Note: "Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False."
[ "A", "wrapper", "for", "events", "()", ".", "list", ".", "Returns", "the", "events", "from", "the", "calendar", "within", "the", "specified", "times", ".", "Some", "of", "the", "interesting", "fields", "are", ":", "description", "end", "htmlLink", "location", "organizer", "start", "summary" ]
python
train
SectorLabs/django-postgres-extra
psqlextra/fields/hstore_field.py
https://github.com/SectorLabs/django-postgres-extra/blob/eef2ed5504d225858d4e4f5d77a838082ca6053e/psqlextra/fields/hstore_field.py#L53-L66
def deconstruct(self):
    """Gets the values to pass to :see:__init__ when
    re-creating this object."""

    name, path, args, kwargs = super(
        HStoreField, self).deconstruct()

    if self.uniqueness is not None:
        kwargs['uniqueness'] = self.uniqueness

    if self.required is not None:
        kwargs['required'] = self.required

    return name, path, args, kwargs
[ "def", "deconstruct", "(", "self", ")", ":", "name", ",", "path", ",", "args", ",", "kwargs", "=", "super", "(", "HStoreField", ",", "self", ")", ".", "deconstruct", "(", ")", "if", "self", ".", "uniqueness", "is", "not", "None", ":", "kwargs", "[", "'uniqueness'", "]", "=", "self", ".", "uniqueness", "if", "self", ".", "required", "is", "not", "None", ":", "kwargs", "[", "'required'", "]", "=", "self", ".", "required", "return", "name", ",", "path", ",", "args", ",", "kwargs" ]
Gets the values to pass to :see:__init__ when re-creating this object.
[ "Gets", "the", "values", "to", "pass", "to", ":", "see", ":", "__init__", "when", "re", "-", "creating", "this", "object", "." ]
python
test
NICTA/revrand
revrand/likelihoods.py
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/likelihoods.py#L500-L521
def df(self, y, f):
    r"""
    Derivative of Poisson log likelihood w.r.t.\ f.

    Parameters
    ----------
    y: ndarray
        array of 0, 1 valued integers of targets
    f: ndarray
        latent function from the GLM prior (:math:`\mathbf{f} =
        \boldsymbol\Phi \mathbf{w}`)

    Returns
    -------
    df: ndarray
        the derivative :math:`\partial \log p(y|f) / \partial f`
    """
    y, f = np.broadcast_arrays(y, f)
    if self.tranfcn == 'exp':
        return y - np.exp(f)
    else:
        return expit(f) * (y / safesoftplus(f) - 1)
[ "def", "df", "(", "self", ",", "y", ",", "f", ")", ":", "y", ",", "f", "=", "np", ".", "broadcast_arrays", "(", "y", ",", "f", ")", "if", "self", ".", "tranfcn", "==", "'exp'", ":", "return", "y", "-", "np", ".", "exp", "(", "f", ")", "else", ":", "return", "expit", "(", "f", ")", "*", "(", "y", "/", "safesoftplus", "(", "f", ")", "-", "1", ")" ]
r""" Derivative of Poisson log likelihood w.r.t.\ f. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) Returns ------- df: ndarray the derivative :math:`\partial \log p(y|f) / \partial f`
[ "r", "Derivative", "of", "Poisson", "log", "likelihood", "w", ".", "r", ".", "t", ".", "\\", "f", "." ]
python
train
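A quick numerical check of the 'exp' branch above: with the exponential link, log p(y|f) = y*f - exp(f) - log(y!), so the derivative with respect to f is y - exp(f). The finite-difference sketch below verifies that identity; it does not exercise the softplus branch.

import numpy as np

y, f, eps = 3.0, 0.7, 1e-6
loglik = lambda fv: y * fv - np.exp(fv)            # f-dependent terms only
numeric = (loglik(f + eps) - loglik(f - eps)) / (2 * eps)
analytic = y - np.exp(f)
print(numeric, analytic)                           # agree to ~1e-9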
core/uricore
uricore/wkz_urls.py
https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_urls.py#L116-L160
def iri_to_uri(iri, charset='utf-8'):
    r"""Converts any unicode based IRI to an acceptable ASCII URI.  Werkzeug
    always uses utf-8 URLs internally because this is what browsers and HTTP
    do as well.  In some places where it accepts an URL it also accepts a
    unicode IRI and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th'

    .. versionadded:: 0.6

    :param iri: the iri to convert
    :param charset: the charset for the URI
    """
    iri = unicode(iri)
    scheme, auth, hostname, port, path, query, fragment = _uri_split(iri)

    scheme = scheme.encode('ascii')
    hostname = hostname.encode('idna')
    if ':' in hostname:
        hostname = '[' + hostname + ']'

    if auth:
        if ':' in auth:
            auth, password = auth.split(':', 1)
        else:
            password = None
        auth = _quote(auth.encode(charset))
        if password:
            auth += ':' + _quote(password.encode(charset))
        hostname = auth + '@' + hostname
    if port:
        hostname += ':' + port

    path = _quote(path.encode(charset), safe="/:~+%")
    query = _quote(query.encode(charset), safe="=%&[]:;$()+,!?*/")

    # this absolutely always must return a string. Otherwise some parts of
    # the system might perform double quoting (#61)
    return str(urlparse.urlunsplit([scheme, hostname, path, query, fragment]))
[ "def", "iri_to_uri", "(", "iri", ",", "charset", "=", "'utf-8'", ")", ":", "iri", "=", "unicode", "(", "iri", ")", "scheme", ",", "auth", ",", "hostname", ",", "port", ",", "path", ",", "query", ",", "fragment", "=", "_uri_split", "(", "iri", ")", "scheme", "=", "scheme", ".", "encode", "(", "'ascii'", ")", "hostname", "=", "hostname", ".", "encode", "(", "'idna'", ")", "if", "':'", "in", "hostname", ":", "hostname", "=", "'['", "+", "hostname", "+", "']'", "if", "auth", ":", "if", "':'", "in", "auth", ":", "auth", ",", "password", "=", "auth", ".", "split", "(", "':'", ",", "1", ")", "else", ":", "password", "=", "None", "auth", "=", "_quote", "(", "auth", ".", "encode", "(", "charset", ")", ")", "if", "password", ":", "auth", "+=", "':'", "+", "_quote", "(", "password", ".", "encode", "(", "charset", ")", ")", "hostname", "=", "auth", "+", "'@'", "+", "hostname", "if", "port", ":", "hostname", "+=", "':'", "+", "port", "path", "=", "_quote", "(", "path", ".", "encode", "(", "charset", ")", ",", "safe", "=", "\"/:~+%\"", ")", "query", "=", "_quote", "(", "query", ".", "encode", "(", "charset", ")", ",", "safe", "=", "\"=%&[]:;$()+,!?*/\"", ")", "# this absolutely always must return a string. Otherwise some parts of", "# the system might perform double quoting (#61)", "return", "str", "(", "urlparse", ".", "urlunsplit", "(", "[", "scheme", ",", "hostname", ",", "path", ",", "query", ",", "fragment", "]", ")", ")" ]
r"""Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug always uses utf-8 URLs internally because this is what browsers and HTTP do as well. In some places where it accepts an URL it also accepts a unicode IRI and converts it into a URI. Examples for IRI versus URI: >>> iri_to_uri(u'http://☃.net/') 'http://xn--n3h.net/' >>> iri_to_uri(u'http://üser:pässword@☃.net/påth') 'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th' .. versionadded:: 0.6 :param iri: the iri to convert :param charset: the charset for the URI
[ "r", "Converts", "any", "unicode", "based", "IRI", "to", "an", "acceptable", "ASCII", "URI", ".", "Werkzeug", "always", "uses", "utf", "-", "8", "URLs", "internally", "because", "this", "is", "what", "browsers", "and", "HTTP", "do", "as", "well", ".", "In", "some", "places", "where", "it", "accepts", "an", "URL", "it", "also", "accepts", "a", "unicode", "IRI", "and", "converts", "it", "into", "a", "URI", "." ]
python
train
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L3812-L3816
def setLineEnds(self, start, end):
    """setLineEnds(self, start, end)"""
    CheckParent(self)
    return _fitz.Annot_setLineEnds(self, start, end)
[ "def", "setLineEnds", "(", "self", ",", "start", ",", "end", ")", ":", "CheckParent", "(", "self", ")", "return", "_fitz", ".", "Annot_setLineEnds", "(", "self", ",", "start", ",", "end", ")" ]
setLineEnds(self, start, end)
[ "setLineEnds", "(", "self", "start", "end", ")" ]
python
train
fr33jc/bang
bang/config.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/config.py#L381-L426
def prepare(self):
    """
    Reorganizes the data such that the deployment logic can find it all
    where it expects to be.

    The raw configuration file is intended to be as human-friendly as
    possible partly through the following mechanisms:

        - In order to minimize repetition, any attributes that are common
          to all server configurations can be specified in the
          ``server_common_attributes`` stanza even though the stanza itself
          does not map directly to a deployable resource.
        - For reference locality, each security group stanza contains its
          list of rules even though rules are actually created in a
          separate stage from the groups themselves.

    In order to make the :class:`Config` object more useful to the program
    logic, this method performs the following transformations:

        - Distributes the ``server_common_attributes`` among all the
          members of the ``servers`` stanza.
        - Extracts security group rules to a top-level key, and
          interpolates all source and target values.

    """
    # TODO: take server_common_attributes and disperse it among the various
    # server stanzas

    # First stage - turn all the dicts (SERVER, SECGROUP, DATABASE, LOADBAL)
    # into lists now they're merged properly
    for stanza_key, name_key in (
            (R.SERVERS, A.server.NAME),
            (R.SERVER_SECURITY_GROUPS, A.secgroup.NAME),
            (R.LOAD_BALANCERS, A.loadbalancer.NAME),
            (R.DATABASES, A.database.NAME),
            (R.BUCKETS, A.NAME),
            (R.QUEUES, A.NAME)):
        self[stanza_key] = self._convert_to_list(stanza_key, name_key)

    self._prepare_ssh_keys()
    self._prepare_secgroups()
    self._prepare_tags()
    self._prepare_dbs()
    self._prepare_servers()
    self._prepare_load_balancers()
    self._prepare_ansible()
[ "def", "prepare", "(", "self", ")", ":", "# TODO: take server_common_attributes and disperse it among the various", "# server stanzas", "# First stage - turn all the dicts (SERVER, SECGROUP, DATABASE, LOADBAL)", "# into lists now they're merged properly", "for", "stanza_key", ",", "name_key", "in", "(", "(", "R", ".", "SERVERS", ",", "A", ".", "server", ".", "NAME", ")", ",", "(", "R", ".", "SERVER_SECURITY_GROUPS", ",", "A", ".", "secgroup", ".", "NAME", ")", ",", "(", "R", ".", "LOAD_BALANCERS", ",", "A", ".", "loadbalancer", ".", "NAME", ")", ",", "(", "R", ".", "DATABASES", ",", "A", ".", "database", ".", "NAME", ")", ",", "(", "R", ".", "BUCKETS", ",", "A", ".", "NAME", ")", ",", "(", "R", ".", "QUEUES", ",", "A", ".", "NAME", ")", ")", ":", "self", "[", "stanza_key", "]", "=", "self", ".", "_convert_to_list", "(", "stanza_key", ",", "name_key", ")", "self", ".", "_prepare_ssh_keys", "(", ")", "self", ".", "_prepare_secgroups", "(", ")", "self", ".", "_prepare_tags", "(", ")", "self", ".", "_prepare_dbs", "(", ")", "self", ".", "_prepare_servers", "(", ")", "self", ".", "_prepare_load_balancers", "(", ")", "self", ".", "_prepare_ansible", "(", ")" ]
Reorganizes the data such that the deployment logic can find it all where it expects to be. The raw configuration file is intended to be as human-friendly as possible partly through the following mechanisms: - In order to minimize repetition, any attributes that are common to all server configurations can be specified in the ``server_common_attributes`` stanza even though the stanza itself does not map directly to a deployable resource. - For reference locality, each security group stanza contains its list of rules even though rules are actually created in a separate stage from the groups themselves. In order to make the :class:`Config` object more useful to the program logic, this method performs the following transformations: - Distributes the ``server_common_attributes`` among all the members of the ``servers`` stanza. - Extracts security group rules to a top-level key, and interpolates all source and target values.
[ "Reorganizes", "the", "data", "such", "that", "the", "deployment", "logic", "can", "find", "it", "all", "where", "it", "expects", "to", "be", "." ]
python
train
sci-bots/dmf-device-ui
dmf_device_ui/plugin.py
https://github.com/sci-bots/dmf-device-ui/blob/05b480683c9fa43f91ce5a58de2fa90cdf363fc8/dmf_device_ui/plugin.py#L207-L218
def on_execute__set_surface_alphas(self, request):
    '''
    .. versionchanged:: 0.12
        Queue redraw after setting surface alphas.
    '''
    data = decode_content_data(request)
    logger.debug('[on_execute__set_surface_alphas] %s',
                 data['surface_alphas'])
    for name, alpha in data['surface_alphas'].iteritems():
        self.parent.canvas_slave.set_surface_alpha(name, alpha)
    self.parent.canvas_slave.render()
    gobject.idle_add(self.parent.canvas_slave.draw)
[ "def", "on_execute__set_surface_alphas", "(", "self", ",", "request", ")", ":", "data", "=", "decode_content_data", "(", "request", ")", "logger", ".", "debug", "(", "'[on_execute__set_surface_alphas] %s'", ",", "data", "[", "'surface_alphas'", "]", ")", "for", "name", ",", "alpha", "in", "data", "[", "'surface_alphas'", "]", ".", "iteritems", "(", ")", ":", "self", ".", "parent", ".", "canvas_slave", ".", "set_surface_alpha", "(", "name", ",", "alpha", ")", "self", ".", "parent", ".", "canvas_slave", ".", "render", "(", ")", "gobject", ".", "idle_add", "(", "self", ".", "parent", ".", "canvas_slave", ".", "draw", ")" ]
.. versionchanged:: 0.12 Queue redraw after setting surface alphas.
[ "..", "versionchanged", "::", "0", ".", "12", "Queue", "redraw", "after", "setting", "surface", "alphas", "." ]
python
train
sixty-north/cosmic-ray
src/cosmic_ray/plugins.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/plugins.py#L43-L50
def operator_names():
    """Get all operator names.

    Returns: A sequence of operator names.
    """
    return tuple('{}/{}'.format(provider_name, operator_name)
                 for provider_name, provider in OPERATOR_PROVIDERS.items()
                 for operator_name in provider)
[ "def", "operator_names", "(", ")", ":", "return", "tuple", "(", "'{}/{}'", ".", "format", "(", "provider_name", ",", "operator_name", ")", "for", "provider_name", ",", "provider", "in", "OPERATOR_PROVIDERS", ".", "items", "(", ")", "for", "operator_name", "in", "provider", ")" ]
Get all operator names. Returns: A sequence of operator names.
[ "Get", "all", "operator", "names", "." ]
python
train
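The comprehension in operator_names() above simply flattens a provider-to-operators mapping into 'provider/operator' strings. The standalone illustration below uses a stand-in provider map; the real OPERATOR_PROVIDERS is populated from plugin entry points, and these operator names are invented.

OPERATOR_PROVIDERS = {
    'core': {'number_replacer': object(), 'break_continue': object()},
    'extras': {'zero_iteration_for_loop': object()},
}

names = tuple('{}/{}'.format(provider_name, operator_name)
              for provider_name, provider in OPERATOR_PROVIDERS.items()
              for operator_name in provider)
print(sorted(names))
# ['core/break_continue', 'core/number_replacer', 'extras/zero_iteration_for_loop']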
saltstack/salt
salt/states/selinux.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/selinux.py#L458-L481
def fcontext_policy_applied(name, recursive=False):
    '''
    .. versionadded:: 2017.7.0

    Checks and makes sure the SELinux policies for a given filespec are
    applied.
    '''
    ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}

    changes_text = __salt__['selinux.fcontext_policy_is_applied'](name, recursive)
    if changes_text == '':
        ret.update({'result': True,
                    'comment': 'SElinux policies are already applied for filespec "{0}"'.format(name)})
        return ret
    if __opts__['test']:
        ret.update({'result': None})
    else:
        apply_ret = __salt__['selinux.fcontext_apply_policy'](name, recursive)
        if apply_ret['retcode'] != 0:
            ret.update({'comment': apply_ret})
        else:
            ret.update({'result': True})
            ret.update({'changes': apply_ret.get('changes')})
    return ret
[ "def", "fcontext_policy_applied", "(", "name", ",", "recursive", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "changes_text", "=", "__salt__", "[", "'selinux.fcontext_policy_is_applied'", "]", "(", "name", ",", "recursive", ")", "if", "changes_text", "==", "''", ":", "ret", ".", "update", "(", "{", "'result'", ":", "True", ",", "'comment'", ":", "'SElinux policies are already applied for filespec \"{0}\"'", ".", "format", "(", "name", ")", "}", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", ".", "update", "(", "{", "'result'", ":", "None", "}", ")", "else", ":", "apply_ret", "=", "__salt__", "[", "'selinux.fcontext_apply_policy'", "]", "(", "name", ",", "recursive", ")", "if", "apply_ret", "[", "'retcode'", "]", "!=", "0", ":", "ret", ".", "update", "(", "{", "'comment'", ":", "apply_ret", "}", ")", "else", ":", "ret", ".", "update", "(", "{", "'result'", ":", "True", "}", ")", "ret", ".", "update", "(", "{", "'changes'", ":", "apply_ret", ".", "get", "(", "'changes'", ")", "}", ")", "return", "ret" ]
.. versionadded:: 2017.7.0 Checks and makes sure the SELinux policies for a given filespec are applied.
[ "..", "versionadded", "::", "2017", ".", "7", ".", "0" ]
python
train
chrisrink10/basilisp
src/basilisp/importer.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/importer.py#L46-L79
def _get_basilisp_bytecode(
    fullname: str, mtime: int, source_size: int, cache_data: bytes
) -> List[types.CodeType]:
    """Unmarshal the bytes from a Basilisp bytecode cache file, validating the
    file header prior to returning. If the file header does not match, throw
    an exception."""
    exc_details = {"name": fullname}
    magic = cache_data[:4]
    raw_timestamp = cache_data[4:8]
    raw_size = cache_data[8:12]
    if magic != MAGIC_NUMBER:
        message = (
            f"Incorrect magic number ({magic}) in {fullname}; expected {MAGIC_NUMBER}"
        )
        logger.debug(message)
        raise ImportError(message, **exc_details)  # type: ignore
    elif len(raw_timestamp) != 4:
        message = f"Reached EOF while reading timestamp in {fullname}"
        logger.debug(message)
        raise EOFError(message)
    elif _r_long(raw_timestamp) != mtime:
        message = f"Non-matching timestamp ({_r_long(raw_timestamp)}) in {fullname} bytecode cache; expected {mtime}"
        logger.debug(message)
        raise ImportError(message, **exc_details)  # type: ignore
    elif len(raw_size) != 4:
        message = f"Reached EOF while reading size of source in {fullname}"
        logger.debug(message)
        raise EOFError(message)
    elif _r_long(raw_size) != source_size:
        message = f"Non-matching filesize ({_r_long(raw_size)}) in {fullname} bytecode cache; expected {source_size}"
        logger.debug(message)
        raise ImportError(message, **exc_details)  # type: ignore

    return marshal.loads(cache_data[12:])
[ "def", "_get_basilisp_bytecode", "(", "fullname", ":", "str", ",", "mtime", ":", "int", ",", "source_size", ":", "int", ",", "cache_data", ":", "bytes", ")", "->", "List", "[", "types", ".", "CodeType", "]", ":", "exc_details", "=", "{", "\"name\"", ":", "fullname", "}", "magic", "=", "cache_data", "[", ":", "4", "]", "raw_timestamp", "=", "cache_data", "[", "4", ":", "8", "]", "raw_size", "=", "cache_data", "[", "8", ":", "12", "]", "if", "magic", "!=", "MAGIC_NUMBER", ":", "message", "=", "(", "f\"Incorrect magic number ({magic}) in {fullname}; expected {MAGIC_NUMBER}\"", ")", "logger", ".", "debug", "(", "message", ")", "raise", "ImportError", "(", "message", ",", "*", "*", "exc_details", ")", "# type: ignore", "elif", "len", "(", "raw_timestamp", ")", "!=", "4", ":", "message", "=", "f\"Reached EOF while reading timestamp in {fullname}\"", "logger", ".", "debug", "(", "message", ")", "raise", "EOFError", "(", "message", ")", "elif", "_r_long", "(", "raw_timestamp", ")", "!=", "mtime", ":", "message", "=", "f\"Non-matching timestamp ({_r_long(raw_timestamp)}) in {fullname} bytecode cache; expected {mtime}\"", "logger", ".", "debug", "(", "message", ")", "raise", "ImportError", "(", "message", ",", "*", "*", "exc_details", ")", "# type: ignore", "elif", "len", "(", "raw_size", ")", "!=", "4", ":", "message", "=", "f\"Reached EOF while reading size of source in {fullname}\"", "logger", ".", "debug", "(", "message", ")", "raise", "EOFError", "(", "message", ")", "elif", "_r_long", "(", "raw_size", ")", "!=", "source_size", ":", "message", "=", "f\"Non-matching filesize ({_r_long(raw_size)}) in {fullname} bytecode cache; expected {source_size}\"", "logger", ".", "debug", "(", "message", ")", "raise", "ImportError", "(", "message", ",", "*", "*", "exc_details", ")", "# type: ignore", "return", "marshal", ".", "loads", "(", "cache_data", "[", "12", ":", "]", ")" ]
Unmarshal the bytes from a Basilisp bytecode cache file, validating the file header prior to returning. If the file header does not match, throw an exception.
[ "Unmarshal", "the", "bytes", "from", "a", "Basilisp", "bytecode", "cache", "file", "validating", "the", "file", "header", "prior", "to", "returning", ".", "If", "the", "file", "header", "does", "not", "match", "throw", "an", "exception", "." ]
python
test
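The validator above implies a simple cache layout: 4-byte magic, 4-byte mtime, 4-byte source size, then the marshalled code objects. The sketch below builds such a buffer and slices it at the same offsets; the magic value and the little-endian byte order are assumptions mirroring importlib-style caches, not values taken from Basilisp.

import marshal
import struct

MAGIC_NUMBER = b'BSL1'                      # stand-in; the real constant lives in the importer module
code = compile('x = 1 + 1', '<cache-demo>', 'exec')
payload = marshal.dumps([code])

mtime, source_size = 1650000000, 42
cache_data = MAGIC_NUMBER + struct.pack('<II', mtime, source_size) + payload

# The reader slices the same offsets: [:4], [4:8], [8:12], [12:].
assert cache_data[:4] == MAGIC_NUMBER
assert struct.unpack('<I', cache_data[4:8])[0] == mtime
assert struct.unpack('<I', cache_data[8:12])[0] == source_size
print(len(marshal.loads(cache_data[12:])))  # -> 1 code object recovered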
googledatalab/pydatalab
solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py#L188-L198
def calculate_embedding(self, batch_image_bytes):
    """Get the embeddings for a given JPEG image.

    Args:
      batch_image_bytes: As if returned from [ff.read() for ff in file_list].

    Returns:
      The Inception embeddings (bottleneck layer output)
    """
    return self.tf_session.run(
        self.embedding, feed_dict={self.input_jpeg: batch_image_bytes})
[ "def", "calculate_embedding", "(", "self", ",", "batch_image_bytes", ")", ":", "return", "self", ".", "tf_session", ".", "run", "(", "self", ".", "embedding", ",", "feed_dict", "=", "{", "self", ".", "input_jpeg", ":", "batch_image_bytes", "}", ")" ]
Get the embeddings for a given JPEG image. Args: batch_image_bytes: As if returned from [ff.read() for ff in file_list]. Returns: The Inception embeddings (bottleneck layer output)
[ "Get", "the", "embeddings", "for", "a", "given", "JPEG", "image", "." ]
python
train
collectiveacuity/labPack
labpack/speech/watson.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/speech/watson.py#L110-L149
def _get_data(self, file_url, file_name='', method_title='', argument_title=''):

    ''' a helper method to retrieve data buffer for a file url

    :param file_url: string with url to file
    :param file_name: [optional] string with name to affix to file buffer
    :param method_title: [optional] string with name of class method calling
    :param argument_title: [optional] string with name of method argument key
    :return: byte data buffer with file data
    '''

    # https://docs.python.org/3/library/io.html#io.BytesIO
    import io
    import requests

    # fill empty values
    if not file_name:
        file_name = 'file'
    if not method_title:
        method_title = '%s._get_data' % self.__class__.__name__
    if not argument_title:
        argument_title = 'file_url'

    # request file from url
    try:
        remote_file = requests.get(file_url)
    except requests.exceptions.ConnectionError as err:
        if self.requests_handler:
            return self.requests_handler(err)
        else:
            raise
    except:
        raise ValueError('%s(%s=%s) is not a valid url.' % (method_title, argument_title, file_url))

    # add contents to buffer
    file_buffer = io.BytesIO(remote_file.content)
    file_buffer.name = '%s' % file_name

    return file_buffer
[ "def", "_get_data", "(", "self", ",", "file_url", ",", "file_name", "=", "''", ",", "method_title", "=", "''", ",", "argument_title", "=", "''", ")", ":", "# https://docs.python.org/3/library/io.html#io.BytesIO\r", "import", "io", "import", "requests", "# fill empty values\r", "if", "not", "file_name", ":", "file_name", "=", "'file'", "if", "not", "method_title", ":", "method_title", "=", "'%s._get_data'", "%", "self", ".", "__class__", ".", "__name__", "if", "not", "argument_title", ":", "argument_title", "=", "'file_url'", "# request file from url\r", "try", ":", "remote_file", "=", "requests", ".", "get", "(", "file_url", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "err", ":", "if", "self", ".", "requests_handler", ":", "return", "self", ".", "requests_handler", "(", "err", ")", "else", ":", "raise", "except", ":", "raise", "ValueError", "(", "'%s(%s=%s) is not a valid url.'", "%", "(", "method_title", ",", "argument_title", ",", "file_url", ")", ")", "# add contents to buffer\r", "file_buffer", "=", "io", ".", "BytesIO", "(", "remote_file", ".", "content", ")", "file_buffer", ".", "name", "=", "'%s'", "%", "file_name", "return", "file_buffer" ]
a helper method to retrieve data buffer for a file url :param file_url: string with url to file :param file_name: [optional] string with name to affix to file buffer :param method_title: [optional] string with name of class method calling :param argument_title: [optional] string with name of method argument key :return: byte data buffer with file data
[ "a", "helper", "method", "to", "retrieve", "data", "buffer", "for", "a", "file", "url", ":", "param", "file_url", ":", "string", "with", "url", "to", "file", ":", "param", "file_name", ":", "[", "optional", "]", "string", "with", "name", "to", "affix", "to", "file", "buffer", ":", "param", "method_title", ":", "[", "optional", "]", "string", "with", "name", "of", "class", "method", "calling", ":", "param", "argument_title", ":", "[", "optional", "]", "string", "with", "name", "of", "method", "argument", "key", ":", "return", ":", "byte", "data", "buffer", "with", "file", "data" ]
python
train
rraadd88/rohan
rohan/dandage/align/align_annot.py
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/align/align_annot.py#L334-L363
def dannotsagg2dannots2dalignbedannot(cfg):
    """
    Map aggregated annotations to queries
    step#9

    :param cfg: configuration dict
    """
    datatmpd=cfg['datatmpd']
    dannotsagg=del_Unnamed(pd.read_csv(cfg['dannotsaggp'],sep='\t'))
    dalignbedstats=del_Unnamed(pd.read_csv(cfg['dalignbedstatsp'],sep='\t'))

    dalignbedannotp=cfg['dalignbedannotp']
    logging.info(basename(dalignbedannotp))
    if not exists(dalignbedannotp) or cfg['force']:
        # df2info(dalignbed)
        # df2info(dannotsagg)
        dalignbedannot=dalignbedstats.set_index('id').join(set_index(dannotsagg,'id'),
                                                           rsuffix=' annotation')
        dalignbedannot['NM']=dalignbedannot['NM'].apply(int)
        # from rohan.dandage.get_scores import get_beditorscore_per_alignment,get_cfdscore
        # dalignbedannot['beditor score']=dalignbedannot.apply(lambda x : get_beditorscore_per_alignment(NM=x['NM'],
        #                                             genic=True if x['region']=='genic' else False,
        #                                             alignment=x['alignment'],
        #                                             pam_length=len(x['PAM']),
        #                                             pam_position=x['original position'],
        #                                             # test=cfg['test'],
        #                                             ),axis=1)
        # dalignbedannot['CFD score']=dalignbedannot.apply(lambda x : get_cfdscore(x['query sequence'].upper(), x['aligned sequence'].upper()), axis=1)
        dalignbedannot.to_csv(dalignbedannotp,sep='\t')
    return cfg
[ "def", "dannotsagg2dannots2dalignbedannot", "(", "cfg", ")", ":", "datatmpd", "=", "cfg", "[", "'datatmpd'", "]", "dannotsagg", "=", "del_Unnamed", "(", "pd", ".", "read_csv", "(", "cfg", "[", "'dannotsaggp'", "]", ",", "sep", "=", "'\\t'", ")", ")", "dalignbedstats", "=", "del_Unnamed", "(", "pd", ".", "read_csv", "(", "cfg", "[", "'dalignbedstatsp'", "]", ",", "sep", "=", "'\\t'", ")", ")", "dalignbedannotp", "=", "cfg", "[", "'dalignbedannotp'", "]", "logging", ".", "info", "(", "basename", "(", "dalignbedannotp", ")", ")", "if", "not", "exists", "(", "dalignbedannotp", ")", "or", "cfg", "[", "'force'", "]", ":", "# df2info(dalignbed)", "# df2info(dannotsagg)", "dalignbedannot", "=", "dalignbedstats", ".", "set_index", "(", "'id'", ")", ".", "join", "(", "set_index", "(", "dannotsagg", ",", "'id'", ")", ",", "rsuffix", "=", "' annotation'", ")", "dalignbedannot", "[", "'NM'", "]", "=", "dalignbedannot", "[", "'NM'", "]", ".", "apply", "(", "int", ")", "# from rohan.dandage.get_scores import get_beditorscore_per_alignment,get_cfdscore", "# dalignbedannot['beditor score']=dalignbedannot.apply(lambda x : get_beditorscore_per_alignment(NM=x['NM'],", "# genic=True if x['region']=='genic' else False,", "# alignment=x['alignment'],", "# pam_length=len(x['PAM']),", "# pam_position=x['original position'],", "# # test=cfg['test'],", "# ),axis=1) ", "# dalignbedannot['CFD score']=dalignbedannot.apply(lambda x : get_cfdscore(x['query sequence'].upper(), x['aligned sequence'].upper()), axis=1) ", "dalignbedannot", ".", "to_csv", "(", "dalignbedannotp", ",", "sep", "=", "'\\t'", ")", "return", "cfg" ]
Map aggregated annotations to queries step#9 :param cfg: configuration dict
[ "Map", "aggregated", "annotations", "to", "queries", "step#9" ]
python
train
apache/spark
python/pyspark/streaming/context.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L286-L313
def queueStream(self, rdds, oneAtATime=True, default=None):
    """
    Create an input stream from a queue of RDDs or list. In each batch,
    it will process either one or all of the RDDs returned by the queue.

    .. note:: Changes to the queue after the stream is created will not be recognized.

    @param rdds:       Queue of RDDs
    @param oneAtATime: pick one rdd each time or pick all of them once.
    @param default:    The default rdd if no more in rdds
    """
    if default and not isinstance(default, RDD):
        default = self._sc.parallelize(default)

    if not rdds and default:
        rdds = [rdds]

    if rdds and not isinstance(rdds[0], RDD):
        rdds = [self._sc.parallelize(input) for input in rdds]
    self._check_serializers(rdds)

    queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
    if default:
        default = default._reserialize(rdds[0]._jrdd_deserializer)
        jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
    else:
        jdstream = self._jssc.queueStream(queue, oneAtATime)
    return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
[ "def", "queueStream", "(", "self", ",", "rdds", ",", "oneAtATime", "=", "True", ",", "default", "=", "None", ")", ":", "if", "default", "and", "not", "isinstance", "(", "default", ",", "RDD", ")", ":", "default", "=", "self", ".", "_sc", ".", "parallelize", "(", "default", ")", "if", "not", "rdds", "and", "default", ":", "rdds", "=", "[", "rdds", "]", "if", "rdds", "and", "not", "isinstance", "(", "rdds", "[", "0", "]", ",", "RDD", ")", ":", "rdds", "=", "[", "self", ".", "_sc", ".", "parallelize", "(", "input", ")", "for", "input", "in", "rdds", "]", "self", ".", "_check_serializers", "(", "rdds", ")", "queue", "=", "self", ".", "_jvm", ".", "PythonDStream", ".", "toRDDQueue", "(", "[", "r", ".", "_jrdd", "for", "r", "in", "rdds", "]", ")", "if", "default", ":", "default", "=", "default", ".", "_reserialize", "(", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")", "jdstream", "=", "self", ".", "_jssc", ".", "queueStream", "(", "queue", ",", "oneAtATime", ",", "default", ".", "_jrdd", ")", "else", ":", "jdstream", "=", "self", ".", "_jssc", ".", "queueStream", "(", "queue", ",", "oneAtATime", ")", "return", "DStream", "(", "jdstream", ",", "self", ",", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Create an input stream from a queue of RDDs or list. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. @param rdds: Queue of RDDs @param oneAtATime: pick one rdd each time or pick all of them once. @param default: The default rdd if no more in rdds
[ "Create", "an", "input", "stream", "from", "a", "queue", "of", "RDDs", "or", "list", ".", "In", "each", "batch", "it", "will", "process", "either", "one", "or", "all", "of", "the", "RDDs", "returned", "by", "the", "queue", "." ]
python
train
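A typical way to drive the method above, following the PySpark streaming examples: each queued RDD is consumed as one batch when oneAtATime is True. Cluster setup details are kept minimal and the master/app names are placeholders.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext('local[2]', 'queue-demo')
ssc = StreamingContext(sc, batchDuration=1)

rdd_queue = [sc.parallelize(range(i * 10, i * 10 + 5)) for i in range(3)]
stream = ssc.queueStream(rdd_queue, oneAtATime=True)
stream.pprint()

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop(stopSparkContext=True)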
quantmind/pulsar
pulsar/utils/path.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/path.py#L72-L77
def ispymodule(self): '''Check if this :class:`Path` is a python module.''' if self.isdir(): return os.path.isfile(os.path.join(self, '__init__.py')) elif self.isfile(): return self.endswith('.py')
[ "def", "ispymodule", "(", "self", ")", ":", "if", "self", ".", "isdir", "(", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "self", ",", "'__init__.py'", ")", ")", "elif", "self", ".", "isfile", "(", ")", ":", "return", "self", ".", "endswith", "(", "'.py'", ")" ]
Check if this :class:`Path` is a python module.
[ "Check", "if", "this", ":", "class", ":", "Path", "is", "a", "python", "module", "." ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L152-L178
def from_file (self, file, file_location, project): """ Creates a virtual target with appropriate name and type from 'file'. If a target with that name in that project was already created, returns that already created target. TODO: more correct way would be to compute path to the file, based on name and source location for the project, and use that path to determine if the target was already created. TODO: passing project with all virtual targets starts to be annoying. """ if __debug__: from .targets import ProjectTarget assert isinstance(file, basestring) assert isinstance(file_location, basestring) assert isinstance(project, ProjectTarget) # Check if we've created a target corresponding to this file. path = os.path.join(os.getcwd(), file_location, file) path = os.path.normpath(path) if path in self.files_: return self.files_ [path] file_type = b2.build.type.type (file) result = FileTarget (file, file_type, project, None, file_location) self.files_ [path] = result return result
[ "def", "from_file", "(", "self", ",", "file", ",", "file_location", ",", "project", ")", ":", "if", "__debug__", ":", "from", ".", "targets", "import", "ProjectTarget", "assert", "isinstance", "(", "file", ",", "basestring", ")", "assert", "isinstance", "(", "file_location", ",", "basestring", ")", "assert", "isinstance", "(", "project", ",", "ProjectTarget", ")", "# Check if we've created a target corresponding to this file.", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "file_location", ",", "file", ")", "path", "=", "os", ".", "path", ".", "normpath", "(", "path", ")", "if", "path", "in", "self", ".", "files_", ":", "return", "self", ".", "files_", "[", "path", "]", "file_type", "=", "b2", ".", "build", ".", "type", ".", "type", "(", "file", ")", "result", "=", "FileTarget", "(", "file", ",", "file_type", ",", "project", ",", "None", ",", "file_location", ")", "self", ".", "files_", "[", "path", "]", "=", "result", "return", "result" ]
Creates a virtual target with appropriate name and type from 'file'. If a target with that name in that project was already created, returns that already created target. TODO: more correct way would be to compute path to the file, based on name and source location for the project, and use that path to determine if the target was already created. TODO: passing project with all virtual targets starts to be annoying.
[ "Creates", "a", "virtual", "target", "with", "appropriate", "name", "and", "type", "from", "file", ".", "If", "a", "target", "with", "that", "name", "in", "that", "project", "was", "already", "created", "returns", "that", "already", "created", "target", ".", "TODO", ":", "more", "correct", "way", "would", "be", "to", "compute", "path", "to", "the", "file", "based", "on", "name", "and", "source", "location", "for", "the", "project", "and", "use", "that", "path", "to", "determine", "if", "the", "target", "was", "already", "created", ".", "TODO", ":", "passing", "project", "with", "all", "virtual", "targets", "starts", "to", "be", "annoying", "." ]
python
train
openego/ding0
ding0/tools/logger.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/tools/logger.py#L55-L66
def get_default_home_dir(): """ Return default home directory of Ding0 Returns ------- :any:`str` Default home directory including its path """ ding0_dir = str(cfg_ding0.get('config', 'config_dir')) return os.path.join(os.path.expanduser('~'), ding0_dir)
[ "def", "get_default_home_dir", "(", ")", ":", "ding0_dir", "=", "str", "(", "cfg_ding0", ".", "get", "(", "'config'", ",", "'config_dir'", ")", ")", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "ding0_dir", ")" ]
Return default home directory of Ding0 Returns ------- :any:`str` Default home directory including its path
[ "Return", "default", "home", "directory", "of", "Ding0" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4116-L4129
def get_stp_mst_detail_output_cist_port_if_role(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") port = ET.SubElement(cist, "port") if_role = ET.SubElement(port, "if-role") if_role.text = kwargs.pop('if_role') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_cist_port_if_role", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=", "get_stp_mst_detail", "output", "=", "ET", ".", "SubElement", "(", "get_stp_mst_detail", ",", "\"output\"", ")", "cist", "=", "ET", ".", "SubElement", "(", "output", ",", "\"cist\"", ")", "port", "=", "ET", ".", "SubElement", "(", "cist", ",", "\"port\"", ")", "if_role", "=", "ET", ".", "SubElement", "(", "port", ",", "\"if-role\"", ")", "if_role", ".", "text", "=", "kwargs", ".", "pop", "(", "'if_role'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
google/grr
grr/client/grr_response_client/client_actions/searching.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/searching.py#L96-L165
def BuildChecks(self, request): """Parses request and returns a list of filter callables. Each callable will be called with the StatEntry and returns True if the entry should be suppressed. Args: request: A FindSpec that describes the search. Returns: a list of callables which return True if the file is to be suppressed. """ result = [] if request.HasField("start_time") or request.HasField("end_time"): def FilterTimestamp(file_stat, request=request): return file_stat.HasField("st_mtime") and ( file_stat.st_mtime < request.start_time or file_stat.st_mtime > request.end_time) result.append(FilterTimestamp) if request.HasField("min_file_size") or request.HasField("max_file_size"): def FilterSize(file_stat, request=request): return file_stat.HasField("st_size") and ( file_stat.st_size < request.min_file_size or file_stat.st_size > request.max_file_size) result.append(FilterSize) if request.HasField("perm_mode"): def FilterPerms(file_stat, request=request): return (file_stat.st_mode & request.perm_mask) != request.perm_mode result.append(FilterPerms) if request.HasField("uid"): def FilterUID(file_stat, request=request): return file_stat.st_uid != request.uid result.append(FilterUID) if request.HasField("gid"): def FilterGID(file_stat, request=request): return file_stat.st_gid != request.gid result.append(FilterGID) if request.HasField("path_regex"): regex = request.path_regex def FilterPath(file_stat, regex=regex): """Suppress any filename not matching the regular expression.""" return not regex.Search(file_stat.pathspec.Basename()) result.append(FilterPath) if request.HasField("data_regex"): def FilterData(file_stat, **_): """Suppress files that do not match the content.""" return not self.TestFileContent(file_stat) result.append(FilterData) return result
[ "def", "BuildChecks", "(", "self", ",", "request", ")", ":", "result", "=", "[", "]", "if", "request", ".", "HasField", "(", "\"start_time\"", ")", "or", "request", ".", "HasField", "(", "\"end_time\"", ")", ":", "def", "FilterTimestamp", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_mtime\"", ")", "and", "(", "file_stat", ".", "st_mtime", "<", "request", ".", "start_time", "or", "file_stat", ".", "st_mtime", ">", "request", ".", "end_time", ")", "result", ".", "append", "(", "FilterTimestamp", ")", "if", "request", ".", "HasField", "(", "\"min_file_size\"", ")", "or", "request", ".", "HasField", "(", "\"max_file_size\"", ")", ":", "def", "FilterSize", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_size\"", ")", "and", "(", "file_stat", ".", "st_size", "<", "request", ".", "min_file_size", "or", "file_stat", ".", "st_size", ">", "request", ".", "max_file_size", ")", "result", ".", "append", "(", "FilterSize", ")", "if", "request", ".", "HasField", "(", "\"perm_mode\"", ")", ":", "def", "FilterPerms", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "(", "file_stat", ".", "st_mode", "&", "request", ".", "perm_mask", ")", "!=", "request", ".", "perm_mode", "result", ".", "append", "(", "FilterPerms", ")", "if", "request", ".", "HasField", "(", "\"uid\"", ")", ":", "def", "FilterUID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_uid", "!=", "request", ".", "uid", "result", ".", "append", "(", "FilterUID", ")", "if", "request", ".", "HasField", "(", "\"gid\"", ")", ":", "def", "FilterGID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_gid", "!=", "request", ".", "gid", "result", ".", "append", "(", "FilterGID", ")", "if", "request", ".", "HasField", "(", "\"path_regex\"", ")", ":", "regex", "=", "request", ".", "path_regex", "def", "FilterPath", "(", "file_stat", ",", "regex", "=", "regex", ")", ":", "\"\"\"Suppress any filename not matching the regular expression.\"\"\"", "return", "not", "regex", ".", "Search", "(", "file_stat", ".", "pathspec", ".", "Basename", "(", ")", ")", "result", ".", "append", "(", "FilterPath", ")", "if", "request", ".", "HasField", "(", "\"data_regex\"", ")", ":", "def", "FilterData", "(", "file_stat", ",", "*", "*", "_", ")", ":", "\"\"\"Suppress files that do not match the content.\"\"\"", "return", "not", "self", ".", "TestFileContent", "(", "file_stat", ")", "result", ".", "append", "(", "FilterData", ")", "return", "result" ]
Parses request and returns a list of filter callables. Each callable will be called with the StatEntry and returns True if the entry should be suppressed. Args: request: A FindSpec that describes the search. Returns: a list of callables which return True if the file is to be suppressed.
[ "Parses", "request", "and", "returns", "a", "list", "of", "filter", "callables", "." ]
python
train
mrcagney/gtfstk
gtfstk/validators.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/validators.py#L670-L707
def check_calendar_dates( feed: "Feed", *, as_df: bool = False, include_warnings: bool = False ) -> List: """ Analog of :func:`check_agency` for ``feed.calendar_dates``. """ table = "calendar_dates" problems = [] # Preliminary checks if feed.calendar_dates is None: return problems f = feed.calendar_dates.copy() problems = check_for_required_columns(problems, table, f) if problems: return format_problems(problems, as_df=as_df) if include_warnings: problems = check_for_invalid_columns(problems, table, f) # Check service_id problems = check_column(problems, table, f, "service_id", valid_str) # Check date problems = check_column(problems, table, f, "date", valid_date) # No duplicate (service_id, date) pairs allowed cond = f[["service_id", "date"]].duplicated() problems = check_table( problems, table, f, cond, "Repeated pair (service_id, date)" ) # Check exception_type v = lambda x: x in [1, 2] problems = check_column(problems, table, f, "exception_type", v) return format_problems(problems, as_df=as_df)
[ "def", "check_calendar_dates", "(", "feed", ":", "\"Feed\"", ",", "*", ",", "as_df", ":", "bool", "=", "False", ",", "include_warnings", ":", "bool", "=", "False", ")", "->", "List", ":", "table", "=", "\"calendar_dates\"", "problems", "=", "[", "]", "# Preliminary checks", "if", "feed", ".", "calendar_dates", "is", "None", ":", "return", "problems", "f", "=", "feed", ".", "calendar_dates", ".", "copy", "(", ")", "problems", "=", "check_for_required_columns", "(", "problems", ",", "table", ",", "f", ")", "if", "problems", ":", "return", "format_problems", "(", "problems", ",", "as_df", "=", "as_df", ")", "if", "include_warnings", ":", "problems", "=", "check_for_invalid_columns", "(", "problems", ",", "table", ",", "f", ")", "# Check service_id", "problems", "=", "check_column", "(", "problems", ",", "table", ",", "f", ",", "\"service_id\"", ",", "valid_str", ")", "# Check date", "problems", "=", "check_column", "(", "problems", ",", "table", ",", "f", ",", "\"date\"", ",", "valid_date", ")", "# No duplicate (service_id, date) pairs allowed", "cond", "=", "f", "[", "[", "\"service_id\"", ",", "\"date\"", "]", "]", ".", "duplicated", "(", ")", "problems", "=", "check_table", "(", "problems", ",", "table", ",", "f", ",", "cond", ",", "\"Repeated pair (service_id, date)\"", ")", "# Check exception_type", "v", "=", "lambda", "x", ":", "x", "in", "[", "1", ",", "2", "]", "problems", "=", "check_column", "(", "problems", ",", "table", ",", "f", ",", "\"exception_type\"", ",", "v", ")", "return", "format_problems", "(", "problems", ",", "as_df", "=", "as_df", ")" ]
Analog of :func:`check_agency` for ``feed.calendar_dates``.
[ "Analog", "of", ":", "func", ":", "check_agency", "for", "feed", ".", "calendar_dates", "." ]
python
train
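Since check_calendar_dates belongs to gtfstk's validator suite, a short usage sketch may help. It assumes a GTFS zip at the illustrative path below and that the validator is reachable from the package namespace, neither of which is confirmed by this record.

import gtfstk

feed = gtfstk.read_gtfs("feed.zip", dist_units="km")  # illustrative path
report = gtfstk.check_calendar_dates(feed, as_df=True, include_warnings=True)
print(report)  # one row per detected problem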
KE-works/pykechain
pykechain/models/scope.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/scope.py#L152-L159
def service(self, *args, **kwargs): """Retrieve a single service belonging to this scope. See :class:`pykechain.Client.service` for available parameters. .. versionadded:: 1.13 """ return self._client.service(*args, scope=self.id, **kwargs)
[ "def", "service", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_client", ".", "service", "(", "*", "args", ",", "scope", "=", "self", ".", "id", ",", "*", "*", "kwargs", ")" ]
Retrieve a single service belonging to this scope. See :class:`pykechain.Client.service` for available parameters. .. versionadded:: 1.13
[ "Retrieve", "a", "single", "service", "belonging", "to", "this", "scope", "." ]
python
train
timothyb0912/pylogit
pylogit/nested_logit.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/nested_logit.py#L238-L256
def convenience_calc_probs(self, params): """ Calculates the probabilities of the chosen alternative, and the long format probabilities for this model and dataset. """ orig_nest_coefs, betas = self.convenience_split_params(params) natural_nest_coefs = nc.naturalize_nest_coefs(orig_nest_coefs) args = [natural_nest_coefs, betas, self.design, self.rows_to_obs, self.rows_to_nests] kwargs = {"chosen_row_to_obs": self.chosen_row_to_obs, "return_type": "long_and_chosen_probs"} probability_results = general_calc_probabilities(*args, **kwargs) return probability_results
[ "def", "convenience_calc_probs", "(", "self", ",", "params", ")", ":", "orig_nest_coefs", ",", "betas", "=", "self", ".", "convenience_split_params", "(", "params", ")", "natural_nest_coefs", "=", "nc", ".", "naturalize_nest_coefs", "(", "orig_nest_coefs", ")", "args", "=", "[", "natural_nest_coefs", ",", "betas", ",", "self", ".", "design", ",", "self", ".", "rows_to_obs", ",", "self", ".", "rows_to_nests", "]", "kwargs", "=", "{", "\"chosen_row_to_obs\"", ":", "self", ".", "chosen_row_to_obs", ",", "\"return_type\"", ":", "\"long_and_chosen_probs\"", "}", "probability_results", "=", "general_calc_probabilities", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "probability_results" ]
Calculates the probabilities of the chosen alternative, and the long format probabilities for this model and dataset.
[ "Calculates", "the", "probabilities", "of", "the", "chosen", "alternative", "and", "the", "long", "format", "probabilities", "for", "this", "model", "and", "dataset", "." ]
python
train
sawcordwell/pymdptoolbox
src/examples/firemdp.py
https://github.com/sawcordwell/pymdptoolbox/blob/7c96789cc80e280437005c12065cf70266c11636/src/examples/firemdp.py#L83-L102
def convertIndexToState(index): """Convert transition probability matrix index to state parameters. Parameters ---------- index : int The index into the transition probability matrix that corresponds to the state parameters. Returns ------- population, fire : tuple of int ``population``, the population abundance class of the threatened species. ``fire``, the time in years since last fire. """ assert index < STATES population = index // FIRE_CLASSES fire = index % FIRE_CLASSES return(population, fire)
[ "def", "convertIndexToState", "(", "index", ")", ":", "assert", "index", "<", "STATES", "population", "=", "index", "//", "FIRE_CLASSES", "fire", "=", "index", "%", "FIRE_CLASSES", "return", "(", "population", ",", "fire", ")" ]
Convert transition probability matrix index to state parameters. Parameters ---------- index : int The index into the transition probability matrix that corresponds to the state parameters. Returns ------- population, fire : tuple of int ``population``, the population abundance class of the threatened species. ``fire``, the time in years since last fire.
[ "Convert", "transition", "probability", "matrix", "index", "to", "state", "parameters", ".", "Parameters", "----------", "index", ":", "int", "The", "index", "into", "the", "transition", "probability", "matrix", "that", "corresponds", "to", "the", "state", "parameters", ".", "Returns", "-------", "population", "fire", ":", "tuple", "of", "int", "population", "the", "population", "abundance", "class", "of", "the", "threatened", "species", ".", "fire", "the", "time", "in", "years", "since", "last", "fire", "." ]
python
train
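convertIndexToState is plain divmod arithmetic over the module's state grid. A tiny worked example, assuming for illustration that FIRE_CLASSES is 13; the real module defines its own constants.

FIRE_CLASSES = 13  # illustrative value only

def convert_index_to_state(index):
    # (population abundance class, years since last fire)
    return divmod(index, FIRE_CLASSES)

assert convert_index_to_state(0) == (0, 0)
assert convert_index_to_state(14) == (1, 1)   # 14 // 13 == 1, 14 % 13 == 1
assert convert_index_to_state(25) == (1, 12)  # 25 // 13 == 1, 25 % 13 == 12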
bjodah/pycompilation
pycompilation/util.py
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L262-L272
def save_to_metadata_file(cls, dirpath, key, value): """ Store `key: value` in metadata file dict. """ fullpath = os.path.join(dirpath, cls.metadata_filename) if os.path.exists(fullpath): d = pickle.load(open(fullpath, 'rb')) d.update({key: value}) pickle.dump(d, open(fullpath, 'wb')) else: pickle.dump({key: value}, open(fullpath, 'wb'))
[ "def", "save_to_metadata_file", "(", "cls", ",", "dirpath", ",", "key", ",", "value", ")", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "cls", ".", "metadata_filename", ")", "if", "os", ".", "path", ".", "exists", "(", "fullpath", ")", ":", "d", "=", "pickle", ".", "load", "(", "open", "(", "fullpath", ",", "'rb'", ")", ")", "d", ".", "update", "(", "{", "key", ":", "value", "}", ")", "pickle", ".", "dump", "(", "d", ",", "open", "(", "fullpath", ",", "'wb'", ")", ")", "else", ":", "pickle", ".", "dump", "(", "{", "key", ":", "value", "}", ",", "open", "(", "fullpath", ",", "'wb'", ")", ")" ]
Store `key: value` in metadata file dict.
[ "Store", "key", ":", "value", "in", "metadata", "file", "dict", "." ]
python
train
brean/python-pathfinding
pathfinding/core/util.py
https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/util.py#L97-L108
def expand_path(path): ''' Given a compressed path, return a new path that has all the segments in it interpolated. ''' expanded = [] if len(path) < 2: return expanded for i in range(len(path)-1): expanded += bresenham(path[i], path[i + 1]) expanded += [path[:-1]] return expanded
[ "def", "expand_path", "(", "path", ")", ":", "expanded", "=", "[", "]", "if", "len", "(", "path", ")", "<", "2", ":", "return", "expanded", "for", "i", "in", "range", "(", "len", "(", "path", ")", "-", "1", ")", ":", "expanded", "+=", "bresenham", "(", "path", "[", "i", "]", ",", "path", "[", "i", "+", "1", "]", ")", "expanded", "+=", "[", "path", "[", ":", "-", "1", "]", "]", "return", "expanded" ]
Given a compressed path, return a new path that has all the segments in it interpolated.
[ "Given", "a", "compressed", "path", "return", "a", "new", "path", "that", "has", "all", "the", "segments", "in", "it", "interpolated", "." ]
python
train
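expand_path leans on a bresenham helper to interpolate grid cells between consecutive waypoints. Below is a self-contained stand-in for that interpolation step, written for illustration rather than copied from the module.

def bresenham_demo(p0, p1):
    # Stand-in line-interpolation helper: grid cells from p0 to p1, endpoints included.
    (x0, y0), (x1, y1) = p0, p1
    dx, dy = abs(x1 - x0), -abs(y1 - y0)
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx + dy
    points = []
    while True:
        points.append((x0, y0))
        if (x0, y0) == (x1, y1):
            break
        e2 = 2 * err
        if e2 >= dy:
            err += dy
            x0 += sx
        if e2 <= dx:
            err += dx
            y0 += sy
    return points

print(bresenham_demo((0, 0), (3, 2)))  # [(0, 0), (1, 1), (2, 1), (3, 2)]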
fulfilio/python-magento
magento/checkout.py
https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/checkout.py#L51-L62
def order(self, quote_id, store_view=None, license_id=None): """ Allows you to create an order from a shopping cart (quote). Before placing the order, you need to add the customer, customer address, shipping and payment methods. :param quote_id: Shopping cart ID (quote ID) :param store_view: Store view ID or code :param license_id: Website license ID :return: string, result of creating order """ return self.call('cart.order', [quote_id, store_view, license_id])
[ "def", "order", "(", "self", ",", "quote_id", ",", "store_view", "=", "None", ",", "license_id", "=", "None", ")", ":", "return", "self", ".", "call", "(", "'cart.order'", ",", "[", "quote_id", ",", "store_view", ",", "license_id", "]", ")" ]
Allows you to create an order from a shopping cart (quote). Before placing the order, you need to add the customer, customer address, shipping and payment methods. :param quote_id: Shopping cart ID (quote ID) :param store_view: Store view ID or code :param license_id: Website license ID :return: string, result of creating order
[ "Allows", "you", "to", "create", "an", "order", "from", "a", "shopping", "cart", "(", "quote", ")", ".", "Before", "placing", "the", "order", "you", "need", "to", "add", "the", "customer", "customer", "address", "shipping", "and", "payment", "methods", "." ]
python
train
MartinThoma/mpu
mpu/io.py
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/io.py#L352-L367
def get_access_datetime(filepath): """ Get the last time filepath was accessed. Parameters ---------- filepath : str Returns ------- access_datetime : datetime.datetime """ import tzlocal tz = tzlocal.get_localzone() mtime = datetime.fromtimestamp(os.path.getatime(filepath)) return mtime.replace(tzinfo=tz)
[ "def", "get_access_datetime", "(", "filepath", ")", ":", "import", "tzlocal", "tz", "=", "tzlocal", ".", "get_localzone", "(", ")", "mtime", "=", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getatime", "(", "filepath", ")", ")", "return", "mtime", ".", "replace", "(", "tzinfo", "=", "tz", ")" ]
Get the last time filepath was accessed. Parameters ---------- filepath : str Returns ------- access_datetime : datetime.datetime
[ "Get", "the", "last", "time", "filepath", "was", "accessed", "." ]
python
train
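A one-line usage sketch for get_access_datetime, assuming the tzlocal dependency is installed and the file exists.

import mpu.io

print(mpu.io.get_access_datetime(__file__))  # timezone-aware datetime of last access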
StackStorm/pybind
pybind/slxos/v17r_1_01a/mpls_state/rsvp/interfaces/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/mpls_state/rsvp/interfaces/__init__.py#L168-L191
def _set_interface_type(self, v, load=False): """ Setter method for interface_type, mapped from YANG variable /mpls_state/rsvp/interfaces/interface_type (dcm-interface-type) If this variable is read-only (config: false) in the source YANG file, then _set_interface_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_interface_type() directly. YANG Description: MPLS RSVP interface type """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-interface-type-unknown': {'value': 1}, u'dcm-interface-type-loopback': {'value': 7}, u'dcm-interface-type-ve': {'value': 6}, u'dcm-interface-type-ethernet': {'value': 2}, u'dcm-interface-type-fiber-channel': {'value': 8}, u'dcm-interface-type-port-channel': {'value': 5}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='dcm-interface-type', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """interface_type must be of a type compatible with dcm-interface-type""", 'defined-type': "brocade-mpls-operational:dcm-interface-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-interface-type-unknown': {'value': 1}, u'dcm-interface-type-loopback': {'value': 7}, u'dcm-interface-type-ve': {'value': 6}, u'dcm-interface-type-ethernet': {'value': 2}, u'dcm-interface-type-fiber-channel': {'value': 8}, u'dcm-interface-type-port-channel': {'value': 5}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='dcm-interface-type', is_config=False)""", }) self.__interface_type = t if hasattr(self, '_set'): self._set()
[ "def", "_set_interface_type", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_type", "=", "\"dict_key\"", ",", "restriction_arg", "=", "{", "u'dcm-interface-type-unknown'", ":", "{", "'value'", ":", "1", "}", ",", "u'dcm-interface-type-loopback'", ":", "{", "'value'", ":", "7", "}", ",", "u'dcm-interface-type-ve'", ":", "{", "'value'", ":", "6", "}", ",", "u'dcm-interface-type-ethernet'", ":", "{", "'value'", ":", "2", "}", ",", "u'dcm-interface-type-fiber-channel'", ":", "{", "'value'", ":", "8", "}", ",", "u'dcm-interface-type-port-channel'", ":", "{", "'value'", ":", "5", "}", "}", ",", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"interface-type\"", ",", "rest_name", "=", "\"interface-type\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls-operational'", ",", "defining_module", "=", "'brocade-mpls-operational'", ",", "yang_type", "=", "'dcm-interface-type'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"interface_type must be of a type compatible with dcm-interface-type\"\"\"", ",", "'defined-type'", ":", "\"brocade-mpls-operational:dcm-interface-type\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'dcm-interface-type-unknown': {'value': 1}, u'dcm-interface-type-loopback': {'value': 7}, u'dcm-interface-type-ve': {'value': 6}, u'dcm-interface-type-ethernet': {'value': 2}, u'dcm-interface-type-fiber-channel': {'value': 8}, u'dcm-interface-type-port-channel': {'value': 5}},), is_leaf=True, yang_name=\"interface-type\", rest_name=\"interface-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='dcm-interface-type', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__interface_type", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for interface_type, mapped from YANG variable /mpls_state/rsvp/interfaces/interface_type (dcm-interface-type) If this variable is read-only (config: false) in the source YANG file, then _set_interface_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_interface_type() directly. YANG Description: MPLS RSVP interface type
[ "Setter", "method", "for", "interface_type", "mapped", "from", "YANG", "variable", "/", "mpls_state", "/", "rsvp", "/", "interfaces", "/", "interface_type", "(", "dcm", "-", "interface", "-", "type", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_interface_type", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_interface_type", "()", "directly", "." ]
python
train
transceptor-technology/trender
trender/block.py
https://github.com/transceptor-technology/trender/blob/ef2b7374ea2ecc83dceb139b358ec4ad8ce7033b/trender/block.py#L121-L125
def _reset_plain(self): '''Create a BlockText from the captured lines and clear _text.''' if self._text: self._blocks.append(BlockText('\n'.join(self._text))) self._text.clear()
[ "def", "_reset_plain", "(", "self", ")", ":", "if", "self", ".", "_text", ":", "self", ".", "_blocks", ".", "append", "(", "BlockText", "(", "'\\n'", ".", "join", "(", "self", ".", "_text", ")", ")", ")", "self", ".", "_text", ".", "clear", "(", ")" ]
Create a BlockText from the captured lines and clear _text.
[ "Create", "a", "BlockText", "from", "the", "captured", "lines", "and", "clear", "_text", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xchart/xchartrenderer.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartrenderer.py#L106-L115
def buildData(self, key, default=None): """ Returns the build information for the given key. :param key | <str> default | <variant> :return <variant> """ return self._buildData.get(nativestring(key), default)
[ "def", "buildData", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "return", "self", ".", "_buildData", ".", "get", "(", "nativestring", "(", "key", ")", ",", "default", ")" ]
Returns the build information for the given key. :param key | <str> default | <variant> :return <variant>
[ "Returns", "the", "build", "information", "for", "the", "given", "key", ".", ":", "param", "key", "|", "<str", ">", "default", "|", "<variant", ">", ":", "return", "<variant", ">" ]
python
train
hydpy-dev/hydpy
hydpy/core/itemtools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/itemtools.py#L516-L560
def update_variables(self) -> None: """Add the general |ChangeItem.value| with the |Device| specific base variable and assign the result to the respective target variable. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, TestIO = prepare_full_example_2() >>> from hydpy.models.hland_v1 import FIELD >>> for element in hp.elements.catchment: ... control = element.model.parameters.control ... control.nmbzones(3) ... control.zonetype(FIELD) ... control.rfcf(1.1) >>> from hydpy.core.itemtools import AddItem >>> item = AddItem( ... 'sfcf', 'hland_v1', 'control.sfcf', 'control.rfcf', 1) >>> item.collect_variables(pub.selections) >>> land_dill = hp.elements.land_dill >>> land_dill.model.parameters.control.sfcf sfcf(?) >>> item.value = -0.1, 0.0, 0.1 >>> item.update_variables() >>> land_dill.model.parameters.control.sfcf sfcf(1.0, 1.1, 1.2) >>> land_dill.model.parameters.control.rfcf.shape = 2 >>> land_dill.model.parameters.control.rfcf = 1.1 >>> item.update_variables() # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: When trying to add the value(s) `[-0.1 0. 0.1]` of \ AddItem `sfcf` and the value(s) `[ 1.1 1.1]` of variable `rfcf` of element \ `land_dill`, the following error occurred: operands could not be broadcast \ together with shapes (2,) (3,)... """ value = self.value for device, target in self.device2target.items(): base = self.device2base[device] try: result = base.value + value except BaseException: raise objecttools.augment_excmessage( f'When trying to add the value(s) `{value}` of ' f'AddItem `{self.name}` and the value(s) `{base.value}` ' f'of variable {objecttools.devicephrase(base)}') self.update_variable(target, result)
[ "def", "update_variables", "(", "self", ")", "->", "None", ":", "value", "=", "self", ".", "value", "for", "device", ",", "target", "in", "self", ".", "device2target", ".", "items", "(", ")", ":", "base", "=", "self", ".", "device2base", "[", "device", "]", "try", ":", "result", "=", "base", ".", "value", "+", "value", "except", "BaseException", ":", "raise", "objecttools", ".", "augment_excmessage", "(", "f'When trying to add the value(s) `{value}` of '", "f'AddItem `{self.name}` and the value(s) `{base.value}` '", "f'of variable {objecttools.devicephrase(base)}'", ")", "self", ".", "update_variable", "(", "target", ",", "result", ")" ]
Add the general |ChangeItem.value| with the |Device| specific base variable and assign the result to the respective target variable. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, TestIO = prepare_full_example_2() >>> from hydpy.models.hland_v1 import FIELD >>> for element in hp.elements.catchment: ... control = element.model.parameters.control ... control.nmbzones(3) ... control.zonetype(FIELD) ... control.rfcf(1.1) >>> from hydpy.core.itemtools import AddItem >>> item = AddItem( ... 'sfcf', 'hland_v1', 'control.sfcf', 'control.rfcf', 1) >>> item.collect_variables(pub.selections) >>> land_dill = hp.elements.land_dill >>> land_dill.model.parameters.control.sfcf sfcf(?) >>> item.value = -0.1, 0.0, 0.1 >>> item.update_variables() >>> land_dill.model.parameters.control.sfcf sfcf(1.0, 1.1, 1.2) >>> land_dill.model.parameters.control.rfcf.shape = 2 >>> land_dill.model.parameters.control.rfcf = 1.1 >>> item.update_variables() # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: When trying to add the value(s) `[-0.1 0. 0.1]` of \ AddItem `sfcf` and the value(s) `[ 1.1 1.1]` of variable `rfcf` of element \ `land_dill`, the following error occurred: operands could not be broadcast \ together with shapes (2,) (3,)...
[ "Add", "the", "general", "|ChangeItem", ".", "value|", "with", "the", "|Device|", "specific", "base", "variable", "and", "assign", "the", "result", "to", "the", "respective", "target", "variable", "." ]
python
train
wummel/linkchecker
linkcheck/checker/httpurl.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/httpurl.py#L193-L201
def _get_ssl_sock(self): """Get raw SSL socket.""" assert self.scheme == u"https", self raw_connection = self.url_connection.raw._connection if raw_connection.sock is None: # sometimes the socket is not yet connected # see https://github.com/kennethreitz/requests/issues/1966 raw_connection.connect() return raw_connection.sock
[ "def", "_get_ssl_sock", "(", "self", ")", ":", "assert", "self", ".", "scheme", "==", "u\"https\"", ",", "self", "raw_connection", "=", "self", ".", "url_connection", ".", "raw", ".", "_connection", "if", "raw_connection", ".", "sock", "is", "None", ":", "# sometimes the socket is not yet connected", "# see https://github.com/kennethreitz/requests/issues/1966", "raw_connection", ".", "connect", "(", ")", "return", "raw_connection", ".", "sock" ]
Get raw SSL socket.
[ "Get", "raw", "SSL", "socket", "." ]
python
train
madsbk/lrcloud
lrcloud/__main__.py
https://github.com/madsbk/lrcloud/blob/8d99be3e1abdf941642e9a1c86b7d775dc373c0b/lrcloud/__main__.py#L57-L71
def copy_smart_previews(local_catalog, cloud_catalog, local2cloud=True): """Copy Smart Previews from local to cloud or vica versa when 'local2cloud==False' NB: nothing happens if source dir doesn't exist""" lcat_noext = local_catalog[0:local_catalog.rfind(".lrcat")] ccat_noext = cloud_catalog[0:cloud_catalog.rfind(".lrcat")] lsmart = join(dirname(local_catalog),"%s Smart Previews.lrdata"%basename(lcat_noext)) csmart = join(dirname(cloud_catalog),"%s Smart Previews.lrdata"%basename(ccat_noext)) if local2cloud and os.path.isdir(lsmart): logging.info("Copy Smart Previews - local to cloud: %s => %s"%(lsmart, csmart)) distutils.dir_util.copy_tree(lsmart,csmart, update=1) elif os.path.isdir(csmart): logging.info("Copy Smart Previews - cloud to local: %s => %s"%(csmart, lsmart)) distutils.dir_util.copy_tree(csmart,lsmart, update=1)
[ "def", "copy_smart_previews", "(", "local_catalog", ",", "cloud_catalog", ",", "local2cloud", "=", "True", ")", ":", "lcat_noext", "=", "local_catalog", "[", "0", ":", "local_catalog", ".", "rfind", "(", "\".lrcat\"", ")", "]", "ccat_noext", "=", "cloud_catalog", "[", "0", ":", "cloud_catalog", ".", "rfind", "(", "\".lrcat\"", ")", "]", "lsmart", "=", "join", "(", "dirname", "(", "local_catalog", ")", ",", "\"%s Smart Previews.lrdata\"", "%", "basename", "(", "lcat_noext", ")", ")", "csmart", "=", "join", "(", "dirname", "(", "cloud_catalog", ")", ",", "\"%s Smart Previews.lrdata\"", "%", "basename", "(", "ccat_noext", ")", ")", "if", "local2cloud", "and", "os", ".", "path", ".", "isdir", "(", "lsmart", ")", ":", "logging", ".", "info", "(", "\"Copy Smart Previews - local to cloud: %s => %s\"", "%", "(", "lsmart", ",", "csmart", ")", ")", "distutils", ".", "dir_util", ".", "copy_tree", "(", "lsmart", ",", "csmart", ",", "update", "=", "1", ")", "elif", "os", ".", "path", ".", "isdir", "(", "csmart", ")", ":", "logging", ".", "info", "(", "\"Copy Smart Previews - cloud to local: %s => %s\"", "%", "(", "csmart", ",", "lsmart", ")", ")", "distutils", ".", "dir_util", ".", "copy_tree", "(", "csmart", ",", "lsmart", ",", "update", "=", "1", ")" ]
Copy Smart Previews from local to cloud or vica versa when 'local2cloud==False' NB: nothing happens if source dir doesn't exist
[ "Copy", "Smart", "Previews", "from", "local", "to", "cloud", "or", "vica", "versa", "when", "local2cloud", "==", "False", "NB", ":", "nothing", "happens", "if", "source", "dir", "doesn", "t", "exist" ]
python
valid
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L340-L359
def hide_routemap_holder_route_map_content_match_community_community_access_list_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") match = ET.SubElement(content, "match") community = ET.SubElement(match, "community") community_access_list_name = ET.SubElement(community, "community-access-list-name") community_access_list_name.text = kwargs.pop('community_access_list_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "hide_routemap_holder_route_map_content_match_community_community_access_list_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-routemap-holder\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ip-policy\"", ")", "route_map", "=", "ET", ".", "SubElement", "(", "hide_routemap_holder", ",", "\"route-map\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "action_rm_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"action-rm\"", ")", "action_rm_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'action_rm'", ")", "instance_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"instance\"", ")", "instance_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'instance'", ")", "content", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"content\"", ")", "match", "=", "ET", ".", "SubElement", "(", "content", ",", "\"match\"", ")", "community", "=", "ET", ".", "SubElement", "(", "match", ",", "\"community\"", ")", "community_access_list_name", "=", "ET", ".", "SubElement", "(", "community", ",", "\"community-access-list-name\"", ")", "community_access_list_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'community_access_list_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
smnorris/pgdata
pgdata/__init__.py
https://github.com/smnorris/pgdata/blob/8b0294024d5ef30b4ae9184888e2cc7004d1784e/pgdata/__init__.py#L47-L65
def drop_db(url): """Drop specified database """ parsed_url = urlparse(url) db_name = parsed_url.path db_name = db_name.strip("/") db = connect("postgresql://" + parsed_url.netloc) # check that db exists q = """SELECT 1 as exists FROM pg_database WHERE datname = '{db_name}'""".format( db_name=db_name ) if db.query(q).fetchone(): # DROP DATABASE must be run outside of a transaction conn = db.engine.connect() conn.execute("commit") conn.execute("DROP DATABASE " + db_name) conn.close()
[ "def", "drop_db", "(", "url", ")", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "db_name", "=", "parsed_url", ".", "path", "db_name", "=", "db_name", ".", "strip", "(", "\"/\"", ")", "db", "=", "connect", "(", "\"postgresql://\"", "+", "parsed_url", ".", "netloc", ")", "# check that db exists", "q", "=", "\"\"\"SELECT 1 as exists\n FROM pg_database\n WHERE datname = '{db_name}'\"\"\"", ".", "format", "(", "db_name", "=", "db_name", ")", "if", "db", ".", "query", "(", "q", ")", ".", "fetchone", "(", ")", ":", "# DROP DATABASE must be run outside of a transaction", "conn", "=", "db", ".", "engine", ".", "connect", "(", ")", "conn", ".", "execute", "(", "\"commit\"", ")", "conn", ".", "execute", "(", "\"DROP DATABASE \"", "+", "db_name", ")", "conn", ".", "close", "(", ")" ]
Drop specified database
[ "Drop", "specified", "database" ]
python
train
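drop_db is destructive, so this usage sketch targets an obviously throwaway database; the connection URL is illustrative.

import pgdata

# Really drops the named database if it exists; no-op otherwise.
pgdata.drop_db("postgresql://user:password@localhost:5432/scratch_db")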
kejbaly2/metrique
metrique/plotting.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L97-L119
def plot(self, series, label='', color=None, style=None): ''' Wrapper around plot. :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot. ''' color = self.get_color(color) if self.stacked: series += self.running_sum plt.fill_between(series.index, self.running_sum, series, facecolor=ALPHAS[color]) self.running_sum = series plt.gca().set_ylim(bottom=0, top=int(series.max() * 1.05)) series.plot(label=label, c=COLORS[color], linewidth=2, style=style)
[ "def", "plot", "(", "self", ",", "series", ",", "label", "=", "''", ",", "color", "=", "None", ",", "style", "=", "None", ")", ":", "color", "=", "self", ".", "get_color", "(", "color", ")", "if", "self", ".", "stacked", ":", "series", "+=", "self", ".", "running_sum", "plt", ".", "fill_between", "(", "series", ".", "index", ",", "self", ".", "running_sum", ",", "series", ",", "facecolor", "=", "ALPHAS", "[", "color", "]", ")", "self", ".", "running_sum", "=", "series", "plt", ".", "gca", "(", ")", ".", "set_ylim", "(", "bottom", "=", "0", ",", "top", "=", "int", "(", "series", ".", "max", "(", ")", "*", "1.05", ")", ")", "series", ".", "plot", "(", "label", "=", "label", ",", "c", "=", "COLORS", "[", "color", "]", ",", "linewidth", "=", "2", ",", "style", "=", "style", ")" ]
Wrapper around plot. :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot.
[ "Wrapper", "around", "plot", "." ]
python
train
n1analytics/python-paillier
phe/command_line.py
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/phe/command_line.py#L30-L66
def generate_keypair(keysize, id, output): """Generate a paillier private key. Output as JWK to given output file. Use "-" to output the private key to stdout. See the extract command to extract the public component of the private key. Note: The default ID text includes the current time. """ log("Generating a paillier keypair with keysize of {}".format(keysize)) pub, priv = phe.generate_paillier_keypair(n_length=keysize) log("Keys generated") date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") jwk_public = { 'kty': "DAJ", 'alg': "PAI-GN1", "key_ops": ["encrypt"], 'n': phe.util.int_to_base64(pub.n), 'kid': "Paillier public key generated by pheutil on {}".format(date) } jwk_private = { 'kty': "DAJ", 'key_ops': ["decrypt"], 'p': phe.util.int_to_base64(priv.p), 'q': phe.util.int_to_base64(priv.q), 'pub': jwk_public, 'kid': "Paillier private key generated by pheutil on {}".format(date) } json.dump(jwk_private, output) output.write('\n') log("Private key written to {}".format(output.name))
[ "def", "generate_keypair", "(", "keysize", ",", "id", ",", "output", ")", ":", "log", "(", "\"Generating a paillier keypair with keysize of {}\"", ".", "format", "(", "keysize", ")", ")", "pub", ",", "priv", "=", "phe", ".", "generate_paillier_keypair", "(", "n_length", "=", "keysize", ")", "log", "(", "\"Keys generated\"", ")", "date", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "jwk_public", "=", "{", "'kty'", ":", "\"DAJ\"", ",", "'alg'", ":", "\"PAI-GN1\"", ",", "\"key_ops\"", ":", "[", "\"encrypt\"", "]", ",", "'n'", ":", "phe", ".", "util", ".", "int_to_base64", "(", "pub", ".", "n", ")", ",", "'kid'", ":", "\"Paillier public key generated by pheutil on {}\"", ".", "format", "(", "date", ")", "}", "jwk_private", "=", "{", "'kty'", ":", "\"DAJ\"", ",", "'key_ops'", ":", "[", "\"decrypt\"", "]", ",", "'p'", ":", "phe", ".", "util", ".", "int_to_base64", "(", "priv", ".", "p", ")", ",", "'q'", ":", "phe", ".", "util", ".", "int_to_base64", "(", "priv", ".", "q", ")", ",", "'pub'", ":", "jwk_public", ",", "'kid'", ":", "\"Paillier private key generated by pheutil on {}\"", ".", "format", "(", "date", ")", "}", "json", ".", "dump", "(", "jwk_private", ",", "output", ")", "output", ".", "write", "(", "'\\n'", ")", "log", "(", "\"Private key written to {}\"", ".", "format", "(", "output", ".", "name", ")", ")" ]
Generate a paillier private key. Output as JWK to given output file. Use "-" to output the private key to stdout. See the extract command to extract the public component of the private key. Note: The default ID text includes the current time.
[ "Generate", "a", "paillier", "private", "key", "." ]
python
train
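The CLI command above wraps the phe library's keypair generation. The library-level equivalent looks roughly like this; the key size is kept small only so the sketch runs quickly.

import phe

public_key, private_key = phe.generate_paillier_keypair(n_length=1024)

ciphertext = public_key.encrypt(42)
assert private_key.decrypt(ciphertext + 8) == 50  # additive homomorphism on ciphertexts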
pypa/pipenv
pipenv/patched/notpip/_internal/req/req_uninstall.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/req/req_uninstall.py#L99-L134
def compress_for_rename(paths): """Returns a set containing the paths that need to be renamed. This set may include directories when the original sequence of paths included every file on disk. """ case_map = dict((os.path.normcase(p), p) for p in paths) remaining = set(case_map) unchecked = sorted(set(os.path.split(p)[0] for p in case_map.values()), key=len) wildcards = set() def norm_join(*a): return os.path.normcase(os.path.join(*a)) for root in unchecked: if any(os.path.normcase(root).startswith(w) for w in wildcards): # This directory has already been handled. continue all_files = set() all_subdirs = set() for dirname, subdirs, files in os.walk(root): all_subdirs.update(norm_join(root, dirname, d) for d in subdirs) all_files.update(norm_join(root, dirname, f) for f in files) # If all the files we found are in our remaining set of files to # remove, then remove them from the latter set and add a wildcard # for the directory. if not (all_files - remaining): remaining.difference_update(all_files) wildcards.add(root + os.sep) return set(map(case_map.__getitem__, remaining)) | wildcards
[ "def", "compress_for_rename", "(", "paths", ")", ":", "case_map", "=", "dict", "(", "(", "os", ".", "path", ".", "normcase", "(", "p", ")", ",", "p", ")", "for", "p", "in", "paths", ")", "remaining", "=", "set", "(", "case_map", ")", "unchecked", "=", "sorted", "(", "set", "(", "os", ".", "path", ".", "split", "(", "p", ")", "[", "0", "]", "for", "p", "in", "case_map", ".", "values", "(", ")", ")", ",", "key", "=", "len", ")", "wildcards", "=", "set", "(", ")", "def", "norm_join", "(", "*", "a", ")", ":", "return", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "join", "(", "*", "a", ")", ")", "for", "root", "in", "unchecked", ":", "if", "any", "(", "os", ".", "path", ".", "normcase", "(", "root", ")", ".", "startswith", "(", "w", ")", "for", "w", "in", "wildcards", ")", ":", "# This directory has already been handled.", "continue", "all_files", "=", "set", "(", ")", "all_subdirs", "=", "set", "(", ")", "for", "dirname", ",", "subdirs", ",", "files", "in", "os", ".", "walk", "(", "root", ")", ":", "all_subdirs", ".", "update", "(", "norm_join", "(", "root", ",", "dirname", ",", "d", ")", "for", "d", "in", "subdirs", ")", "all_files", ".", "update", "(", "norm_join", "(", "root", ",", "dirname", ",", "f", ")", "for", "f", "in", "files", ")", "# If all the files we found are in our remaining set of files to", "# remove, then remove them from the latter set and add a wildcard", "# for the directory.", "if", "not", "(", "all_files", "-", "remaining", ")", ":", "remaining", ".", "difference_update", "(", "all_files", ")", "wildcards", ".", "add", "(", "root", "+", "os", ".", "sep", ")", "return", "set", "(", "map", "(", "case_map", ".", "__getitem__", ",", "remaining", ")", ")", "|", "wildcards" ]
Returns a set containing the paths that need to be renamed. This set may include directories when the original sequence of paths included every file on disk.
[ "Returns", "a", "set", "containing", "the", "paths", "that", "need", "to", "be", "renamed", "." ]
python
train
mwgielen/jackal
jackal/scripts/relaying.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L136-L143
def terminate_processes(self): """ Terminate the processes. """ if self.relay: self.relay.terminate() if self.responder: self.responder.terminate()
[ "def", "terminate_processes", "(", "self", ")", ":", "if", "self", ".", "relay", ":", "self", ".", "relay", ".", "terminate", "(", ")", "if", "self", ".", "responder", ":", "self", ".", "responder", ".", "terminate", "(", ")" ]
Terminate the processes.
[ "Terminate", "the", "processes", "." ]
python
valid
singularitti/text-stream
text_stream/__init__.py
https://github.com/singularitti/text-stream/blob/4df53b98e9f61d983dbd46edd96db93122577eb5/text_stream/__init__.py#L75-L83
def infile_path(self) -> Optional[PurePath]: """ Read-only property. :return: A ``pathlib.PurePath`` object or ``None``. """ if not self.__infile_path: return Path(self.__infile_path).expanduser() return None
[ "def", "infile_path", "(", "self", ")", "->", "Optional", "[", "PurePath", "]", ":", "if", "not", "self", ".", "__infile_path", ":", "return", "Path", "(", "self", ".", "__infile_path", ")", ".", "expanduser", "(", ")", "return", "None" ]
Read-only property. :return: A ``pathlib.PurePath`` object or ``None``.
[ "Read", "-", "only", "property", "." ]
python
train
rndusr/torf
torf/_torrent.py
https://github.com/rndusr/torf/blob/df0363232daacd3f8c91aafddaa0623b8c28cbd2/torf/_torrent.py#L810-L832
def write_stream(self, stream, validate=True): """ Write :attr:`metainfo` to a file-like object Before any data is written, `stream` is truncated if possible. :param stream: Writable file-like object (e.g. :class:`io.BytesIO`) :param bool validate: Whether to run :meth:`validate` first :raises WriteError: if writing to `stream` fails :raises MetainfoError: if `validate` is `True` and :attr:`metainfo` contains invalid data """ content = self.dump(validate=validate) try: # Remove existing data from stream *after* dump() didn't raise # anything so we don't destroy it prematurely. if stream.seekable(): stream.seek(0) stream.truncate(0) stream.write(content) except OSError as e: raise error.WriteError(e.errno)
[ "def", "write_stream", "(", "self", ",", "stream", ",", "validate", "=", "True", ")", ":", "content", "=", "self", ".", "dump", "(", "validate", "=", "validate", ")", "try", ":", "# Remove existing data from stream *after* dump() didn't raise", "# anything so we don't destroy it prematurely.", "if", "stream", ".", "seekable", "(", ")", ":", "stream", ".", "seek", "(", "0", ")", "stream", ".", "truncate", "(", "0", ")", "stream", ".", "write", "(", "content", ")", "except", "OSError", "as", "e", ":", "raise", "error", ".", "WriteError", "(", "e", ".", "errno", ")" ]
Write :attr:`metainfo` to a file-like object Before any data is written, `stream` is truncated if possible. :param stream: Writable file-like object (e.g. :class:`io.BytesIO`) :param bool validate: Whether to run :meth:`validate` first :raises WriteError: if writing to `stream` fails :raises MetainfoError: if `validate` is `True` and :attr:`metainfo` contains invalid data
[ "Write", ":", "attr", ":", "metainfo", "to", "a", "file", "-", "like", "object" ]
python
train
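write_stream accepts any writable file-like object, so an in-memory buffer works. A sketch assuming a small local file to wrap; the file name and tracker URL are illustrative.

import io
import torf

t = torf.Torrent(path="example.txt",
                 trackers=["http://tracker.example.invalid/announce"])
t.generate()          # hash the pieces so validation passes

buf = io.BytesIO()
t.write_stream(buf)   # truncates the buffer, then writes the bencoded metainfo
print(buf.getvalue()[:40])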
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py#L636-L678
def sort(self, key_or_list, direction=None): """Sorts this cursor's results. Pass a field name and a direction, either :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) To sort by multiple fields, pass a list of (key, direction) pairs:: for doc in collection.find().sort([ ('field1', pymongo.ASCENDING), ('field2', pymongo.DESCENDING)]): print(doc) Beginning with MongoDB version 2.6, text search results can be sorted by relevance:: cursor = db.test.find( {'$text': {'$search': 'some words'}}, {'score': {'$meta': 'textScore'}}) # Sort by 'score' field. cursor.sort([('score', {'$meta': 'textScore'})]) for doc in cursor: print(doc) Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. Only the last :meth:`sort` applied to this cursor has any effect. :Parameters: - `key_or_list`: a single key or a list of (key, direction) pairs specifying the keys to sort on - `direction` (optional): only used if `key_or_list` is a single key, if not given :data:`~pymongo.ASCENDING` is assumed """ self.__check_okay_to_chain() keys = helpers._index_list(key_or_list, direction) self.__ordering = helpers._index_document(keys) return self
[ "def", "sort", "(", "self", ",", "key_or_list", ",", "direction", "=", "None", ")", ":", "self", ".", "__check_okay_to_chain", "(", ")", "keys", "=", "helpers", ".", "_index_list", "(", "key_or_list", ",", "direction", ")", "self", ".", "__ordering", "=", "helpers", ".", "_index_document", "(", "keys", ")", "return", "self" ]
Sorts this cursor's results. Pass a field name and a direction, either :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) To sort by multiple fields, pass a list of (key, direction) pairs:: for doc in collection.find().sort([ ('field1', pymongo.ASCENDING), ('field2', pymongo.DESCENDING)]): print(doc) Beginning with MongoDB version 2.6, text search results can be sorted by relevance:: cursor = db.test.find( {'$text': {'$search': 'some words'}}, {'score': {'$meta': 'textScore'}}) # Sort by 'score' field. cursor.sort([('score', {'$meta': 'textScore'})]) for doc in cursor: print(doc) Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. Only the last :meth:`sort` applied to this cursor has any effect. :Parameters: - `key_or_list`: a single key or a list of (key, direction) pairs specifying the keys to sort on - `direction` (optional): only used if `key_or_list` is a single key, if not given :data:`~pymongo.ASCENDING` is assumed
[ "Sorts", "this", "cursor", "s", "results", "." ]
python
train
sassoftware/saspy
saspy/sasproccommons.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasproccommons.py#L306-L326
def _objectmethods(self, obj: str, *args) -> list: """ This method parses the SAS log for artifacts (tables and graphics) that were created from the procedure method call :param obj: str -- proc object :param args: list likely none :return: list -- the tables and graphs available for tab complete """ code = "%listdata(" code += obj code += ");" self.logger.debug("Object Method macro call: " + str(code)) res = self.sas.submit(code, "text") meth = res['LOG'].splitlines() for i in range(len(meth)): meth[i] = meth[i].lstrip().rstrip() self.logger.debug('SAS Log: ' + res['LOG']) objlist = meth[meth.index('startparse9878') + 1:meth.index('endparse9878')] self.logger.debug("PROC attr list: " + str(objlist)) return objlist
[ "def", "_objectmethods", "(", "self", ",", "obj", ":", "str", ",", "*", "args", ")", "->", "list", ":", "code", "=", "\"%listdata(\"", "code", "+=", "obj", "code", "+=", "\");\"", "self", ".", "logger", ".", "debug", "(", "\"Object Method macro call: \"", "+", "str", "(", "code", ")", ")", "res", "=", "self", ".", "sas", ".", "submit", "(", "code", ",", "\"text\"", ")", "meth", "=", "res", "[", "'LOG'", "]", ".", "splitlines", "(", ")", "for", "i", "in", "range", "(", "len", "(", "meth", ")", ")", ":", "meth", "[", "i", "]", "=", "meth", "[", "i", "]", ".", "lstrip", "(", ")", ".", "rstrip", "(", ")", "self", ".", "logger", ".", "debug", "(", "'SAS Log: '", "+", "res", "[", "'LOG'", "]", ")", "objlist", "=", "meth", "[", "meth", ".", "index", "(", "'startparse9878'", ")", "+", "1", ":", "meth", ".", "index", "(", "'endparse9878'", ")", "]", "self", ".", "logger", ".", "debug", "(", "\"PROC attr list: \"", "+", "str", "(", "objlist", ")", ")", "return", "objlist" ]
This method parses the SAS log for artifacts (tables and graphics) that were created from the procedure method call :param obj: str -- proc object :param args: list likely none :return: list -- the tables and graphs available for tab complete
[ "This", "method", "parses", "the", "SAS", "log", "for", "artifacts", "(", "tables", "and", "graphics", ")", "that", "were", "created", "from", "the", "procedure", "method", "call" ]
python
train
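The parsing step in the _objectmethods entry above boils down to slicing the SAS log between two sentinel lines. A self-contained sketch of just that slice, using a made-up log string (the sentinel names are the ones the method looks for; the SAS submission itself is not reproduced):

def parse_proc_attrs(log: str) -> list:
    # Strip each line, then keep only what sits between the start and
    # end sentinels emitted by the %listdata macro.
    lines = [line.strip() for line in log.splitlines()]
    return lines[lines.index('startparse9878') + 1:lines.index('endparse9878')]

sample_log = """
NOTE: some SAS chatter
startparse9878
  FitStatistics
  ParameterEstimates
  ResidualPlot
endparse9878
NOTE: more chatter
"""
print(parse_proc_attrs(sample_log))
# ['FitStatistics', 'ParameterEstimates', 'ResidualPlot']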
zomux/deepy
deepy/networks/network.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/networks/network.py#L237-L249
def save_params(self, path, new_thread=False): """ Save parameters to file. """ save_logger.info(path) param_variables = self.all_parameters params = [p.get_value().copy() for p in param_variables] if new_thread: thread = Thread(target=save_network_params, args=(params, path)) thread.start() else: save_network_params(params, path) self.train_logger.save(path)
[ "def", "save_params", "(", "self", ",", "path", ",", "new_thread", "=", "False", ")", ":", "save_logger", ".", "info", "(", "path", ")", "param_variables", "=", "self", ".", "all_parameters", "params", "=", "[", "p", ".", "get_value", "(", ")", ".", "copy", "(", ")", "for", "p", "in", "param_variables", "]", "if", "new_thread", ":", "thread", "=", "Thread", "(", "target", "=", "save_network_params", ",", "args", "=", "(", "params", ",", "path", ")", ")", "thread", ".", "start", "(", ")", "else", ":", "save_network_params", "(", "params", ",", "path", ")", "self", ".", "train_logger", ".", "save", "(", "path", ")" ]
Save parameters to file.
[ "Save", "parameters", "to", "file", "." ]
python
test
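save_params above copies the parameter values first and only then hands them to a writer, optionally on a background thread, so training can continue while the file is written. A rough standalone sketch of that pattern; the pickle-based writer below is a hypothetical stand-in for the library's save_network_params helper, not its actual implementation:

import pickle
from threading import Thread

def write_params(params, path):
    # Hypothetical stand-in for save_network_params: serialize a list
    # of already-copied values to disk.
    with open(path, "wb") as f:
        pickle.dump(params, f)

def save_params(live_params, path, new_thread=False):
    # Copy first so later in-place updates don't race with the writer.
    snapshot = [p.copy() for p in live_params]
    if new_thread:
        Thread(target=write_params, args=(snapshot, path)).start()
    else:
        write_params(snapshot, path)

save_params([bytearray(b"w1"), bytearray(b"w2")], "params.pkl", new_thread=True)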
peterldowns/djoauth2
example/client_demo.py
https://github.com/peterldowns/djoauth2/blob/151c7619d1d7a91d720397cfecf3a29fcc9747a9/example/client_demo.py#L7-L16
def assert_200(response, max_len=500): """ Check that a HTTP response returned 200. """ if response.status_code == 200: return raise ValueError( "Response was {}, not 200:\n{}\n{}".format( response.status_code, json.dumps(dict(response.headers), indent=2), response.content[:max_len]))
[ "def", "assert_200", "(", "response", ",", "max_len", "=", "500", ")", ":", "if", "response", ".", "status_code", "==", "200", ":", "return", "raise", "ValueError", "(", "\"Response was {}, not 200:\\n{}\\n{}\"", ".", "format", "(", "response", ".", "status_code", ",", "json", ".", "dumps", "(", "dict", "(", "response", ".", "headers", ")", ",", "indent", "=", "2", ")", ",", "response", ".", "content", "[", ":", "max_len", "]", ")", ")" ]
Check that a HTTP response returned 200.
[ "Check", "that", "a", "HTTP", "response", "returned", "200", "." ]
python
train
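assert_200 above only needs three attributes from the response object, so it can be exercised without any HTTP traffic at all. A small sketch using a stand-in object (SimpleNamespace here is just a test double; it is not part of the demo client):

import json
from types import SimpleNamespace

def assert_200(response, max_len=500):
    """ Check that a HTTP response returned 200. """
    if response.status_code == 200:
        return
    raise ValueError(
        "Response was {}, not 200:\n{}\n{}".format(
            response.status_code,
            json.dumps(dict(response.headers), indent=2),
            response.content[:max_len]))

ok = SimpleNamespace(status_code=200, headers={}, content=b"")
bad = SimpleNamespace(status_code=403, headers={"WWW-Authenticate": "Bearer"},
                      content=b"denied")
assert_200(ok)          # passes silently
try:
    assert_200(bad)
except ValueError as exc:
    print(exc)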
spyder-ide/spyder
spyder/plugins/editor/panels/codefolding.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/codefolding.py#L394-L430
def mouseMoveEvent(self, event): """ Detect mouser over indicator and highlight the current scope in the editor (up and down decoration arround the foldable text when the mouse is over an indicator). :param event: event """ super(FoldingPanel, self).mouseMoveEvent(event) th = TextHelper(self.editor) line = th.line_nbr_from_position(event.pos().y()) if line >= 0: block = FoldScope.find_parent_scope( self.editor.document().findBlockByNumber(line-1)) if TextBlockHelper.is_fold_trigger(block): if self._mouse_over_line is None: # mouse enter fold scope QApplication.setOverrideCursor( QCursor(Qt.PointingHandCursor)) if self._mouse_over_line != block.blockNumber() and \ self._mouse_over_line is not None: # fold scope changed, a previous block was highlighter so # we quickly update our highlighting self._mouse_over_line = block.blockNumber() self._highlight_block(block) else: # same fold scope, request highlight self._mouse_over_line = block.blockNumber() self._highlight_runner.request_job( self._highlight_block, block) self._highight_block = block else: # no fold scope to highlight, cancel any pending requests self._highlight_runner.cancel_requests() self._mouse_over_line = None QApplication.restoreOverrideCursor() self.repaint()
[ "def", "mouseMoveEvent", "(", "self", ",", "event", ")", ":", "super", "(", "FoldingPanel", ",", "self", ")", ".", "mouseMoveEvent", "(", "event", ")", "th", "=", "TextHelper", "(", "self", ".", "editor", ")", "line", "=", "th", ".", "line_nbr_from_position", "(", "event", ".", "pos", "(", ")", ".", "y", "(", ")", ")", "if", "line", ">=", "0", ":", "block", "=", "FoldScope", ".", "find_parent_scope", "(", "self", ".", "editor", ".", "document", "(", ")", ".", "findBlockByNumber", "(", "line", "-", "1", ")", ")", "if", "TextBlockHelper", ".", "is_fold_trigger", "(", "block", ")", ":", "if", "self", ".", "_mouse_over_line", "is", "None", ":", "# mouse enter fold scope", "QApplication", ".", "setOverrideCursor", "(", "QCursor", "(", "Qt", ".", "PointingHandCursor", ")", ")", "if", "self", ".", "_mouse_over_line", "!=", "block", ".", "blockNumber", "(", ")", "and", "self", ".", "_mouse_over_line", "is", "not", "None", ":", "# fold scope changed, a previous block was highlighter so", "# we quickly update our highlighting", "self", ".", "_mouse_over_line", "=", "block", ".", "blockNumber", "(", ")", "self", ".", "_highlight_block", "(", "block", ")", "else", ":", "# same fold scope, request highlight", "self", ".", "_mouse_over_line", "=", "block", ".", "blockNumber", "(", ")", "self", ".", "_highlight_runner", ".", "request_job", "(", "self", ".", "_highlight_block", ",", "block", ")", "self", ".", "_highight_block", "=", "block", "else", ":", "# no fold scope to highlight, cancel any pending requests", "self", ".", "_highlight_runner", ".", "cancel_requests", "(", ")", "self", ".", "_mouse_over_line", "=", "None", "QApplication", ".", "restoreOverrideCursor", "(", ")", "self", ".", "repaint", "(", ")" ]
Detect mouse over indicator and highlight the current scope in the editor (up and down decoration around the foldable text when the mouse is over an indicator). :param event: event
[ "Detect", "mouser", "over", "indicator", "and", "highlight", "the", "current", "scope", "in", "the", "editor", "(", "up", "and", "down", "decoration", "arround", "the", "foldable", "text", "when", "the", "mouse", "is", "over", "an", "indicator", ")", "." ]
python
train
nerdvegas/rez
src/rezgui/models/ContextModel.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/models/ContextModel.py#L175-L203
def resolve_context(self, verbosity=0, max_fails=-1, timestamp=None, callback=None, buf=None, package_load_callback=None): """Update the current context by performing a re-resolve. The newly resolved context is only applied if it is a successful solve. Returns: `ResolvedContext` object, which may be a successful or failed solve. """ package_filter = PackageFilterList.from_pod(self.package_filter) context = ResolvedContext( self.request, package_paths=self.packages_path, package_filter=package_filter, verbosity=verbosity, max_fails=max_fails, timestamp=timestamp, buf=buf, callback=callback, package_load_callback=package_load_callback, caching=self.caching) if context.success: if self._context and self._context.load_path: context.set_load_path(self._context.load_path) self._set_context(context) self._modified = True return context
[ "def", "resolve_context", "(", "self", ",", "verbosity", "=", "0", ",", "max_fails", "=", "-", "1", ",", "timestamp", "=", "None", ",", "callback", "=", "None", ",", "buf", "=", "None", ",", "package_load_callback", "=", "None", ")", ":", "package_filter", "=", "PackageFilterList", ".", "from_pod", "(", "self", ".", "package_filter", ")", "context", "=", "ResolvedContext", "(", "self", ".", "request", ",", "package_paths", "=", "self", ".", "packages_path", ",", "package_filter", "=", "package_filter", ",", "verbosity", "=", "verbosity", ",", "max_fails", "=", "max_fails", ",", "timestamp", "=", "timestamp", ",", "buf", "=", "buf", ",", "callback", "=", "callback", ",", "package_load_callback", "=", "package_load_callback", ",", "caching", "=", "self", ".", "caching", ")", "if", "context", ".", "success", ":", "if", "self", ".", "_context", "and", "self", ".", "_context", ".", "load_path", ":", "context", ".", "set_load_path", "(", "self", ".", "_context", ".", "load_path", ")", "self", ".", "_set_context", "(", "context", ")", "self", ".", "_modified", "=", "True", "return", "context" ]
Update the current context by performing a re-resolve. The newly resolved context is only applied if it is a successful solve. Returns: `ResolvedContext` object, which may be a successful or failed solve.
[ "Update", "the", "current", "context", "by", "performing", "a", "re", "-", "resolve", "." ]
python
train
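The key behaviour in the resolve_context entry above is that a failed resolve is still returned but never applied. At the library level the same success check looks roughly like the sketch below; the request is a placeholder and this assumes rez is installed and configured, so it is an illustration rather than this model's code:

from rez.resolved_context import ResolvedContext

def try_resolve(request):
    context = ResolvedContext(request)
    if context.success:
        # Only a successful solve would be applied / stored.
        return context
    return None

# ctx = try_resolve(["python-3.7"])  # placeholder request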
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Attitude/MPU6050.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Attitude/MPU6050.py#L414-L427
def readAccelRange( self ): """! Reads the range of accelerometer setup. @return an int value. It should be one of the following values: @see ACCEL_RANGE_2G @see ACCEL_RANGE_4G @see ACCEL_RANGE_8G @see ACCEL_RANGE_16G """ raw_data = self._readByte(self.REG_ACCEL_CONFIG) raw_data = (raw_data | 0xE7) ^ 0xE7 return raw_data
[ "def", "readAccelRange", "(", "self", ")", ":", "raw_data", "=", "self", ".", "_readByte", "(", "self", ".", "REG_ACCEL_CONFIG", ")", "raw_data", "=", "(", "raw_data", "|", "0xE7", ")", "^", "0xE7", "return", "raw_data" ]
! Reads the range of accelerometer setup. @return an int value. It should be one of the following values: @see ACCEL_RANGE_2G @see ACCEL_RANGE_4G @see ACCEL_RANGE_8G @see ACCEL_RANGE_16G
[ "!", "Reads", "the", "range", "of", "accelerometer", "setup", "." ]
python
train
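The bit trick in readAccelRange above, (raw | 0xE7) ^ 0xE7, clears every bit except bits 3-4 of ACCEL_CONFIG, i.e. it is equivalent to raw & 0x18 and isolates the full-scale-range field. A standalone check of that equivalence; the range constants below are the usual MPU-6050 register values and are assumed here rather than read from this driver:

RANGE_NAMES = {0x00: "2G", 0x08: "4G", 0x10: "8G", 0x18: "16G"}  # assumed constants

for raw in range(256):
    masked = (raw | 0xE7) ^ 0xE7
    assert masked == raw & 0x18  # same two FS_SEL bits either way

for raw in (0b0000_1000, 0b1110_1000, 0b0001_0111):
    fs = (raw | 0xE7) ^ 0xE7
    print(f"ACCEL_CONFIG={raw:#04x} -> range bits {fs:#04x} ({RANGE_NAMES[fs]})")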
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_asterix.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_asterix.py#L193-L298
def idle_task(self): '''called on idle''' if self.sock is None: return try: pkt = self.sock.recv(10240) except Exception: return try: if pkt.startswith(b'PICKLED:'): pkt = pkt[8:] # pickled packet try: amsg = [pickle.loads(pkt)] except pickle.UnpicklingError: amsg = asterix.parse(pkt) else: amsg = asterix.parse(pkt) self.pkt_count += 1 self.console.set_status('ASTX', 'ASTX %u/%u' % (self.pkt_count, self.adsb_packets_sent), row=6) except Exception: print("bad packet") return try: logpkt = b'AST:' + struct.pack('<dI', time.time(), len(pkt)) + pkt self.logfile.write(logpkt) except Exception: pass for m in amsg: if self.asterix_settings.debug > 1: print(m) lat = m['I105']['Lat']['val'] lon = m['I105']['Lon']['val'] alt_f = m['I130']['Alt']['val'] climb_rate_fps = m['I220']['RoC']['val'] sac = m['I010']['SAC']['val'] sic = m['I010']['SIC']['val'] trkn = m['I040']['TrkN']['val'] # fake ICAO_address icao_address = trkn & 0xFFFF # use squawk for time in 0.1 second increments. This allows for old msgs to be discarded on vehicle # when using more than one link to vehicle squawk = (int(self.mpstate.attitude_time_s * 10) & 0xFFFF) alt_m = alt_f * 0.3048 # asterix is WGS84, ArduPilot uses AMSL, which is EGM96 alt_m += self.asterix_settings.wgs84_to_AMSL # consider filtering this packet out; if it's not close to # either home or the vehicle position don't send it adsb_pkt = self.master.mav.adsb_vehicle_encode(icao_address, int(lat*1e7), int(lon*1e7), mavutil.mavlink.ADSB_ALTITUDE_TYPE_GEOMETRIC, int(alt_m*1000), # mm 0, # heading 0, # hor vel int(climb_rate_fps * 0.3048 * 100), # cm/s "%08x" % icao_address, 100 + (trkn // 10000), 1.0, (mavutil.mavlink.ADSB_FLAGS_VALID_COORDS | mavutil.mavlink.ADSB_FLAGS_VALID_ALTITUDE | mavutil.mavlink.ADSB_FLAGS_VALID_VELOCITY | mavutil.mavlink.ADSB_FLAGS_VALID_HEADING), squawk) if icao_address in self.tracks: self.tracks[icao_address].update(adsb_pkt, self.get_time()) else: self.tracks[icao_address] = Track(adsb_pkt) if self.asterix_settings.debug > 0: print(adsb_pkt) # send on all links if self.should_send_adsb_pkt(adsb_pkt): self.adsb_packets_sent += 1 for i in range(len(self.mpstate.mav_master)): conn = self.mpstate.mav_master[i] #if adsb_pkt.hor_velocity < 1: # print(adsb_pkt) conn.mav.send(adsb_pkt) else: self.adsb_packets_not_sent += 1 adsb_mod = self.module('adsb') if adsb_mod: # the adsb module is loaded, display on the map adsb_mod.mavlink_packet(adsb_pkt) try: for sysid in self.mpstate.sysid_outputs: # fwd to sysid clients adsb_pkt.pack(self.mpstate.sysid_outputs[sysid].mav) self.mpstate.sysid_outputs[sysid].write(adsb_pkt.get_msgbuf()) except Exception: pass now = time.time() delta = now - self.adsb_byterate_update_timestamp if delta > 5: self.adsb_byterate_update_timestamp = now bytes_per_adsb_packet = 38 # FIXME: find constant self.adsb_byterate = (self.adsb_packets_sent - self.adsb_last_packets_sent)/delta * bytes_per_adsb_packet self.adsb_last_packets_sent = self.adsb_packets_sent
[ "def", "idle_task", "(", "self", ")", ":", "if", "self", ".", "sock", "is", "None", ":", "return", "try", ":", "pkt", "=", "self", ".", "sock", ".", "recv", "(", "10240", ")", "except", "Exception", ":", "return", "try", ":", "if", "pkt", ".", "startswith", "(", "b'PICKLED:'", ")", ":", "pkt", "=", "pkt", "[", "8", ":", "]", "# pickled packet", "try", ":", "amsg", "=", "[", "pickle", ".", "loads", "(", "pkt", ")", "]", "except", "pickle", ".", "UnpicklingError", ":", "amsg", "=", "asterix", ".", "parse", "(", "pkt", ")", "else", ":", "amsg", "=", "asterix", ".", "parse", "(", "pkt", ")", "self", ".", "pkt_count", "+=", "1", "self", ".", "console", ".", "set_status", "(", "'ASTX'", ",", "'ASTX %u/%u'", "%", "(", "self", ".", "pkt_count", ",", "self", ".", "adsb_packets_sent", ")", ",", "row", "=", "6", ")", "except", "Exception", ":", "print", "(", "\"bad packet\"", ")", "return", "try", ":", "logpkt", "=", "b'AST:'", "+", "struct", ".", "pack", "(", "'<dI'", ",", "time", ".", "time", "(", ")", ",", "len", "(", "pkt", ")", ")", "+", "pkt", "self", ".", "logfile", ".", "write", "(", "logpkt", ")", "except", "Exception", ":", "pass", "for", "m", "in", "amsg", ":", "if", "self", ".", "asterix_settings", ".", "debug", ">", "1", ":", "print", "(", "m", ")", "lat", "=", "m", "[", "'I105'", "]", "[", "'Lat'", "]", "[", "'val'", "]", "lon", "=", "m", "[", "'I105'", "]", "[", "'Lon'", "]", "[", "'val'", "]", "alt_f", "=", "m", "[", "'I130'", "]", "[", "'Alt'", "]", "[", "'val'", "]", "climb_rate_fps", "=", "m", "[", "'I220'", "]", "[", "'RoC'", "]", "[", "'val'", "]", "sac", "=", "m", "[", "'I010'", "]", "[", "'SAC'", "]", "[", "'val'", "]", "sic", "=", "m", "[", "'I010'", "]", "[", "'SIC'", "]", "[", "'val'", "]", "trkn", "=", "m", "[", "'I040'", "]", "[", "'TrkN'", "]", "[", "'val'", "]", "# fake ICAO_address", "icao_address", "=", "trkn", "&", "0xFFFF", "# use squawk for time in 0.1 second increments. 
This allows for old msgs to be discarded on vehicle", "# when using more than one link to vehicle", "squawk", "=", "(", "int", "(", "self", ".", "mpstate", ".", "attitude_time_s", "*", "10", ")", "&", "0xFFFF", ")", "alt_m", "=", "alt_f", "*", "0.3048", "# asterix is WGS84, ArduPilot uses AMSL, which is EGM96", "alt_m", "+=", "self", ".", "asterix_settings", ".", "wgs84_to_AMSL", "# consider filtering this packet out; if it's not close to", "# either home or the vehicle position don't send it", "adsb_pkt", "=", "self", ".", "master", ".", "mav", ".", "adsb_vehicle_encode", "(", "icao_address", ",", "int", "(", "lat", "*", "1e7", ")", ",", "int", "(", "lon", "*", "1e7", ")", ",", "mavutil", ".", "mavlink", ".", "ADSB_ALTITUDE_TYPE_GEOMETRIC", ",", "int", "(", "alt_m", "*", "1000", ")", ",", "# mm", "0", ",", "# heading", "0", ",", "# hor vel", "int", "(", "climb_rate_fps", "*", "0.3048", "*", "100", ")", ",", "# cm/s", "\"%08x\"", "%", "icao_address", ",", "100", "+", "(", "trkn", "//", "10000", ")", ",", "1.0", ",", "(", "mavutil", ".", "mavlink", ".", "ADSB_FLAGS_VALID_COORDS", "|", "mavutil", ".", "mavlink", ".", "ADSB_FLAGS_VALID_ALTITUDE", "|", "mavutil", ".", "mavlink", ".", "ADSB_FLAGS_VALID_VELOCITY", "|", "mavutil", ".", "mavlink", ".", "ADSB_FLAGS_VALID_HEADING", ")", ",", "squawk", ")", "if", "icao_address", "in", "self", ".", "tracks", ":", "self", ".", "tracks", "[", "icao_address", "]", ".", "update", "(", "adsb_pkt", ",", "self", ".", "get_time", "(", ")", ")", "else", ":", "self", ".", "tracks", "[", "icao_address", "]", "=", "Track", "(", "adsb_pkt", ")", "if", "self", ".", "asterix_settings", ".", "debug", ">", "0", ":", "print", "(", "adsb_pkt", ")", "# send on all links", "if", "self", ".", "should_send_adsb_pkt", "(", "adsb_pkt", ")", ":", "self", ".", "adsb_packets_sent", "+=", "1", "for", "i", "in", "range", "(", "len", "(", "self", ".", "mpstate", ".", "mav_master", ")", ")", ":", "conn", "=", "self", ".", "mpstate", ".", "mav_master", "[", "i", "]", "#if adsb_pkt.hor_velocity < 1:", "# print(adsb_pkt)", "conn", ".", "mav", ".", "send", "(", "adsb_pkt", ")", "else", ":", "self", ".", "adsb_packets_not_sent", "+=", "1", "adsb_mod", "=", "self", ".", "module", "(", "'adsb'", ")", "if", "adsb_mod", ":", "# the adsb module is loaded, display on the map", "adsb_mod", ".", "mavlink_packet", "(", "adsb_pkt", ")", "try", ":", "for", "sysid", "in", "self", ".", "mpstate", ".", "sysid_outputs", ":", "# fwd to sysid clients", "adsb_pkt", ".", "pack", "(", "self", ".", "mpstate", ".", "sysid_outputs", "[", "sysid", "]", ".", "mav", ")", "self", ".", "mpstate", ".", "sysid_outputs", "[", "sysid", "]", ".", "write", "(", "adsb_pkt", ".", "get_msgbuf", "(", ")", ")", "except", "Exception", ":", "pass", "now", "=", "time", ".", "time", "(", ")", "delta", "=", "now", "-", "self", ".", "adsb_byterate_update_timestamp", "if", "delta", ">", "5", ":", "self", ".", "adsb_byterate_update_timestamp", "=", "now", "bytes_per_adsb_packet", "=", "38", "# FIXME: find constant", "self", ".", "adsb_byterate", "=", "(", "self", ".", "adsb_packets_sent", "-", "self", ".", "adsb_last_packets_sent", ")", "/", "delta", "*", "bytes_per_adsb_packet", "self", ".", "adsb_last_packets_sent", "=", "self", ".", "adsb_packets_sent" ]
called on idle
[ "called", "on", "idle" ]
python
train
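Two small conversions inside the idle_task entry above are easy to check in isolation: the squawk field is reused to carry a wrapping 0.1-second timestamp, and the ASTERIX altitude and climb rate arrive in feet and feet-per-second and are converted to millimetres and cm/s for the ADSB_VEHICLE message. A standalone sketch of just those conversions, with made-up numbers and a placeholder geoid offset:

def squawk_timestamp(attitude_time_s: float) -> int:
    # 0.1 s resolution, wrapped into 16 bits so stale packets can be
    # recognised on the vehicle when several links are in use.
    return int(attitude_time_s * 10) & 0xFFFF

def alt_mm(alt_ft: float, wgs84_to_amsl: float = -41.2) -> int:
    # feet -> metres, then shift from WGS84 ellipsoid height to AMSL
    # (the offset here is a placeholder, not a real geoid value).
    return int((alt_ft * 0.3048 + wgs84_to_amsl) * 1000)

def climb_cm_s(roc_fps: float) -> int:
    return int(roc_fps * 0.3048 * 100)

print(squawk_timestamp(12345.6))   # 57920 (wrapped into 16 bits)
print(alt_mm(3500.0))              # 1025600
print(climb_cm_s(-8.0))            # -243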
apple/turicreate
src/unity/python/turicreate/data_structures/sarray.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sarray.py#L4373-L4400
def cumulative_var(self): """ Return the cumulative variance of the elements in the SArray. Returns an SArray where each element in the output corresponds to the variance of all the elements preceding and including it. The SArray is expected to be of numeric type, or a numeric vector type. Returns ------- out : SArray[int, float] Notes ----- - Missing values are ignored while performing the cumulative aggregate operation. Examples -------- >>> sa = SArray([1, 2, 3, 4, 0]) >>> sa.cumulative_var() dtype: float rows: 3 [0.0, 0.25, 0.6666666666666666, 1.25, 2.0] """ from .. import extensions agg_op = "__builtin__cum_var__" return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
[ "def", "cumulative_var", "(", "self", ")", ":", "from", ".", ".", "import", "extensions", "agg_op", "=", "\"__builtin__cum_var__\"", "return", "SArray", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "builtin_cumulative_aggregate", "(", "agg_op", ")", ")" ]
Return the cumulative variance of the elements in the SArray. Returns an SArray where each element in the output corresponds to the variance of all the elements preceding and including it. The SArray is expected to be of numeric type, or a numeric vector type. Returns ------- out : SArray[int, float] Notes ----- - Missing values are ignored while performing the cumulative aggregate operation. Examples -------- >>> sa = SArray([1, 2, 3, 4, 0]) >>> sa.cumulative_var() dtype: float rows: 3 [0.0, 0.25, 0.6666666666666666, 1.25, 2.0]
[ "Return", "the", "cumulative", "variance", "of", "the", "elements", "in", "the", "SArray", "." ]
python
train
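The documented output of cumulative_var above can be reproduced by hand: each element is the population variance of the prefix ending at that position. The plain-Python check below only mirrors the numbers in the docstring example; it is not SArray's actual implementation:

def cumulative_var(values):
    out = []
    for i in range(1, len(values) + 1):
        prefix = values[:i]
        mean = sum(prefix) / len(prefix)
        out.append(sum((x - mean) ** 2 for x in prefix) / len(prefix))
    return out

print(cumulative_var([1, 2, 3, 4, 0]))
# [0.0, 0.25, 0.6666666666666666, 1.25, 2.0]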
ericjang/tdb
tdb/debug_session.py
https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L158-L179
def _eval(self, node): """ node is a TensorFlow Op or Tensor from self._exe_order """ # if node.name == 'Momentum': # pdb.set_trace() if isinstance(node,HTOp): # All Tensors MUST be in the cache. feed_dict=dict((t,self._cache[t.name]) for t in node.inputs) node.run(feed_dict) # this will populate self._cache on its own else: # is a TensorFlow node if isinstance(node,tf.Tensor): result=self.session.run(node,self._cache) self._cache[node.name]=result else: # is an operation if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub': # special operation that takes in a tensor ref and mutates it # unfortunately, we end up having to execute nearly the full graph? # alternatively, find a way to pass the tensor_ref thru the feed_dict # rather than the tensor values. self.session.run(node,self._original_feed_dict)
[ "def", "_eval", "(", "self", ",", "node", ")", ":", "# if node.name == 'Momentum':", "# \tpdb.set_trace()", "if", "isinstance", "(", "node", ",", "HTOp", ")", ":", "# All Tensors MUST be in the cache.", "feed_dict", "=", "dict", "(", "(", "t", ",", "self", ".", "_cache", "[", "t", ".", "name", "]", ")", "for", "t", "in", "node", ".", "inputs", ")", "node", ".", "run", "(", "feed_dict", ")", "# this will populate self._cache on its own", "else", ":", "# is a TensorFlow node", "if", "isinstance", "(", "node", ",", "tf", ".", "Tensor", ")", ":", "result", "=", "self", ".", "session", ".", "run", "(", "node", ",", "self", ".", "_cache", ")", "self", ".", "_cache", "[", "node", ".", "name", "]", "=", "result", "else", ":", "# is an operation", "if", "node", ".", "type", "==", "'Assign'", "or", "node", ".", "type", "==", "'AssignAdd'", "or", "node", ".", "type", "==", "'AssignSub'", ":", "# special operation that takes in a tensor ref and mutates it", "# unfortunately, we end up having to execute nearly the full graph?", "# alternatively, find a way to pass the tensor_ref thru the feed_dict", "# rather than the tensor values.", "self", ".", "session", ".", "run", "(", "node", ",", "self", ".", "_original_feed_dict", ")" ]
node is a TensorFlow Op or Tensor from self._exe_order
[ "node", "is", "a", "TensorFlow", "Op", "or", "Tensor", "from", "self", ".", "_exe_order" ]
python
train
Azure/azure-python-devtools
src/azure_devtools/ci_tools/github_tools.py
https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L102-L128
def sync_fork(gh_token, github_repo_id, repo, push=True): """Sync the current branch in this fork against the direct parent on Github""" if not gh_token: _LOGGER.warning('Skipping the upstream repo sync, no token') return _LOGGER.info('Check if repo has to be sync with upstream') github_con = Github(gh_token) github_repo = github_con.get_repo(github_repo_id) if not github_repo.parent: _LOGGER.warning('This repo has no upstream') return upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name) upstream = repo.create_remote('upstream', url=upstream_url) upstream.fetch() active_branch_name = repo.active_branch.name if not active_branch_name in repo.remotes.upstream.refs: _LOGGER.info('Upstream has no branch %s to merge from', active_branch_name) return else: _LOGGER.info('Merge from upstream') msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name)) _LOGGER.debug(msg) if push: msg = repo.git.push() _LOGGER.debug(msg)
[ "def", "sync_fork", "(", "gh_token", ",", "github_repo_id", ",", "repo", ",", "push", "=", "True", ")", ":", "if", "not", "gh_token", ":", "_LOGGER", ".", "warning", "(", "'Skipping the upstream repo sync, no token'", ")", "return", "_LOGGER", ".", "info", "(", "'Check if repo has to be sync with upstream'", ")", "github_con", "=", "Github", "(", "gh_token", ")", "github_repo", "=", "github_con", ".", "get_repo", "(", "github_repo_id", ")", "if", "not", "github_repo", ".", "parent", ":", "_LOGGER", ".", "warning", "(", "'This repo has no upstream'", ")", "return", "upstream_url", "=", "'https://github.com/{}.git'", ".", "format", "(", "github_repo", ".", "parent", ".", "full_name", ")", "upstream", "=", "repo", ".", "create_remote", "(", "'upstream'", ",", "url", "=", "upstream_url", ")", "upstream", ".", "fetch", "(", ")", "active_branch_name", "=", "repo", ".", "active_branch", ".", "name", "if", "not", "active_branch_name", "in", "repo", ".", "remotes", ".", "upstream", ".", "refs", ":", "_LOGGER", ".", "info", "(", "'Upstream has no branch %s to merge from'", ",", "active_branch_name", ")", "return", "else", ":", "_LOGGER", ".", "info", "(", "'Merge from upstream'", ")", "msg", "=", "repo", ".", "git", ".", "rebase", "(", "'upstream/{}'", ".", "format", "(", "repo", ".", "active_branch", ".", "name", ")", ")", "_LOGGER", ".", "debug", "(", "msg", ")", "if", "push", ":", "msg", "=", "repo", ".", "git", ".", "push", "(", ")", "_LOGGER", ".", "debug", "(", "msg", ")" ]
Sync the current branch in this fork against the direct parent on Github
[ "Sync", "the", "current", "branch", "in", "this", "fork", "against", "the", "direct", "parent", "on", "Github" ]
python
train
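The GitPython half of sync_fork above (fetch an `upstream` remote, rebase the active branch, optionally push) can be sketched on its own. The path and URL below are placeholders, and the guard against an already-existing remote is an addition in this sketch, not something the original does:

from git import Repo  # GitPython

def sync_with_upstream(repo_path, upstream_url, push=False):
    repo = Repo(repo_path)
    # Reuse the remote if a previous run already created it.
    names = [r.name for r in repo.remotes]
    upstream = (repo.remotes.upstream if "upstream" in names
                else repo.create_remote("upstream", url=upstream_url))
    upstream.fetch()
    branch = repo.active_branch.name
    if branch not in repo.remotes.upstream.refs:
        return  # upstream has no branch of that name to merge from
    repo.git.rebase("upstream/{}".format(branch))
    if push:
        repo.git.push()

# Placeholder invocation:
# sync_with_upstream(".", "https://github.com/example/example.git")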
riga/law
law/workflow/base.py
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L87-L102
def output(self): """ Returns the default workflow outputs in an ordered dictionary. At the moment this is just the collection of outputs of the branch tasks, stored with the key ``"collection"``. """ if self.task.target_collection_cls is not None: cls = self.task.target_collection_cls elif self.task.outputs_siblings: cls = SiblingFileCollection else: cls = TargetCollection targets = luigi.task.getpaths(self.task.get_branch_tasks()) collection = cls(targets, threshold=self.threshold(len(targets))) return OrderedDict([("collection", collection)])
[ "def", "output", "(", "self", ")", ":", "if", "self", ".", "task", ".", "target_collection_cls", "is", "not", "None", ":", "cls", "=", "self", ".", "task", ".", "target_collection_cls", "elif", "self", ".", "task", ".", "outputs_siblings", ":", "cls", "=", "SiblingFileCollection", "else", ":", "cls", "=", "TargetCollection", "targets", "=", "luigi", ".", "task", ".", "getpaths", "(", "self", ".", "task", ".", "get_branch_tasks", "(", ")", ")", "collection", "=", "cls", "(", "targets", ",", "threshold", "=", "self", ".", "threshold", "(", "len", "(", "targets", ")", ")", ")", "return", "OrderedDict", "(", "[", "(", "\"collection\"", ",", "collection", ")", "]", ")" ]
Returns the default workflow outputs in an ordered dictionary. At the moment this is just the collection of outputs of the branch tasks, stored with the key ``"collection"``.
[ "Returns", "the", "default", "workflow", "outputs", "in", "an", "ordered", "dictionary", ".", "At", "the", "moment", "this", "is", "just", "the", "collection", "of", "outputs", "of", "the", "branch", "tasks", "stored", "with", "the", "key", "collection", "." ]
python
train
a1ezzz/wasp-general
wasp_general/network/service.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/service.py#L248-L257
def loop_stopped(self): """ Terminate socket connection because of stopping loop :return: None """ transport = self.transport() if self.server_mode() is True: transport.close_server_socket(self.config()) else: transport.close_client_socket(self.config())
[ "def", "loop_stopped", "(", "self", ")", ":", "transport", "=", "self", ".", "transport", "(", ")", "if", "self", ".", "server_mode", "(", ")", "is", "True", ":", "transport", ".", "close_server_socket", "(", "self", ".", "config", "(", ")", ")", "else", ":", "transport", ".", "close_client_socket", "(", "self", ".", "config", "(", ")", ")" ]
Terminate socket connection because of stopping loop :return: None
[ "Terminate", "socket", "connection", "because", "of", "stopping", "loop" ]
python
train
VIVelev/PyDojoML
dojo/base/preprocessor.py
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/base/preprocessor.py#L32-L50
def get_params(self, *keys): """Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary """ if len(keys) == 0: return vars(self) else: return [vars(self)[k] for k in keys]
[ "def", "get_params", "(", "self", ",", "*", "keys", ")", ":", "if", "len", "(", "keys", ")", "==", "0", ":", "return", "vars", "(", "self", ")", "else", ":", "return", "[", "vars", "(", "self", ")", "[", "k", "]", "for", "k", "in", "keys", "]" ]
Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary
[ "Returns", "the", "specified", "parameters", "for", "the", "current", "preprocessor", "." ]
python
train
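get_params above is a thin wrapper over vars(self): with no keys it returns the whole attribute dict, otherwise the values come back in the order the keys were requested. A tiny self-contained illustration with a made-up preprocessor class:

class TinyScaler:
    """Made-up stand-in for a dojo preprocessor."""
    def __init__(self, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std

    def get_params(self, *keys):
        if len(keys) == 0:
            return vars(self)
        return [vars(self)[k] for k in keys]

scaler = TinyScaler(with_std=False)
print(scaler.get_params())            # {'with_mean': True, 'with_std': False}
print(scaler.get_params('with_std'))  # [False]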
wummel/linkchecker
linkcheck/bookmarks/opera.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/bookmarks/opera.py#L40-L54
def find_bookmark_file (): """Return the bookmark file of the Opera profile. Returns absolute filename if found, or empty string if no bookmark file could be found. """ try: dirname = get_profile_dir() if os.path.isdir(dirname): for name in OperaBookmarkFiles: fname = os.path.join(dirname, name) if os.path.isfile(fname): return fname except Exception: pass return u""
[ "def", "find_bookmark_file", "(", ")", ":", "try", ":", "dirname", "=", "get_profile_dir", "(", ")", "if", "os", ".", "path", ".", "isdir", "(", "dirname", ")", ":", "for", "name", "in", "OperaBookmarkFiles", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "name", ")", "if", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "return", "fname", "except", "Exception", ":", "pass", "return", "u\"\"" ]
Return the bookmark file of the Opera profile. Returns absolute filename if found, or empty string if no bookmark file could be found.
[ "Return", "the", "bookmark", "file", "of", "the", "Opera", "profile", ".", "Returns", "absolute", "filename", "if", "found", "or", "empty", "string", "if", "no", "bookmark", "file", "could", "be", "found", "." ]
python
train
saltstack/salt
salt/modules/win_iis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_iis.py#L1145-L1167
def stop_apppool(name): ''' Stop an IIS application pool. .. versionadded:: 2017.7.0 Args: name (str): The name of the App Pool to stop. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.stop_apppool name='MyTestPool' ''' ps_cmd = ['Stop-WebAppPool', r"'{0}'".format(name)] cmd_ret = _srvmgr(ps_cmd) return cmd_ret['retcode'] == 0
[ "def", "stop_apppool", "(", "name", ")", ":", "ps_cmd", "=", "[", "'Stop-WebAppPool'", ",", "r\"'{0}'\"", ".", "format", "(", "name", ")", "]", "cmd_ret", "=", "_srvmgr", "(", "ps_cmd", ")", "return", "cmd_ret", "[", "'retcode'", "]", "==", "0" ]
Stop an IIS application pool. .. versionadded:: 2017.7.0 Args: name (str): The name of the App Pool to stop. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.stop_apppool name='MyTestPool'
[ "Stop", "an", "IIS", "application", "pool", "." ]
python
train
Shoobx/xmldiff
xmldiff/main.py
https://github.com/Shoobx/xmldiff/blob/ec7835bce9ba69ff4ce03ab6c11397183b6f8411/xmldiff/main.py#L121-L126
def patch_text(actions, tree): """Takes a string with XML and a string with actions""" tree = etree.fromstring(tree) actions = patch.DiffParser().parse(actions) tree = patch_tree(actions, tree) return etree.tounicode(tree)
[ "def", "patch_text", "(", "actions", ",", "tree", ")", ":", "tree", "=", "etree", ".", "fromstring", "(", "tree", ")", "actions", "=", "patch", ".", "DiffParser", "(", ")", ".", "parse", "(", "actions", ")", "tree", "=", "patch_tree", "(", "actions", ",", "tree", ")", "return", "etree", ".", "tounicode", "(", "tree", ")" ]
Takes a string with XML and a string with actions
[ "Takes", "a", "string", "with", "XML", "and", "a", "string", "with", "actions" ]
python
train
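patch_text above expects the textual action format that xmldiff's own diff formatter produces. A plausible round-trip is sketched below, assuming xmldiff is installed and that formatting.DiffFormatter emits the text format patch.DiffParser reads; the package names are real, but the exact round-trip is an assumption here rather than something stated in this file:

from xmldiff import main, formatting

left = '<doc><p>old text</p></doc>'
right = '<doc><p>new text</p></doc>'

# Produce the textual action list, e.g. lines like
# [update-text, /doc/p[1], "new text"]
actions = main.diff_texts(left, right, formatter=formatting.DiffFormatter())
print(actions)

patched = main.patch_text(actions, left)
print(patched)  # expected to match `right`, modulo formatting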