Dataset schema:

  repo              stringlengths   7 .. 54
  path              stringlengths   4 .. 192
  url               stringlengths   87 .. 284
  code              stringlengths   78 .. 104k
  code_tokens       list
  docstring         stringlengths   1 .. 46.9k
  docstring_tokens  list
  language          stringclasses   1 value
  partition         stringclasses   3 values
Clinical-Genomics/scout
scripts/transfer-archive.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scripts/transfer-archive.py#L62-L116
def migrate_case(adapter: MongoAdapter, scout_case: dict, archive_data: dict):
    """Migrate case information from archive."""
    # update collaborators
    collaborators = list(set(scout_case['collaborators'] + archive_data['collaborators']))
    if collaborators != scout_case['collaborators']:
        LOG.info(f"set collaborators: {', '.join(collaborators)}")
        scout_case['collaborators'] = collaborators
    # update assignees
    if len(scout_case.get('assignees', [])) == 0:
        scout_user = adapter.user(archive_data['assignee'])
        if scout_user:
            scout_case['assignees'] = [archive_data['assignee']]
        else:
            LOG.warning(f"{archive_data['assignee']}: unable to find assigned user")
    # add/update suspected/causative variants
    for key in ['suspects', 'causatives']:
        scout_case[key] = scout_case.get(key, [])
        for archive_variant in archive_data[key]:
            variant_id = get_variantid(archive_variant, scout_case['_id'])
            scout_variant = adapter.variant(variant_id)
            if scout_variant:
                if scout_variant['_id'] in scout_case[key]:
                    LOG.info(f"{scout_variant['_id']}: variant already in {key}")
                else:
                    LOG.info(f"{scout_variant['_id']}: add to {key}")
                    # append to the case, not the variant (the original appended
                    # to scout_variant here, which looks like a typo)
                    scout_case[key].append(scout_variant['_id'])
            else:
                # scout_variant is None in this branch, so log the looked-up id instead
                LOG.warning(f"{variant_id}: unable to find variant ({key})")
                scout_case[key].append(variant_id)
    if not scout_case.get('synopsis'):
        # update synopsis
        scout_case['synopsis'] = archive_data['synopsis']
    scout_case['is_migrated'] = True
    adapter.case_collection.find_one_and_replace(
        {'_id': scout_case['_id']},
        scout_case,
    )
    # add/update phenotype groups/terms
    scout_institute = adapter.institute(scout_case['owner'])
    scout_user = adapter.user('[email protected]')
    for key in ['phenotype_terms', 'phenotype_groups']:
        for archive_term in archive_data[key]:
            adapter.add_phenotype(
                institute=scout_institute,
                case=scout_case,
                user=scout_user,
                link=f"/{scout_case['owner']}/{scout_case['display_name']}",
                hpo_term=archive_term['phenotype_id'],
                is_group=key == 'phenotype_groups',
            )
Migrate case information from archive.
python
test
robinandeer/puzzle
puzzle/models/variant.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L134-L144
def add_transcript(self, transcript):
    """Add a transcript to the variant.

    This appends a transcript dict to variant['transcripts'].

    Args:
        transcript (dict): A transcript dictionary
    """
    logger.debug("Adding transcript {0} to variant {1}".format(
        transcript, self['variant_id']))
    self['transcripts'].append(transcript)
Add a transcript to the variant. This appends a transcript dict to variant['transcripts'] Args: transcript (dict): A transcript dictionary
python
train
markovmodel/msmtools
msmtools/analysis/sparse/mean_first_passage_time.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/sparse/mean_first_passage_time.py#L101-L142
def mfpt_between_sets(T, target, origin, mu=None):
    r"""Compute mean-first-passage time between subsets of state space.

    Parameters
    ----------
    T : scipy.sparse matrix
        Transition matrix.
    target : int or list of int
        Set of target states.
    origin : int or list of int
        Set of starting states.
    mu : (M,) ndarray (optional)
        The stationary distribution of the transition matrix T.

    Returns
    -------
    tXY : float
        Mean first passage time between set X and Y.

    Notes
    -----
    The mean first passage time :math:`\mathbf{E}_X[T_Y]` is the expected
    hitting time of one state :math:`y` in :math:`Y` when starting in a
    state :math:`x` in :math:`X`:

    .. math ::

        \mathbb{E}_X[T_Y] = \sum_{x \in X}
            \frac{\mu_x \mathbb{E}_x[T_Y]}{\sum_{z \in X} \mu_z}

    """
    if mu is None:
        mu = stationary_distribution(T)
    # Stationary distribution restricted to the starting set X
    nuX = mu[origin]
    muX = nuX / np.sum(nuX)
    # Mean first-passage time to Y (for all possible starting states)
    tY = mfpt(T, target)
    # Mean first-passage time from X to Y
    tXY = np.dot(muX, tY[origin])
    return tXY
Compute mean-first-passage time between subsets of state space. Parameters ---------- T : scipy.sparse matrix Transition matrix. target : int or list of int Set of target states. origin : int or list of int Set of starting states. mu : (M,) ndarray (optional) The stationary distribution of the transition matrix T. Returns ------- tXY : float Mean first passage time between set X and Y. Notes ----- The mean first passage time :math:`\mathbf{E}_X[T_Y]` is the expected hitting time of one state :math:`y` in :math:`Y` when starting in a state :math:`x` in :math:`X`: .. math :: \mathbb{E}_X[T_Y] = \sum_{x \in X} \frac{\mu_x \mathbb{E}_x[T_Y]}{\sum_{z \in X} \mu_z}
python
train
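The weighted-average formula above can be checked end to end on a toy chain. The sketch below is self-contained NumPy: stationary_distribution and mfpt are minimal dense stand-ins for the msmtools helpers of the same names (the real ones work on sparse matrices), and the matrix T is made up for illustration.

import numpy as np

# Toy 3-state row-stochastic transition matrix, made up for illustration.
T = np.array([[0.9, 0.1, 0.0],
              [0.1, 0.8, 0.1],
              [0.0, 0.2, 0.8]])

def stationary_distribution(T):
    # Left eigenvector for eigenvalue 1, normalized to a probability vector.
    evals, evecs = np.linalg.eig(T.T)
    mu = np.real(evecs[:, np.argmax(np.real(evals))])
    return mu / mu.sum()

def mfpt(T, target):
    # Mean first-passage times to the target set Y: t = 0 on Y and
    # (I - T restricted to the complement of Y) t = 1 elsewhere.
    n = T.shape[0]
    rest = np.setdiff1d(np.arange(n), np.atleast_1d(target))
    t = np.zeros(n)
    A = np.eye(len(rest)) - T[np.ix_(rest, rest)]
    t[rest] = np.linalg.solve(A, np.ones(len(rest)))
    return t

mu = stationary_distribution(T)
origin, target = [0], [2]
nuX = mu[origin]
tXY = np.dot(nuX / np.sum(nuX), mfpt(T, target)[origin])
print(round(tXY, 6))  # 30.0

With a single starting state the weighted average reduces to that state's own hitting time, so the script prints 30.0, the expected number of steps from state 0 to state 2.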
sentinel-hub/sentinelhub-py
sentinelhub/aws.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L630-L640
def get_gml_url(self, qi_type, band='B00'):
    """
    :param qi_type: type of quality indicator
    :type qi_type: str
    :param band: band name
    :type band: str
    :return: location of gml file on AWS
    :rtype: str
    """
    band = band.split('/')[-1]
    return self.get_qi_url('MSK_{}_{}.gml'.format(qi_type, band))
:param qi_type: type of quality indicator :type qi_type: str :param band: band name :type band: str :return: location of gml file on AWS :rtype: str
python
train
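Only the filename construction carries logic here; the get_qi_url() prefixing is omitted. A quick stand-alone check of that logic (the function name gml_name is made up for the demo):

def gml_name(qi_type, band='B00'):
    # Mirrors the filename logic only; note how a path-like band is reduced
    # to its last component before being embedded.
    band = band.split('/')[-1]
    return 'MSK_{}_{}.gml'.format(qi_type, band)

print(gml_name('CLOUDS'))              # MSK_CLOUDS_B00.gml
print(gml_name('DEFECT', 'R10m/B08'))  # MSK_DEFECT_B08.gml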
INM-6/hybridLFPy
hybridLFPy/population.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1334-L1376
def insert_all_synapses(self, cellindex, cell):
    """
    Insert all synaptic events from all presynaptic layers on
    cell object with index `cellindex`.

    Parameters
    ----------
    cellindex : int
        cell index in the population.
    cell : `LFPy.Cell` instance
        Postsynaptic target cell.

    Returns
    -------
    None

    See also
    --------
    Population.insert_synapse
    """
    for i, X in enumerate(self.X):  # range(self.k_yXL.shape[1]):
        synParams = self.synParams
        synParams.update({
            'weight': self.J_yX[i],
            'tau': self.tau_yX[i],
        })
        for j in range(len(self.synIdx[cellindex][X])):
            if self.synDelays is not None:
                synDelays = self.synDelays[cellindex][X][j]
            else:
                synDelays = None
            self.insert_synapses(cell=cell,
                                 cellindex=cellindex,
                                 synParams=synParams,
                                 idx=self.synIdx[cellindex][X][j],
                                 X=X,
                                 SpCell=self.SpCells[cellindex][X][j],
                                 synDelays=synDelays)
Insert all synaptic events from all presynaptic layers on cell object with index `cellindex`. Parameters ---------- cellindex : int cell index in the population. cell : `LFPy.Cell` instance Postsynaptic target cell. Returns ------- None See also -------- Population.insert_synapse
python
train
ThreatConnect-Inc/tcex
tcex/tcex_ti_batch.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L657-L698
def errors(self, batch_id, halt_on_error=True):
    """Retrieve Batch errors from the ThreatConnect API.

    .. code-block:: javascript

        [{
            "errorReason": "Incident incident-001 has an invalid status.",
            "errorSource": "incident-001 is not valid."
        }, {
            "errorReason": "Incident incident-002 has an invalid status.",
            "errorSource": "incident-002 is not valid."
        }]

    Args:
        batch_id (str): The ID returned from the ThreatConnect API for the current batch job.
        halt_on_error (bool, default:True): If True any exception will raise an error.
    """
    errors = []
    try:
        r = self.tcex.session.get('/v2/batch/{}/errors'.format(batch_id))
        # if r.status_code == 404:
        #     time.sleep(5)  # allow time for errors to be processed
        #     r = self.tcex.session.get('/v2/batch/{}/errors'.format(batch_id))
        self.tcex.log.debug(
            'Retrieve Errors for ID {}: status code {}, errors {}'.format(
                batch_id, r.status_code, r.text
            )
        )
        # self.tcex.log.debug('Retrieve Errors URL {}'.format(r.url))
        # API does not return correct content type
        if r.ok:
            errors = json.loads(r.text)
        # temporarily process errors to find "critical" errors.
        # FR in core to return error codes.
        for error in errors:
            error_reason = error.get('errorReason')
            for error_msg in self._critical_failures:
                if re.findall(error_msg, error_reason):
                    self.tcex.handle_error(10500, [error_reason], halt_on_error)
        return errors
    except Exception as e:
        self.tcex.handle_error(560, [e], halt_on_error)
Retrieve Batch errors from the ThreatConnect API. .. code-block:: javascript [{ "errorReason": "Incident incident-001 has an invalid status.", "errorSource": "incident-001 is not valid." }, { "errorReason": "Incident incident-002 has an invalid status.", "errorSource": "incident-002 is not valid." }] Args: batch_id (str): The ID returned from the ThreatConnect API for the current batch job. halt_on_error (bool, default:True): If True any exception will raise an error.
python
train
Chilipp/psy-simple
psy_simple/widgets/texts.py
https://github.com/Chilipp/psy-simple/blob/7d916406a6d3c3c27c0b7102f98fef07a4da0a61/psy_simple/widgets/texts.py#L420-L437
def refresh(self):
    """Refresh the widgets from the current font"""
    font = self.current_font

    # refresh btn_bold
    self.btn_bold.blockSignals(True)
    self.btn_bold.setChecked(font.weight() > 50)
    self.btn_bold.blockSignals(False)

    # refresh btn_italic
    self.btn_italic.blockSignals(True)
    self.btn_italic.setChecked(font.italic())
    self.btn_italic.blockSignals(False)

    # refresh font size
    self.spin_box.blockSignals(True)
    self.spin_box.setValue(font.pointSize())
    self.spin_box.blockSignals(False)
Refresh the widgets from the current font
python
train
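The repeated blockSignals(True)/blockSignals(False) bracketing is a common Qt idiom for updating widgets without re-triggering their change signals. A context manager can factor it out; this is a generic sketch, not part of psy-simple. Note that QObject.blockSignals() returns the previous blocking state, which lets the manager restore it rather than blindly clear it.

from contextlib import contextmanager

@contextmanager
def blocked_signals(widget):
    # Suppress the widget's signals for the duration of the block,
    # then restore whatever blocking state was in effect before.
    previous = widget.blockSignals(True)
    try:
        yield widget
    finally:
        widget.blockSignals(previous)

# Hypothetical usage inside refresh():
#     with blocked_signals(self.spin_box):
#         self.spin_box.setValue(font.pointSize())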
roclark/sportsreference
sportsreference/nfl/roster.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nfl/roster.py#L357-L373
def _parse_player_information(self, player_info):
    """
    Parse general player information.

    Parse general player information such as height, weight, and name.
    The attribute for the requested field will be set with the value
    prior to returning.

    Parameters
    ----------
    player_info : PyQuery object
        A PyQuery object containing the HTML from the player's stats page.
    """
    for field in ['_height', '_weight', '_name']:
        short_field = str(field)[1:]
        value = utils._parse_field(PLAYER_SCHEME, player_info, short_field)
        setattr(self, field, value)
Parse general player information. Parse general player information such as height, weight, and name. The attribute for the requested field will be set with the value prior to returning. Parameters ---------- player_info : PyQuery object A PyQuery object containing the HTML from the player's stats page.
python
train
trolldbois/ctypeslib
ctypeslib/codegen/handler.py
https://github.com/trolldbois/ctypeslib/blob/2aeb1942a5a32a5cc798c287cd0d9e684a0181a8/ctypeslib/codegen/handler.py#L68-L84
def make_python_name(self, name):
    """Transforms an USR into a valid python name."""
    # FIXME see cindex.SpellingCache
    for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),
                 ("$", "DOLLAR"), (".", "DOT"), ("@", "_"), (":", "_"),
                 ('-', '_')]:
        if k in name:  # template
            name = name.replace(k, v)
    # FIXME: test case ? I want this func to be neutral on C valid names.
    if name.startswith("__"):
        return "_X" + name
    if len(name) == 0:
        pass
    elif name[0] in "01234567879":
        return "_" + name
    return name
Transforms an USR into a valid python name.
python
train
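The substitution table is easiest to understand from a few inputs and outputs. The helper below (sanitize is a made-up name) replays the same replacement list and guard clauses as a free function:

def sanitize(name):
    # Same substitutions as make_python_name, minus the class plumbing.
    for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),
                 ('$', 'DOLLAR'), ('.', 'DOT'), ('@', '_'), (':', '_'),
                 ('-', '_')]:
        name = name.replace(k, v)
    if name.startswith('__'):
        return '_X' + name
    if name and name[0] in '0123456789':
        return '_' + name
    return name

print(sanitize('std::vector<int>'))  # std__vector_int_
print(sanitize('2ndField'))          # _2ndField
print(sanitize('__reserved'))        # _X__reserved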
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py#L640-L690
def get_deployment_targets(self, project, deployment_group_id, tags=None, name=None,
                           partial_name_match=None, expand=None, agent_status=None,
                           agent_job_result=None, continuation_token=None, top=None,
                           enabled=None, property_filters=None):
    """GetDeploymentTargets.
    [Preview API] Get a list of deployment targets in a deployment group.
    :param str project: Project ID or project name
    :param int deployment_group_id: ID of the deployment group.
    :param [str] tags: Get only the deployment targets that contain all of these tags (comma-separated list).
    :param str name: Name pattern of the deployment targets to return.
    :param bool partial_name_match: When set to true, treats **name** as pattern. Else treats it as absolute match. Default is **false**.
    :param str expand: Include these additional details in the returned objects.
    :param str agent_status: Get only deployment targets that have this status.
    :param str agent_job_result: Get only deployment targets that have this last job result.
    :param str continuation_token: Get deployment targets with names greater than this continuationToken lexicographically.
    :param int top: Maximum number of deployment targets to return. Default is **1000**.
    :param bool enabled: Get only deployment targets that are enabled or disabled. Default is 'null' which returns all the targets.
    :param [str] property_filters:
    :rtype: [DeploymentMachine]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if deployment_group_id is not None:
        route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
    query_parameters = {}
    if tags is not None:
        tags = ",".join(tags)
        query_parameters['tags'] = self._serialize.query('tags', tags, 'str')
    if name is not None:
        query_parameters['name'] = self._serialize.query('name', name, 'str')
    if partial_name_match is not None:
        query_parameters['partialNameMatch'] = self._serialize.query('partial_name_match', partial_name_match, 'bool')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    if agent_status is not None:
        query_parameters['agentStatus'] = self._serialize.query('agent_status', agent_status, 'str')
    if agent_job_result is not None:
        query_parameters['agentJobResult'] = self._serialize.query('agent_job_result', agent_job_result, 'str')
    if continuation_token is not None:
        query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if enabled is not None:
        query_parameters['enabled'] = self._serialize.query('enabled', enabled, 'bool')
    if property_filters is not None:
        property_filters = ",".join(property_filters)
        query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
    response = self._send(http_method='GET',
                          location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[DeploymentMachine]', self._unwrap_collection(response))
GetDeploymentTargets. [Preview API] Get a list of deployment targets in a deployment group. :param str project: Project ID or project name :param int deployment_group_id: ID of the deployment group. :param [str] tags: Get only the deployment targets that contain all of these tags (comma-separated list). :param str name: Name pattern of the deployment targets to return. :param bool partial_name_match: When set to true, treats **name** as pattern. Else treats it as absolute match. Default is **false**. :param str expand: Include these additional details in the returned objects. :param str agent_status: Get only deployment targets that have this status. :param str agent_job_result: Get only deployment targets that have this last job result. :param str continuation_token: Get deployment targets with names greater than this continuationToken lexicographically. :param int top: Maximum number of deployment targets to return. Default is **1000**. :param bool enabled: Get only deployment targets that are enabled or disabled. Default is 'null' which returns all the targets. :param [str] property_filters: :rtype: [DeploymentMachine]
python
train
cidrblock/modelsettings
modelsettings/__init__.py
https://github.com/cidrblock/modelsettings/blob/09763c111fb38b3ba7a13cc95ca59e4393fe75ba/modelsettings/__init__.py#L146-L180
def load_ini(self, ini_file):
    """ Load the contents from the ini file

    Args:
        ini_file (str): The file from which the settings should be loaded

    """
    if ini_file and not os.path.exists(ini_file):
        self.log.critical(f"Settings file specified but not found. {ini_file}")
        sys.exit(1)
    if not ini_file:
        ini_file = f"{self.cwd}/settings.ini"
    if os.path.exists(ini_file):
        config = configparser.RawConfigParser(allow_no_value=True)
        config.read(ini_file)
        for key, value in self.spec.items():
            entry = None
            if value['type'] == str:
                entry = config.get("settings", option=key.lower(), fallback=None)
            elif value['type'] == bool:
                entry = config.getboolean("settings", option=key.lower(), fallback=None)
            elif value['type'] == int:
                entry = config.getint("settings", option=key.lower(), fallback=None)
            elif value['type'] == float:
                entry = config.getfloat("settings", option=key.lower(), fallback=None)
            elif value['type'] in [list, dict]:
                entries = config.get("settings", option=key.lower(), fallback=None)
                if entries:
                    try:
                        entry = json.loads(entries)
                    except json.decoder.JSONDecodeError as _err:  # pragma: no cover
                        self.log.critical(f"Error parsing json from ini file. {entries}")
                        sys.exit(1)
            if entry is not None:
                setattr(self, key.upper(), entry)
Load the contents from the ini file Args: ini_file (str): The file from which the settings should be loaded
python
train
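The loop dispatches on a 'type' entry in each spec item, so a spec and ini file along the following lines would exercise every branch; all field names here are invented for illustration, and the spec shape is inferred from the code above:

# settings.ini (hypothetical):
#
#   [settings]
#   name = demo
#   debug = true
#   port = 8080
#   tags = ["a", "b"]

spec = {
    'name':  {'type': str},    # read with config.get()
    'debug': {'type': bool},   # read with config.getboolean()
    'port':  {'type': int},    # read with config.getint()
    'tags':  {'type': list},   # read as a string, then json.loads()
}

# After load_ini() runs against that file, the instance would carry
# NAME='demo', DEBUG=True, PORT=8080 and TAGS=['a', 'b'].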
hydpy-dev/hydpy
hydpy/models/lstream/lstream_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lstream/lstream_model.py#L878-L939
def calc_qa_v1(self):
    """Calculate outflow.

    The working equation is the analytical solution of the linear storage
    equation under the assumption of constant change in inflow during
    the simulation time step.

    Required flux sequence:
      |RK|

    Required state sequence:
      |QZ|

    Updated state sequence:
      |QA|

    Basic equation:
       :math:`QA_{neu} = QA_{alt} +
       (QZ_{alt}-QA_{alt}) \\cdot (1-exp(-RK^{-1})) +
       (QZ_{neu}-QZ_{alt}) \\cdot (1-RK\\cdot(1-exp(-RK^{-1})))`

    Examples:

        A normal test case:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> fluxes.rk(0.1)
        >>> states.qz.old = 2.0
        >>> states.qz.new = 4.0
        >>> states.qa.old = 3.0
        >>> model.calc_qa_v1()
        >>> states.qa
        qa(3.800054)

        First extreme test case (zero division is circumvented):

        >>> fluxes.rk(0.0)
        >>> model.calc_qa_v1()
        >>> states.qa
        qa(4.0)

        Second extreme test case (numerical overflow is circumvented):

        >>> fluxes.rk(1e201)
        >>> model.calc_qa_v1()
        >>> states.qa
        qa(5.0)
    """
    flu = self.sequences.fluxes.fastaccess
    old = self.sequences.states.fastaccess_old
    new = self.sequences.states.fastaccess_new
    aid = self.sequences.aides.fastaccess
    if flu.rk <= 0.:
        new.qa = new.qz
    elif flu.rk > 1e200:
        new.qa = old.qa + new.qz - old.qz
    else:
        aid.temp = (1. - modelutils.exp(-1. / flu.rk))
        new.qa = (old.qa +
                  (old.qz - old.qa) * aid.temp +
                  (new.qz - old.qz) * (1. - flu.rk * aid.temp))
Calculate outflow. The working equation is the analytical solution of the linear storage equation under the assumption of constant change in inflow during the simulation time step. Required flux sequence: |RK| Required state sequence: |QZ| Updated state sequence: |QA| Basic equation: :math:`QA_{neu} = QA_{alt} + (QZ_{alt}-QA_{alt}) \\cdot (1-exp(-RK^{-1})) + (QZ_{neu}-QZ_{alt}) \\cdot (1-RK\\cdot(1-exp(-RK^{-1})))` Examples: A normal test case: >>> from hydpy.models.lstream import * >>> parameterstep() >>> fluxes.rk(0.1) >>> states.qz.old = 2.0 >>> states.qz.new = 4.0 >>> states.qa.old = 3.0 >>> model.calc_qa_v1() >>> states.qa qa(3.800054) First extreme test case (zero division is circumvented): >>> fluxes.rk(0.0) >>> model.calc_qa_v1() >>> states.qa qa(4.0) Second extreme test case (numerical overflow is circumvented): >>> fluxes.rk(1e201) >>> model.calc_qa_v1() >>> states.qa qa(5.0)
python
train
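The branch structure is easy to verify outside the model framework. This stand-alone transcription of the working equation (the name linear_storage_step is made up) reproduces the first doctest value from the docstring:

from math import exp

def linear_storage_step(qz_old, qz_new, qa_old, rk):
    # Analytical solution of the linear storage equation for one time step,
    # mirroring the three branches of calc_qa_v1.
    if rk <= 0.0:
        return qz_new
    if rk > 1e200:
        return qa_old + qz_new - qz_old
    temp = 1.0 - exp(-1.0 / rk)
    return qa_old + (qz_old - qa_old) * temp + (qz_new - qz_old) * (1.0 - rk * temp)

print(round(linear_storage_step(2.0, 4.0, 3.0, 0.1), 6))  # 3.800054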
Kortemme-Lab/klab
klab/bio/pdb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L748-L770
def _get_replacement_pdb_id(self):
    '''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
    deprecation_lines = self.parsed_lines['OBSLTE']
    date_regex = re.compile(r'(\d+)-(\w{3})-(\d+)')
    if deprecation_lines:
        assert(len(deprecation_lines) == 1)
        tokens = deprecation_lines[0].split()[1:]
        if tokens[1].upper() in obsolete_pdb_ids_with_no_replacement_entries:
            assert(len(tokens) == 2)
        else:
            assert(len(tokens) == 3)
        if self.pdb_id:
            mtchs = date_regex.match(tokens[0])
            assert(mtchs)
            _day = int(mtchs.group(1))
            _month = mtchs.group(2)
            _year = int(mtchs.group(3))  # only two characters?
            assert(tokens[1] == self.pdb_id)
            # no need to do anything fancier unless this is ever needed
            self.deprecation_date = (_day, _month, _year)
            self.deprecated = True
            if len(tokens) == 3:
                assert(len(tokens[2]) == 4)
                self.replacement_pdb_id = tokens[2]
Checks to see if the PDB file has been deprecated and, if so, what the new ID is.
python
train
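For reference, an OBSLTE header line along the lines described in the PDB file-format documentation looks roughly like the one below (the IDs are illustrative), which is what the tokenizing above sees:

import re

line = 'OBSLTE     31-JAN-94 116L      216L'
tokens = line.split()[1:]
print(tokens)  # ['31-JAN-94', '116L', '216L']

# tokens[0] feeds the date regex, tokens[1] is the deprecated entry,
# tokens[2] (when present) is the replacement entry.
m = re.compile(r'(\d+)-(\w{3})-(\d+)').match(tokens[0])
print(int(m.group(1)), m.group(2), int(m.group(3)))  # 31 JAN 94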
Kensuke-Mitsuzawa/JapaneseTokenizers
JapaneseTokenizer/common/text_preprocess.py
https://github.com/Kensuke-Mitsuzawa/JapaneseTokenizers/blob/3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c/JapaneseTokenizer/common/text_preprocess.py#L77-L83
def normalize_text_normal_ipadic(input_text, kana=True, ascii=True, digit=True):
    # type: (text_type,bool,bool,bool)->text_type
    """
    * All hankaku Katakana are converted into Zenkaku Katakana
    * All hankaku English alphabet and numeric strings are converted into Zenkaku ones
    """
    return jaconv.h2z(input_text, kana=kana, ascii=ascii, digit=digit)
* All hankaku Katakana are converted into Zenkaku Katakana * All hankaku English alphabet and numeric strings are converted into Zenkaku ones
python
train
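With the jaconv package installed, the conversion can be tried directly; h2z converts kana by default and only touches ASCII letters and digits when asked:

import jaconv

print(jaconv.h2z('ｱｲｳｴｵ'))                          # アイウエオ
print(jaconv.h2z('abc123', ascii=True, digit=True))  # ａｂｃ１２３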
manns/pyspread
pyspread/src/lib/selection.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/selection.py#L245-L252
def parameters(self):
    """Returns tuple of selection parameters of self
    (self.block_tl, self.block_br, self.rows, self.cols, self.cells)
    """
    return self.block_tl, self.block_br, self.rows, self.cols, self.cells
Returns tuple of selection parameters of self (self.block_tl, self.block_br, self.rows, self.cols, self.cells)
python
train
jonathanj/txspinneret
txspinneret/query.py
https://github.com/jonathanj/txspinneret/blob/717008a2c313698984a23e3f3fc62ea3675ed02d/txspinneret/query.py#L148-L177
def Boolean(value, true=(u'yes', u'1', u'true'), false=(u'no', u'0', u'false'),
            encoding=None):
    """
    Parse a value as a boolean.

    :type  value: `unicode` or `bytes`
    :param value: Text value to parse.

    :type  true: `tuple` of `unicode`
    :param true: Values to compare, ignoring case, for ``True`` values.

    :type  false: `tuple` of `unicode`
    :param false: Values to compare, ignoring case, for ``False`` values.

    :type  encoding: `bytes`
    :param encoding: Encoding to treat `bytes` values as, defaults to
        ``utf-8``.

    :rtype: `bool`
    :return: Parsed boolean or ``None`` if ``value`` did not match ``true`` or
        ``false`` values.
    """
    value = Text(value, encoding)
    if value is not None:
        value = value.lower().strip()
        if value in true:
            return True
        elif value in false:
            return False
    return None
Parse a value as a boolean. :type value: `unicode` or `bytes` :param value: Text value to parse. :type true: `tuple` of `unicode` :param true: Values to compare, ignoring case, for ``True`` values. :type false: `tuple` of `unicode` :param false: Values to compare, ignoring case, for ``False`` values. :type encoding: `bytes` :param encoding: Encoding to treat `bytes` values as, defaults to ``utf-8``. :rtype: `bool` :return: Parsed boolean or ``None`` if ``value`` did not match ``true`` or ``false`` values.
python
valid
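A few example calls, assuming the module is importable as in the repository layout:

from txspinneret.query import Boolean

print(Boolean(u'Yes'))    # True  (case-insensitive match against `true`)
print(Boolean(b'0'))      # False (bytes are decoded, utf-8 by default)
print(Boolean(u'maybe'))  # None  (no match in either tuple)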
ubyssey/dispatch
dispatch/api/views.py
https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/api/views.py#L138-L153
def get_queryset(self):
    """Only display unpublished content to authenticated users,
    filter by query parameter if present."""

    # Get base queryset from DispatchPublishableMixin
    queryset = self.get_publishable_queryset()
    queryset = queryset.order_by('-updated_at')

    # Optionally filter by a query parameter
    q = self.request.query_params.get('q')
    if q:
        queryset = queryset.filter(title__icontains=q)

    return queryset
Only display unpublished content to authenticated users, filter by query parameter if present.
python
test
chaoss/grimoirelab-elk
grimoire_elk/utils.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/utils.py#L262-L284
def get_kibiter_version(url):
    """ Return the Kibiter major version number.

    The url must point to the Elasticsearch used by Kibiter
    """
    config_url = '.kibana/config/_search'
    # Avoid having // in the URL because ES will fail
    if url[-1] != '/':
        url += "/"
    url += config_url
    r = requests.get(url)
    r.raise_for_status()

    if len(r.json()['hits']['hits']) == 0:
        logger.error("Can not get the Kibiter version")
        return None

    version = r.json()['hits']['hits'][0]['_id']  # e.g. 5.4.0-SNAPSHOT
    major_version = version.split(".", 1)[0]
    return major_version
Return the Kibiter major version number The url must point to the Elasticsearch used by Kibiter
python
train
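The only string handling is taking everything before the first dot, which works for release and snapshot versions alike (the example strings here are made up):

for version in ('5.4.0-SNAPSHOT', '6.8.6'):
    print(version.split('.', 1)[0])  # prints 5, then 6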
clchiou/startup
startup.py
https://github.com/clchiou/startup/blob/13cbf3ce1deffbc10d33a5f64c396a73129a5929/startup.py#L271-L294
def _parse_ret(func, variables, annotations=None):
    """Parse func's return annotation and return either None, a variable,
    or a tuple of variables.

    NOTE:
      * _parse_ret() also notifies variables about will-writes.
      * A variable can be written multiple times per return annotation.
    """
    anno = (annotations or func.__annotations__).get('return')
    if anno is None:
        return None
    elif isinstance(anno, str):
        writeto = variables[anno]
        writeto.notify_will_write()
        return writeto
    elif (isinstance(anno, tuple) and
          all(isinstance(name, str) for name in anno)):
        writeto = tuple(variables[name] for name in anno)
        for var in writeto:
            var.notify_will_write()
        return writeto
    # Be very strict about annotation format for now.
    raise StartupError(
        'cannot parse return annotation %r for %r' % (anno, func))
Parse func's return annotation and return either None, a variable, or a tuple of variables. NOTE: * _parse_ret() also notifies variables about will-writes. * A variable can be written multiple times per return annotation.
python
train
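The two accepted annotation shapes are a single variable name or a tuple of names, both legal as ordinary Python expressions in a return annotation. A minimal illustration of what _parse_ret() reads, without the variables machinery:

def single() -> 'x':
    return 42

def pair() -> ('left', 'right'):
    return 1, 2

print(single.__annotations__['return'])  # 'x'
print(pair.__annotations__['return'])    # ('left', 'right')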
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9128-L9150
def svs_description_metadata(description):
    """Return metadata from Aperio image description as dict.

    The Aperio image description format is unspecified. Expect failures.

    >>> svs_description_metadata('Aperio Image Library v1.0')
    {'Aperio Image Library': 'v1.0'}

    """
    if not description.startswith('Aperio Image Library '):
        raise ValueError('invalid Aperio image description')
    result = {}
    lines = description.split('\n')
    key, value = lines[0].strip().rsplit(None, 1)  # 'Aperio Image Library'
    result[key.strip()] = value.strip()
    if len(lines) == 1:
        return result
    items = lines[1].split('|')
    result[''] = items[0].strip()  # TODO: parse this?
    for item in items[1:]:
        key, value = item.split(' = ')
        result[key.strip()] = astype(value.strip())
    return result
Return metadata from Aperio image description as dict. The Aperio image description format is unspecified. Expect failures. >>> svs_description_metadata('Aperio Image Library v1.0') {'Aperio Image Library': 'v1.0'}
python
train
LLNL/scraper
scripts/get_traffic.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L134-L140
def get_releases(self, url='', headers={}, repo_name=''):
    """
    Retrieves the releases for the given repo in JSON.
    """
    url_releases = (url + '/releases')
    r = requests.get(url_releases, headers=headers)
    self.releases_json[repo_name] = r.json()
Retrieves the releases for the given repo in JSON.
python
test
xzased/lvm2py
lvm2py/lv.py
https://github.com/xzased/lvm2py/blob/34ce69304531a474c2fe4a4009ca445a8c103cd6/lvm2py/lv.py#L65-L78
def open(self):
    """
    Obtains the lvm, vg_t and lv_t handle. Usually you would never need
    to use this method unless you are doing operations using the ctypes
    function wrappers in conversion.py

    *Raises:*

    *       HandleError
    """
    self.vg.open()
    self.__lvh = lvm_lv_from_uuid(self.vg.handle, self.uuid)
    if not bool(self.__lvh):
        raise HandleError("Failed to initialize LV Handle.")
Obtains the lvm, vg_t and lv_t handle. Usually you would never need to use this method unless you are doing operations using the ctypes function wrappers in conversion.py *Raises:* * HandleError
python
train
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/containers.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L71-L77
def get_meta_fields(self, fields, kwargs={}):
    '''
    Return a dictionary of metadata fields
    '''
    fields = to_list(fields)
    meta = self.get_meta()
    return {field: meta.get(field) for field in fields}
Return a dictionary of metadata fields
python
train
dhermes/bezier
src/bezier/_algebraic_intersection.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_algebraic_intersection.py#L1225-L1253
def _resolve_and_add(nodes1, s_val, final_s, nodes2, t_val, final_t):
    """Resolve a computed intersection and add to lists.

    We perform one Newton step to deal with any residual issues of
    high-degree polynomial solves (one of which depends on the already
    approximate ``x_val, y_val``).

    Args:
        nodes1 (numpy.ndarray): The nodes in the first curve.
        s_val (float): The approximate intersection parameter
            along ``nodes1``.
        final_s (List[float]): The list of accepted intersection
            parameters ``s``.
        nodes2 (numpy.ndarray): The nodes in the second curve.
        t_val (float): The approximate intersection parameter
            along ``nodes2``.
        final_t (List[float]): The list of accepted intersection
            parameters ``t``.
    """
    s_val, t_val = _intersection_helpers.newton_refine(
        s_val, nodes1, t_val, nodes2
    )
    s_val, success_s = _helpers.wiggle_interval(s_val)
    t_val, success_t = _helpers.wiggle_interval(t_val)
    if not (success_s and success_t):
        return

    final_s.append(s_val)
    final_t.append(t_val)
Resolve a computed intersection and add to lists. We perform one Newton step to deal with any residual issues of high-degree polynomial solves (one of which depends on the already approximate ``x_val, y_val``). Args: nodes1 (numpy.ndarray): The nodes in the first curve. s_val (float): The approximate intersection parameter along ``nodes1``. final_s (List[float]): The list of accepted intersection parameters ``s``. nodes2 (numpy.ndarray): The nodes in the second curve. t_val (float): The approximate intersection parameter along ``nodes2``. final_t (List[float]): The list of accepted intersection parameters ``t``.
[ "Resolve", "a", "computed", "intersection", "and", "add", "to", "lists", "." ]
python
train
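The `wiggle_interval` acceptance step is the piece worth isolating: parameters that land just outside [0, 1] from floating-point error are clamped in, anything further out is rejected. A standalone sketch under the assumption of a small absolute tolerance (the exact tolerance in bezier's `_helpers` may differ):

def wiggle_interval(value, eps=0.5**44):  # tolerance is an illustrative guess
    """Return (clamped_value, success) for values near the unit interval."""
    if -eps < value < 1.0 + eps:
        return min(max(value, 0.0), 1.0), True
    return value, False

assert wiggle_interval(1.0 + 1e-15) == (1.0, True)  # clamped in
assert wiggle_interval(1.1)[1] is False             # rejected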
dahlia/sqlalchemy-imageattach
sqlalchemy_imageattach/entity.py
https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/entity.py#L535-L623
def from_raw_file(self, raw_file, store=current_store, size=None, mimetype=None, original=True, extra_args=None, extra_kwargs=None): """Similar to :meth:`from_file()` except it's lower than that. It assumes that ``raw_file`` is readable and seekable while :meth:`from_file()` only assumes the file is readable. Also it doesn't make any in-memory buffer while :meth:`from_file()` always makes an in-memory buffer and copies the file into the buffer. If ``size`` and ``mimetype`` are passed, it won't try to read the image and will use these values instead. It's used for implementing :meth:`from_file()` and :meth:`from_blob()` methods that are higher than it. :param raw_file: the seekable and readable file of the image :type raw_file: file-like object, :class:`file` :param store: the storage to store the file. :data:`~sqlalchemy_imageattach.context.current_store` by default :type store: :class:`~sqlalchemy_imageattach.store.Store` :param size: an optional size of the image. automatically detected if it's omitted :type size: :class:`tuple` :param mimetype: an optional mimetype of the image. automatically detected if it's omitted :type mimetype: :class:`str` :param original: an optional flag which represents whether it is an original image or not. default is :const:`True` (meaning original) :type original: :class:`bool` :param extra_args: additional arguments to pass to the model's constructor. :type extra_args: :class:`collections.abc.Sequence` :param extra_kwargs: additional keyword arguments to pass to the model's constructor. :type extra_kwargs: :class:`typing.Mapping`\ [:class:`str`, :class:`object`] :returns: the created image instance :rtype: :class:`Image` .. versionadded:: 1.0.0 The ``extra_args`` and ``extra_kwargs`` options. """ query = self.query cls = query.column_descriptions[0]['type'] if not (isinstance(cls, type) and issubclass(cls, Image)): raise TypeError('the first entity must be a subtype of ' 'sqlalchemy_imageattach.entity.Image') if original and query.session: if store is current_store: for existing in query: test_data = existing.identity_map.copy() test_data.update(self.identity_map) if existing.identity_map == test_data: query.remove(existing) query.session.flush() else: with store_context(store): for existing in query: test_data = existing.identity_map.copy() test_data.update(self.identity_map) if existing.identity_map == test_data: query.remove(existing) query.session.flush() if size is None or mimetype is None: with WandImage(file=raw_file) as wand: size = size or wand.size mimetype = mimetype or wand.mimetype if mimetype.startswith('image/x-'): mimetype = 'image/' + mimetype[8:] if extra_kwargs is None: extra_kwargs = {} extra_kwargs.update(self.identity_map) if extra_args is None: extra_args = () image = cls(size=size, mimetype=mimetype, original=original, *extra_args, **extra_kwargs) raw_file.seek(0) image.file = raw_file image.store = store query.append(image) return image
[ "def", "from_raw_file", "(", "self", ",", "raw_file", ",", "store", "=", "current_store", ",", "size", "=", "None", ",", "mimetype", "=", "None", ",", "original", "=", "True", ",", "extra_args", "=", "None", ",", "extra_kwargs", "=", "None", ")", ":", "query", "=", "self", ".", "query", "cls", "=", "query", ".", "column_descriptions", "[", "0", "]", "[", "'type'", "]", "if", "not", "(", "isinstance", "(", "cls", ",", "type", ")", "and", "issubclass", "(", "cls", ",", "Image", ")", ")", ":", "raise", "TypeError", "(", "'the first entity must be a subtype of '", "'sqlalchemy_imageattach.entity.Image'", ")", "if", "original", "and", "query", ".", "session", ":", "if", "store", "is", "current_store", ":", "for", "existing", "in", "query", ":", "test_data", "=", "existing", ".", "identity_map", ".", "copy", "(", ")", "test_data", ".", "update", "(", "self", ".", "identity_map", ")", "if", "existing", ".", "identity_map", "==", "test_data", ":", "query", ".", "remove", "(", "existing", ")", "query", ".", "session", ".", "flush", "(", ")", "else", ":", "with", "store_context", "(", "store", ")", ":", "for", "existing", "in", "query", ":", "test_data", "=", "existing", ".", "identity_map", ".", "copy", "(", ")", "test_data", ".", "update", "(", "self", ".", "identity_map", ")", "if", "existing", ".", "identity_map", "==", "test_data", ":", "query", ".", "remove", "(", "existing", ")", "query", ".", "session", ".", "flush", "(", ")", "if", "size", "is", "None", "or", "mimetype", "is", "None", ":", "with", "WandImage", "(", "file", "=", "raw_file", ")", "as", "wand", ":", "size", "=", "size", "or", "wand", ".", "size", "mimetype", "=", "mimetype", "or", "wand", ".", "mimetype", "if", "mimetype", ".", "startswith", "(", "'image/x-'", ")", ":", "mimetype", "=", "'image/'", "+", "mimetype", "[", "8", ":", "]", "if", "extra_kwargs", "is", "None", ":", "extra_kwargs", "=", "{", "}", "extra_kwargs", ".", "update", "(", "self", ".", "identity_map", ")", "if", "extra_args", "is", "None", ":", "extra_args", "=", "(", ")", "image", "=", "cls", "(", "size", "=", "size", ",", "mimetype", "=", "mimetype", ",", "original", "=", "original", ",", "*", "extra_args", ",", "*", "*", "extra_kwargs", ")", "raw_file", ".", "seek", "(", "0", ")", "image", ".", "file", "=", "raw_file", "image", ".", "store", "=", "store", "query", ".", "append", "(", "image", ")", "return", "image" ]
Similar to :meth:`from_file()` except it's lower than that. It assumes that ``raw_file`` is readable and seekable while :meth:`from_file()` only assumes the file is readable. Also it doesn't make any in-memory buffer while :meth:`from_file()` always makes an in-memory buffer and copies the file into the buffer. If ``size`` and ``mimetype`` are passed, it won't try to read the image and will use these values instead. It's used for implementing :meth:`from_file()` and :meth:`from_blob()` methods that are higher than it. :param raw_file: the seekable and readable file of the image :type raw_file: file-like object, :class:`file` :param store: the storage to store the file. :data:`~sqlalchemy_imageattach.context.current_store` by default :type store: :class:`~sqlalchemy_imageattach.store.Store` :param size: an optional size of the image. automatically detected if it's omitted :type size: :class:`tuple` :param mimetype: an optional mimetype of the image. automatically detected if it's omitted :type mimetype: :class:`str` :param original: an optional flag which represents whether it is an original image or not. default is :const:`True` (meaning original) :type original: :class:`bool` :param extra_args: additional arguments to pass to the model's constructor. :type extra_args: :class:`collections.abc.Sequence` :param extra_kwargs: additional keyword arguments to pass to the model's constructor. :type extra_kwargs: :class:`typing.Mapping`\ [:class:`str`, :class:`object`] :returns: the created image instance :rtype: :class:`Image` .. versionadded:: 1.0.0 The ``extra_args`` and ``extra_kwargs`` options.
[ "Similar", "to", ":", "meth", ":", "from_file", "()", "except", "it", "s", "lower", "than", "that", ".", "It", "assumes", "that", "raw_file", "is", "readable", "and", "seekable", "while", ":", "meth", ":", "from_file", "()", "only", "assumes", "the", "file", "is", "readable", ".", "Also", "it", "doesn", "t", "make", "any", "in", "-", "memory", "buffer", "while", ":", "meth", ":", "from_file", "()", "always", "makes", "an", "in", "-", "memory", "buffer", "and", "copy", "the", "file", "into", "the", "buffer", "." ]
python
train
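The step worth isolating in this record is probe-then-rewind: the image stream is read once to detect size and mimetype, then seek(0) rewinds it so the storage backend can re-read from the start. A hedged standalone sketch (assumes Wand is installed; `save` is a placeholder for the real store call, not part of the library):

def probe_and_store(raw_file, save):
    from wand.image import Image as WandImage
    with WandImage(file=raw_file) as wand:  # probing consumes the stream
        size, mimetype = wand.size, wand.mimetype
    if mimetype.startswith('image/x-'):     # same normalization as above
        mimetype = 'image/' + mimetype[8:]
    raw_file.seek(0)                        # rewind before handing off to storage
    save(raw_file)
    return size, mimetype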
Kozea/pygal
pygal/graph/line.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/line.py#L86-L187
def line(self, serie, rescale=False): """Draw the line serie""" serie_node = self.svg.serie(serie) if rescale and self.secondary_series: points = self._rescale(serie.points) else: points = serie.points view_values = list(map(self.view, points)) if serie.show_dots: for i, (x, y) in enumerate(view_values): if None in (x, y): continue if self.logarithmic: if points[i][1] is None or points[i][1] <= 0: continue if (serie.show_only_major_dots and self.x_labels and i < len(self.x_labels) and self.x_labels[i] not in self._x_labels_major): continue metadata = serie.metadata.get(i) classes = [] if x > self.view.width / 2: classes.append('left') if y > self.view.height / 2: classes.append('top') classes = ' '.join(classes) self._confidence_interval( serie_node['overlay'], x, y, serie.values[i], metadata ) dots = decorate( self.svg, self.svg.node(serie_node['overlay'], class_="dots"), metadata ) val = self._format(serie, i) alter( self.svg.transposable_node( dots, 'circle', cx=x, cy=y, r=serie.dots_size, class_='dot reactive tooltip-trigger' ), metadata ) self._tooltip_data( dots, val, x, y, xlabel=self._get_x_label(i) ) self._static_value( serie_node, val, x + self.style.value_font_size, y + self.style.value_font_size, metadata ) if serie.stroke: if self.interpolate: points = serie.interpolated if rescale and self.secondary_series: points = self._rescale(points) view_values = list(map(self.view, points)) if serie.fill: view_values = self._fill(view_values) if serie.allow_interruptions: # view_values are in form [(x1, y1), (x2, y2)]. We # need to split that into multiple sequences if a # None is present here sequences = [] cur_sequence = [] for x, y in view_values: if y is None and len(cur_sequence) > 0: # emit current subsequence sequences.append(cur_sequence) cur_sequence = [] elif y is None: # just discard continue else: cur_sequence.append((x, y)) # append the element if len(cur_sequence) > 0: # emit last possible sequence sequences.append(cur_sequence) else: # plain vanilla rendering sequences = [view_values] if self.logarithmic: for seq in sequences: for ele in seq[::-1]: y = points[seq.index(ele)][1] if y is None or y <= 0: del seq[seq.index(ele)] for seq in sequences: self.svg.line( serie_node['plot'], seq, close=self._self_close, class_='line reactive' + (' nofill' if not serie.fill else '') )
[ "def", "line", "(", "self", ",", "serie", ",", "rescale", "=", "False", ")", ":", "serie_node", "=", "self", ".", "svg", ".", "serie", "(", "serie", ")", "if", "rescale", "and", "self", ".", "secondary_series", ":", "points", "=", "self", ".", "_rescale", "(", "serie", ".", "points", ")", "else", ":", "points", "=", "serie", ".", "points", "view_values", "=", "list", "(", "map", "(", "self", ".", "view", ",", "points", ")", ")", "if", "serie", ".", "show_dots", ":", "for", "i", ",", "(", "x", ",", "y", ")", "in", "enumerate", "(", "view_values", ")", ":", "if", "None", "in", "(", "x", ",", "y", ")", ":", "continue", "if", "self", ".", "logarithmic", ":", "if", "points", "[", "i", "]", "[", "1", "]", "is", "None", "or", "points", "[", "i", "]", "[", "1", "]", "<=", "0", ":", "continue", "if", "(", "serie", ".", "show_only_major_dots", "and", "self", ".", "x_labels", "and", "i", "<", "len", "(", "self", ".", "x_labels", ")", "and", "self", ".", "x_labels", "[", "i", "]", "not", "in", "self", ".", "_x_labels_major", ")", ":", "continue", "metadata", "=", "serie", ".", "metadata", ".", "get", "(", "i", ")", "classes", "=", "[", "]", "if", "x", ">", "self", ".", "view", ".", "width", "/", "2", ":", "classes", ".", "append", "(", "'left'", ")", "if", "y", ">", "self", ".", "view", ".", "height", "/", "2", ":", "classes", ".", "append", "(", "'top'", ")", "classes", "=", "' '", ".", "join", "(", "classes", ")", "self", ".", "_confidence_interval", "(", "serie_node", "[", "'overlay'", "]", ",", "x", ",", "y", ",", "serie", ".", "values", "[", "i", "]", ",", "metadata", ")", "dots", "=", "decorate", "(", "self", ".", "svg", ",", "self", ".", "svg", ".", "node", "(", "serie_node", "[", "'overlay'", "]", ",", "class_", "=", "\"dots\"", ")", ",", "metadata", ")", "val", "=", "self", ".", "_format", "(", "serie", ",", "i", ")", "alter", "(", "self", ".", "svg", ".", "transposable_node", "(", "dots", ",", "'circle'", ",", "cx", "=", "x", ",", "cy", "=", "y", ",", "r", "=", "serie", ".", "dots_size", ",", "class_", "=", "'dot reactive tooltip-trigger'", ")", ",", "metadata", ")", "self", ".", "_tooltip_data", "(", "dots", ",", "val", ",", "x", ",", "y", ",", "xlabel", "=", "self", ".", "_get_x_label", "(", "i", ")", ")", "self", ".", "_static_value", "(", "serie_node", ",", "val", ",", "x", "+", "self", ".", "style", ".", "value_font_size", ",", "y", "+", "self", ".", "style", ".", "value_font_size", ",", "metadata", ")", "if", "serie", ".", "stroke", ":", "if", "self", ".", "interpolate", ":", "points", "=", "serie", ".", "interpolated", "if", "rescale", "and", "self", ".", "secondary_series", ":", "points", "=", "self", ".", "_rescale", "(", "points", ")", "view_values", "=", "list", "(", "map", "(", "self", ".", "view", ",", "points", ")", ")", "if", "serie", ".", "fill", ":", "view_values", "=", "self", ".", "_fill", "(", "view_values", ")", "if", "serie", ".", "allow_interruptions", ":", "# view_values are in form [(x1, y1), (x2, y2)]. 
We", "# need to split that into multiple sequences if a", "# None is present here", "sequences", "=", "[", "]", "cur_sequence", "=", "[", "]", "for", "x", ",", "y", "in", "view_values", ":", "if", "y", "is", "None", "and", "len", "(", "cur_sequence", ")", ">", "0", ":", "# emit current subsequence", "sequences", ".", "append", "(", "cur_sequence", ")", "cur_sequence", "=", "[", "]", "elif", "y", "is", "None", ":", "# just discard", "continue", "else", ":", "cur_sequence", ".", "append", "(", "(", "x", ",", "y", ")", ")", "# append the element", "if", "len", "(", "cur_sequence", ")", ">", "0", ":", "# emit last possible sequence", "sequences", ".", "append", "(", "cur_sequence", ")", "else", ":", "# plain vanilla rendering", "sequences", "=", "[", "view_values", "]", "if", "self", ".", "logarithmic", ":", "for", "seq", "in", "sequences", ":", "for", "ele", "in", "seq", "[", ":", ":", "-", "1", "]", ":", "y", "=", "points", "[", "seq", ".", "index", "(", "ele", ")", "]", "[", "1", "]", "if", "y", "is", "None", "or", "y", "<=", "0", ":", "del", "seq", "[", "seq", ".", "index", "(", "ele", ")", "]", "for", "seq", "in", "sequences", ":", "self", ".", "svg", ".", "line", "(", "serie_node", "[", "'plot'", "]", ",", "seq", ",", "close", "=", "self", ".", "_self_close", ",", "class_", "=", "'line reactive'", "+", "(", "' nofill'", "if", "not", "serie", ".", "fill", "else", "''", ")", ")" ]
Draw the line serie
[ "Draw", "the", "line", "serie" ]
python
train
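The `allow_interruptions` branch is a small algorithm on its own: split the point list into contiguous sub-sequences wherever y is None. A standalone sketch of just that step:

def split_on_gaps(view_values):
    sequences, cur = [], []
    for x, y in view_values:
        if y is None:
            if cur:                  # emit the finished sub-sequence
                sequences.append(cur)
                cur = []
            continue                 # the gap marker itself is discarded
        cur.append((x, y))
    if cur:                          # emit the trailing sub-sequence
        sequences.append(cur)
    return sequences

assert split_on_gaps([(0, 1), (1, None), (2, 3), (3, 4)]) == [[(0, 1)], [(2, 3), (3, 4)]]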
Rockhopper-Technologies/pluginlib
pluginlib/_objects.py
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_objects.py#L202-L230
def _process_blacklist(self, blacklist): """ Process blacklist into set of excluded versions """ # Assume blacklist is correct format since it is checked by PluginLoader blacklist_cache = {} blacklist_cache_old = self._cache.get('blacklist', {}) for entry in blacklist: blackkey = (entry.version, entry.operator) if blackkey in blacklist_cache: continue elif blackkey in blacklist_cache_old: blacklist_cache[blackkey] = blacklist_cache_old[blackkey] else: entry_cache = blacklist_cache[blackkey] = set() blackversion = parse_version(entry.version or '0') blackop = OPERATORS[entry.operator] for key in self: if blackop(parse_version(key), blackversion): entry_cache.add(key) self._cache['blacklist'] = blacklist_cache return set().union(*blacklist_cache.values())
[ "def", "_process_blacklist", "(", "self", ",", "blacklist", ")", ":", "# Assume blacklist is correct format since it is checked by PluginLoader", "blacklist_cache", "=", "{", "}", "blacklist_cache_old", "=", "self", ".", "_cache", ".", "get", "(", "'blacklist'", ",", "{", "}", ")", "for", "entry", "in", "blacklist", ":", "blackkey", "=", "(", "entry", ".", "version", ",", "entry", ".", "operator", ")", "if", "blackkey", "in", "blacklist_cache", ":", "continue", "elif", "blackkey", "in", "blacklist_cache_old", ":", "blacklist_cache", "[", "blackkey", "]", "=", "blacklist_cache_old", "[", "blackkey", "]", "else", ":", "entry_cache", "=", "blacklist_cache", "[", "blackkey", "]", "=", "set", "(", ")", "blackversion", "=", "parse_version", "(", "entry", ".", "version", "or", "'0'", ")", "blackop", "=", "OPERATORS", "[", "entry", ".", "operator", "]", "for", "key", "in", "self", ":", "if", "blackop", "(", "parse_version", "(", "key", ")", ",", "blackversion", ")", ":", "entry_cache", ".", "add", "(", "key", ")", "self", ".", "_cache", "[", "'blacklist'", "]", "=", "blacklist_cache", "return", "set", "(", ")", ".", "union", "(", "*", "blacklist_cache", ".", "values", "(", ")", ")" ]
Process blacklist into set of excluded versions
[ "Process", "blacklist", "into", "set", "of", "excluded", "versions" ]
python
train
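A simplified standalone sketch of the expansion step, with the per-entry cache dropped for clarity (`parse_version` here comes from `packaging`; pluginlib's own version parser and OPERATORS table are assumed to behave equivalently):

import operator
from packaging.version import parse as parse_version

OPERATORS = {'<': operator.lt, '<=': operator.le, '==': operator.eq,
             '>=': operator.ge, '>': operator.gt}

def excluded_versions(available, blacklist):
    excluded = set()
    for version, op in blacklist:            # each entry is (version, operator)
        black = parse_version(version or '0')
        excluded |= {k for k in available if OPERATORS[op](parse_version(k), black)}
    return excluded

assert excluded_versions(['1.0', '1.5', '2.0'], [('1.5', '<=')]) == {'1.0', '1.5'}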
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L4758-L4766
def delete_table_column_statistics(self, db_name, tbl_name, col_name): """ Parameters: - db_name - tbl_name - col_name """ self.send_delete_table_column_statistics(db_name, tbl_name, col_name) return self.recv_delete_table_column_statistics()
[ "def", "delete_table_column_statistics", "(", "self", ",", "db_name", ",", "tbl_name", ",", "col_name", ")", ":", "self", ".", "send_delete_table_column_statistics", "(", "db_name", ",", "tbl_name", ",", "col_name", ")", "return", "self", ".", "recv_delete_table_column_statistics", "(", ")" ]
Parameters: - db_name - tbl_name - col_name
[ "Parameters", ":", "-", "db_name", "-", "tbl_name", "-", "col_name" ]
python
train
saltstack/salt
salt/netapi/rest_cherrypy/app.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_cherrypy/app.py#L936-L944
def process_request_body(fn): ''' A decorator to skip a processor function if process_request_body is False ''' @functools.wraps(fn) def wrapped(*args, **kwargs): # pylint: disable=C0111 if cherrypy.request.process_request_body is not False: fn(*args, **kwargs) return wrapped
[ "def", "process_request_body", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=C0111", "if", "cherrypy", ".", "request", ".", "process_request_body", "is", "not", "False", ":", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped" ]
A decorator to skip a processor function if process_request_body is False
[ "A", "decorator", "to", "skip", "a", "processor", "function", "if", "process_request_body", "is", "False" ]
python
train
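The same guard-decorator shape, standalone, with the CherryPy request flag swapped for an explicit predicate so it runs anywhere (a generalization, not salt's code; note this version also passes through fn's return value):

import functools

def only_if(predicate):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            if predicate():          # body is skipped entirely when False
                return fn(*args, **kwargs)
        return wrapped
    return decorator

state = {'process_body': True}

@only_if(lambda: state['process_body'])
def handler():
    return 'ran'

assert handler() == 'ran'
state['process_body'] = False
assert handler() is None             # skipped: wrapper falls through to None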
moralrecordings/mrcrowbar
mrcrowbar/statistics.py
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/statistics.py#L34-L59
def ansi_format( self, width=64, height=12 ): """Return a human readable ANSI-terminal printout of the stats. width Custom width for the graph (in characters). height Custom height for the graph (in characters). """ from mrcrowbar.ansi import format_bar_graph_iter if (256 % width) != 0: raise ValueError( 'Width of the histogram must be a divisor of 256' ) elif (width <= 0): raise ValueError( 'Width of the histogram must be greater than zero' ) elif (width > 256): raise ValueError( 'Width of the histogram must be less than or equal to 256' ) buckets = self.histogram( width ) result = [] for line in format_bar_graph_iter( buckets, width=width, height=height ): result.append( ' {}\n'.format( line ) ) result.append( '╘'+('═'*width)+'╛\n' ) result.append( 'entropy: {:.10f}\n'.format( self.entropy ) ) result.append( 'samples: {}'.format( self.samples ) ) return ''.join( result )
[ "def", "ansi_format", "(", "self", ",", "width", "=", "64", ",", "height", "=", "12", ")", ":", "from", "mrcrowbar", ".", "ansi", "import", "format_bar_graph_iter", "if", "(", "256", "%", "width", ")", "!=", "0", ":", "raise", "ValueError", "(", "'Width of the histogram must be a divisor of 256'", ")", "elif", "(", "width", "<=", "0", ")", ":", "raise", "ValueError", "(", "'Width of the histogram must be greater than zero'", ")", "elif", "(", "width", ">", "256", ")", ":", "raise", "ValueError", "(", "'Width of the histogram must be less than or equal to 256'", ")", "buckets", "=", "self", ".", "histogram", "(", "width", ")", "result", "=", "[", "]", "for", "line", "in", "format_bar_graph_iter", "(", "buckets", ",", "width", "=", "width", ",", "height", "=", "height", ")", ":", "result", ".", "append", "(", "' {}\\n'", ".", "format", "(", "line", ")", ")", "result", ".", "append", "(", "'╘'+(", "'", "═", "'*wid", "t", "h)+'╛", "\\", "n", "' )", "", "result", ".", "append", "(", "'entropy: {:.10f}\\n'", ".", "format", "(", "self", ".", "entropy", ")", ")", "result", ".", "append", "(", "'samples: {}'", ".", "format", "(", "self", ".", "samples", ")", ")", "return", "''", ".", "join", "(", "result", ")" ]
Return a human readable ANSI-terminal printout of the stats. width Custom width for the graph (in characters). height Custom height for the graph (in characters).
[ "Return", "a", "human", "readable", "ANSI", "-", "terminal", "printout", "of", "the", "stats", "." ]
python
train
saltstack/salt
salt/utils/files.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/files.py#L435-L476
def fpopen(*args, **kwargs): ''' Shortcut for fopen with extra uid, gid, and mode options. Supported optional Keyword Arguments: mode Explicit mode to set. Mode is anything os.chmod would accept as input for mode. Works only on unix/unix-like systems. uid The uid to set, if not set, or it is None or -1 no changes are made. Same applies if the path is already owned by this uid. Must be int. Works only on unix/unix-like systems. gid The gid to set, if not set, or it is None or -1 no changes are made. Same applies if the path is already owned by this gid. Must be int. Works only on unix/unix-like systems. ''' # Remove uid, gid and mode from kwargs if present uid = kwargs.pop('uid', -1) # -1 means no change to current uid gid = kwargs.pop('gid', -1) # -1 means no change to current gid mode = kwargs.pop('mode', None) with fopen(*args, **kwargs) as f_handle: path = args[0] d_stat = os.stat(path) if hasattr(os, 'chown'): # if uid and gid are both -1 then go ahead with # no changes at all if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \ [i for i in (uid, gid) if i != -1]: os.chown(path, uid, gid) if mode is not None: mode_part = stat.S_IMODE(d_stat.st_mode) if mode_part != mode: os.chmod(path, (d_stat.st_mode ^ mode_part) | mode) yield f_handle
[ "def", "fpopen", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Remove uid, gid and mode from kwargs if present", "uid", "=", "kwargs", ".", "pop", "(", "'uid'", ",", "-", "1", ")", "# -1 means no change to current uid", "gid", "=", "kwargs", ".", "pop", "(", "'gid'", ",", "-", "1", ")", "# -1 means no change to current gid", "mode", "=", "kwargs", ".", "pop", "(", "'mode'", ",", "None", ")", "with", "fopen", "(", "*", "args", ",", "*", "*", "kwargs", ")", "as", "f_handle", ":", "path", "=", "args", "[", "0", "]", "d_stat", "=", "os", ".", "stat", "(", "path", ")", "if", "hasattr", "(", "os", ",", "'chown'", ")", ":", "# if uid and gid are both -1 then go ahead with", "# no changes at all", "if", "(", "d_stat", ".", "st_uid", "!=", "uid", "or", "d_stat", ".", "st_gid", "!=", "gid", ")", "and", "[", "i", "for", "i", "in", "(", "uid", ",", "gid", ")", "if", "i", "!=", "-", "1", "]", ":", "os", ".", "chown", "(", "path", ",", "uid", ",", "gid", ")", "if", "mode", "is", "not", "None", ":", "mode_part", "=", "stat", ".", "S_IMODE", "(", "d_stat", ".", "st_mode", ")", "if", "mode_part", "!=", "mode", ":", "os", ".", "chmod", "(", "path", ",", "(", "d_stat", ".", "st_mode", "^", "mode_part", ")", "|", "mode", ")", "yield", "f_handle" ]
Shortcut for fopen with extra uid, gid, and mode options. Supported optional Keyword Arguments: mode Explicit mode to set. Mode is anything os.chmod would accept as input for mode. Works only on unix/unix-like systems. uid The uid to set, if not set, or it is None or -1 no changes are made. Same applies if the path is already owned by this uid. Must be int. Works only on unix/unix-like systems. gid The gid to set, if not set, or it is None or -1 no changes are made. Same applies if the path is already owned by this gid. Must be int. Works only on unix/unix-like systems.
[ "Shortcut", "for", "fopen", "with", "extra", "uid", "gid", "and", "mode", "options", "." ]
python
train
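A hedged usage sketch on a Unix-like system (the path and contents are illustrative, and a salt checkout on the import path is assumed; passing the current uid makes the chown effectively a no-op, and `mode` is applied only if it differs from the file's current bits):

import os, stat
from salt.utils.files import fpopen   # module shown in this record

with fpopen('/tmp/example.conf', 'w', mode=0o600, uid=os.getuid()) as fh:
    fh.write('secret = 1\n')
assert stat.S_IMODE(os.stat('/tmp/example.conf').st_mode) == 0o600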
GNS3/gns3-server
gns3server/compute/iou/iou_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/iou/iou_vm.py#L550-L585
def _networking(self): """ Configures the IOL bridge in uBridge. """ bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512) try: # delete any previous bridge if it exists yield from self._ubridge_send("iol_bridge delete {name}".format(name=bridge_name)) except UbridgeError: pass yield from self._ubridge_send("iol_bridge create {name} {bridge_id}".format(name=bridge_name, bridge_id=self.application_id + 512)) bay_id = 0 for adapter in self._adapters: unit_id = 0 for unit in adapter.ports.keys(): nio = adapter.get_nio(unit) if nio and isinstance(nio, NIOUDP): yield from self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name, iol_id=self.application_id, bay=bay_id, unit=unit_id, lport=nio.lport, rhost=nio.rhost, rport=nio.rport)) if nio.capturing: yield from self._ubridge_send('iol_bridge start_capture {name} "{output_file}" {data_link_type}'.format(name=bridge_name, output_file=nio.pcap_output_file, data_link_type=re.sub("^DLT_", "", nio.pcap_data_link_type))) yield from self._ubridge_apply_filters(bay_id, unit_id, nio.filters) unit_id += 1 bay_id += 1 yield from self._ubridge_send("iol_bridge start {name}".format(name=bridge_name))
[ "def", "_networking", "(", "self", ")", ":", "bridge_name", "=", "\"IOL-BRIDGE-{}\"", ".", "format", "(", "self", ".", "application_id", "+", "512", ")", "try", ":", "# delete any previous bridge if it exists", "yield", "from", "self", ".", "_ubridge_send", "(", "\"iol_bridge delete {name}\"", ".", "format", "(", "name", "=", "bridge_name", ")", ")", "except", "UbridgeError", ":", "pass", "yield", "from", "self", ".", "_ubridge_send", "(", "\"iol_bridge create {name} {bridge_id}\"", ".", "format", "(", "name", "=", "bridge_name", ",", "bridge_id", "=", "self", ".", "application_id", "+", "512", ")", ")", "bay_id", "=", "0", "for", "adapter", "in", "self", ".", "_adapters", ":", "unit_id", "=", "0", "for", "unit", "in", "adapter", ".", "ports", ".", "keys", "(", ")", ":", "nio", "=", "adapter", ".", "get_nio", "(", "unit", ")", "if", "nio", "and", "isinstance", "(", "nio", ",", "NIOUDP", ")", ":", "yield", "from", "self", ".", "_ubridge_send", "(", "\"iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}\"", ".", "format", "(", "name", "=", "bridge_name", ",", "iol_id", "=", "self", ".", "application_id", ",", "bay", "=", "bay_id", ",", "unit", "=", "unit_id", ",", "lport", "=", "nio", ".", "lport", ",", "rhost", "=", "nio", ".", "rhost", ",", "rport", "=", "nio", ".", "rport", ")", ")", "if", "nio", ".", "capturing", ":", "yield", "from", "self", ".", "_ubridge_send", "(", "'iol_bridge start_capture {name} \"{output_file}\" {data_link_type}'", ".", "format", "(", "name", "=", "bridge_name", ",", "output_file", "=", "nio", ".", "pcap_output_file", ",", "data_link_type", "=", "re", ".", "sub", "(", "\"^DLT_\"", ",", "\"\"", ",", "nio", ".", "pcap_data_link_type", ")", ")", ")", "yield", "from", "self", ".", "_ubridge_apply_filters", "(", "bay_id", ",", "unit_id", ",", "nio", ".", "filters", ")", "unit_id", "+=", "1", "bay_id", "+=", "1", "yield", "from", "self", ".", "_ubridge_send", "(", "\"iol_bridge start {name}\"", ".", "format", "(", "name", "=", "bridge_name", ")", ")" ]
Configures the IOL bridge in uBridge.
[ "Configures", "the", "IOL", "bridge", "in", "uBridge", "." ]
python
train
google/brotli
research/brotlidump.py
https://github.com/google/brotli/blob/4b2b2d4f83ffeaac7708e44409fe34896a01a278/research/brotlidump.py#L1376-L1383
def makeHexData(self, pos): """Produce hex dump of all data containing the bits from pos to stream.pos """ firstAddress = pos+7>>3 lastAddress = self.stream.pos+7>>3 return ''.join(map('{:02x} '.format, self.stream.data[firstAddress:lastAddress]))
[ "def", "makeHexData", "(", "self", ",", "pos", ")", ":", "firstAddress", "=", "pos", "+", "7", ">>", "3", "lastAddress", "=", "self", ".", "stream", ".", "pos", "+", "7", ">>", "3", "return", "''", ".", "join", "(", "map", "(", "'{:02x} '", ".", "format", ",", "self", ".", "stream", ".", "data", "[", "firstAddress", ":", "lastAddress", "]", ")", ")" ]
Produce hex dump of all data containing the bits from pos to stream.pos
[ "Produce", "hex", "dump", "of", "all", "data", "containing", "the", "bits", "from", "pos", "to", "stream", ".", "pos" ]
python
test
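Two pieces of this record are worth isolating: `(pos + 7) >> 3` is a ceiling divide that turns a bit offset into the first whole-byte address at or after it, and the `'{:02x} '.format` map renders each byte as two hex digits. Standalone:

def bit_to_byte(pos):
    return (pos + 7) >> 3            # ceil(pos / 8) for non-negative pos

assert [bit_to_byte(p) for p in (0, 1, 8, 9)] == [0, 1, 1, 2]
assert ''.join(map('{:02x} '.format, b'\xde\xad')) == 'de ad '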
saltstack/salt
salt/modules/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cloud.py#L140-L153
def has_instance(name, provider=None): ''' Return true if the instance is found on a provider CLI Example: .. code-block:: bash salt minionname cloud.has_instance myinstance ''' data = get_instance(name, provider) if data is None: return False return True
[ "def", "has_instance", "(", "name", ",", "provider", "=", "None", ")", ":", "data", "=", "get_instance", "(", "name", ",", "provider", ")", "if", "data", "is", "None", ":", "return", "False", "return", "True" ]
Return true if the instance is found on a provider CLI Example: .. code-block:: bash salt minionname cloud.has_instance myinstance
[ "Return", "true", "if", "the", "instance", "is", "found", "on", "a", "provider" ]
python
train
matplotlib/cmocean
cmocean/plots.py
https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/plots.py#L164-L183
def quick_plot(cmap, fname=None, fig=None, ax=None, N=10): '''Show quick test of a colormap. ''' x = np.linspace(0, 10, N) X, _ = np.meshgrid(x, x) if ax is None: fig = plt.figure() ax = fig.add_subplot(111) mappable = ax.pcolor(X, cmap=cmap) ax.set_title(cmap.name, fontsize=14) ax.set_xticks([]) ax.set_yticks([]) plt.colorbar(mappable) plt.show() if fname is not None: plt.savefig(fname + '.png', bbox_inches='tight')
[ "def", "quick_plot", "(", "cmap", ",", "fname", "=", "None", ",", "fig", "=", "None", ",", "ax", "=", "None", ",", "N", "=", "10", ")", ":", "x", "=", "np", ".", "linspace", "(", "0", ",", "10", ",", "N", ")", "X", ",", "_", "=", "np", ".", "meshgrid", "(", "x", ",", "x", ")", "if", "ax", "is", "None", ":", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "mappable", "=", "ax", ".", "pcolor", "(", "X", ",", "cmap", "=", "cmap", ")", "ax", ".", "set_title", "(", "cmap", ".", "name", ",", "fontsize", "=", "14", ")", "ax", ".", "set_xticks", "(", "[", "]", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "plt", ".", "colorbar", "(", "mappable", ")", "plt", ".", "show", "(", ")", "if", "fname", "is", "not", "None", ":", "plt", ".", "savefig", "(", "fname", "+", "'.png'", ",", "bbox_inches", "=", "'tight'", ")" ]
Show quick test of a colormap.
[ "Show", "quick", "test", "of", "a", "colormap", "." ]
python
train
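A hedged usage sketch (the `thermal` and `haline` colormaps ship with cmocean; note the function calls plt.show() before the optional savefig):

import cmocean
from cmocean.plots import quick_plot

quick_plot(cmocean.cm.thermal, N=20)           # 20x20 pcolor preview
quick_plot(cmocean.cm.haline, fname='haline')  # also writes haline.png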
aiortc/aiortc
aiortc/rtcicetransport.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcicetransport.py#L270-L294
async def start(self, remoteParameters): """ Initiate connectivity checks. :param: remoteParameters: The :class:`RTCIceParameters` associated with the remote :class:`RTCIceTransport`. """ if self.state == 'closed': raise InvalidStateError('RTCIceTransport is closed') # handle the case where start is already in progress if self.__start is not None: return await self.__start.wait() self.__start = asyncio.Event() self.__setState('checking') self._connection.remote_username = remoteParameters.usernameFragment self._connection.remote_password = remoteParameters.password try: await self._connection.connect() except ConnectionError: self.__setState('failed') else: self.__setState('completed') self.__start.set()
[ "async", "def", "start", "(", "self", ",", "remoteParameters", ")", ":", "if", "self", ".", "state", "==", "'closed'", ":", "raise", "InvalidStateError", "(", "'RTCIceTransport is closed'", ")", "# handle the case where start is already in progress", "if", "self", ".", "__start", "is", "not", "None", ":", "return", "await", "self", ".", "__start", ".", "wait", "(", ")", "self", ".", "__start", "=", "asyncio", ".", "Event", "(", ")", "self", ".", "__setState", "(", "'checking'", ")", "self", ".", "_connection", ".", "remote_username", "=", "remoteParameters", ".", "usernameFragment", "self", ".", "_connection", ".", "remote_password", "=", "remoteParameters", ".", "password", "try", ":", "await", "self", ".", "_connection", ".", "connect", "(", ")", "except", "ConnectionError", ":", "self", ".", "__setState", "(", "'failed'", ")", "else", ":", "self", ".", "__setState", "(", "'completed'", ")", "self", ".", "__start", ".", "set", "(", ")" ]
Initiate connectivity checks. :param: remoteParameters: The :class:`RTCIceParameters` associated with the remote :class:`RTCIceTransport`.
[ "Initiate", "connectivity", "checks", "." ]
python
train
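The start-once pattern in this record generalizes well: the first caller creates an asyncio.Event and does the work, concurrent callers just await it. A standalone sketch of that pattern (not aiortc's code):

import asyncio

class Once:
    def __init__(self):
        self._started = None
        self.runs = 0

    async def start(self):
        if self._started is not None:    # a start is already in progress
            return await self._started.wait()
        self._started = asyncio.Event()
        self.runs += 1                   # stand-in for the one-time work
        await asyncio.sleep(0)
        self._started.set()

async def main():
    once = Once()
    await asyncio.gather(*(once.start() for _ in range(3)))
    assert once.runs == 1                # concurrent callers shared one run

asyncio.run(main())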
taskcluster/taskcluster-client.py
taskcluster/aio/index.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/index.py#L127-L139
async def findTask(self, *args, **kwargs): """ Find Indexed Task Find a task by index path, returning the highest-rank task with that path. If no task exists for the given path, this API end-point will respond with a 404 status. This method gives output: ``v1/indexed-task-response.json#`` This method is ``stable`` """ return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
[ "async", "def", "findTask", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"findTask\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Find Indexed Task Find a task by index path, returning the highest-rank task with that path. If no task exists for the given path, this API end-point will respond with a 404 status. This method gives output: ``v1/indexed-task-response.json#`` This method is ``stable``
[ "Find", "Indexed", "Task" ]
python
train
asyncdef/interfaces
asyncdef/interfaces/engine/itime.py
https://github.com/asyncdef/interfaces/blob/17c589c6ab158e3d9977a6d9da6d5ecd44844285/asyncdef/interfaces/engine/itime.py#L24-L50
def defer( self, func: typing.Callable[[], typing.Any], until: typing.Union[int, float]=-1, ) -> typing.Any: """Defer the execution of a function until some clock value. Args: func (typing.Callable[[], typing.Any]): A callable that accepts no arguments. All return values are ignored. until (typing.Union[int, float]): A numeric value that represents the clock time when the callback becomes available for execution. Values that are less than the current time result in the function being called at the next opportunity. Returns: typing.Any: An opaque identifier that represents the callback uniquely within the processor. This identifier is used to modify the callback scheduling. Note: The time given should not be considered absolute. It represents the time when the callback becomes available to execute. It may be much later than the given time value when the function actually executes depending on the implementation. """ raise NotImplementedError()
[ "def", "defer", "(", "self", ",", "func", ":", "typing", ".", "Callable", "[", "[", "]", ",", "typing", ".", "Any", "]", ",", "until", ":", "typing", ".", "Union", "[", "int", ",", "float", "]", "=", "-", "1", ",", ")", "->", "typing", ".", "Any", ":", "raise", "NotImplementedError", "(", ")" ]
Defer the execution of a function until some clock value. Args: func (typing.Callable[[], typing.Any]): A callable that accepts no arguments. All return values are ignored. until (typing.Union[int, float]): A numeric value that represents the clock time when the callback becomes available for execution. Values that are less than the current time result in the function being called at the next opportunity. Returns: typing.Any: An opaque identifier that represents the callback uniquely within the processor. This identifier is used to modify the callback scheduling. Note: The time given should not be considered absolute. It represents the time when the callback becomes available to execute. It may be much later than the given time value when the function actually executes depending on the implementation.
[ "Defer", "the", "execution", "of", "a", "function", "until", "some", "clock", "value", "." ]
python
train
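One plausible way to satisfy this contract, sketched with a heap keyed on the wake-up time; the interface mandates no particular data structure, so everything below is illustrative:

import heapq, itertools, time

class TimerQueue:
    def __init__(self):
        self._heap = []
        self._ids = itertools.count()

    def defer(self, func, until=-1):
        token = next(self._ids)                  # opaque identifier
        heapq.heappush(self._heap, (until, token, func))
        return token

    def run_due(self, now=None):
        """Run callbacks whose time has arrived (possibly much later than `until`)."""
        now = time.monotonic() if now is None else now
        while self._heap and self._heap[0][0] <= now:
            _, _, func = heapq.heappop(self._heap)
            func()

q = TimerQueue()
q.defer(lambda: print('fired'), until=0)         # past time: runs on next pass
q.run_due()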
CalebBell/ht
ht/hx.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/hx.py#L2964-L2977
def _NTU_from_P_solver(P1, R1, NTU_min, NTU_max, function, **kwargs): '''Private function to solve the P-NTU method backwards, given the function to use, the upper and lower NTU bounds for consideration, and the desired P1 and R1 values. ''' P1_max = _NTU_from_P_objective(NTU_max, R1, 0, function, **kwargs) P1_min = _NTU_from_P_objective(NTU_min, R1, 0, function, **kwargs) if P1 > P1_max: raise ValueError('No solution possible gives such a high P1; maximum P1=%f at NTU1=%f' %(P1_max, NTU_max)) if P1 < P1_min: raise ValueError('No solution possible gives such a low P1; minimum P1=%f at NTU1=%f' %(P1_min, NTU_min)) # Construct the function as a lambda expression as solvers don't support kwargs to_solve = lambda NTU1: _NTU_from_P_objective(NTU1, R1, P1, function, **kwargs) return ridder(to_solve, NTU_min, NTU_max)
[ "def", "_NTU_from_P_solver", "(", "P1", ",", "R1", ",", "NTU_min", ",", "NTU_max", ",", "function", ",", "*", "*", "kwargs", ")", ":", "P1_max", "=", "_NTU_from_P_objective", "(", "NTU_max", ",", "R1", ",", "0", ",", "function", ",", "*", "*", "kwargs", ")", "P1_min", "=", "_NTU_from_P_objective", "(", "NTU_min", ",", "R1", ",", "0", ",", "function", ",", "*", "*", "kwargs", ")", "if", "P1", ">", "P1_max", ":", "raise", "ValueError", "(", "'No solution possible gives such a high P1; maximum P1=%f at NTU1=%f'", "%", "(", "P1_max", ",", "NTU_max", ")", ")", "if", "P1", "<", "P1_min", ":", "raise", "ValueError", "(", "'No solution possible gives such a low P1; minimum P1=%f at NTU1=%f'", "%", "(", "P1_min", ",", "NTU_min", ")", ")", "# Construct the function as a lambda expression as solvers don't support kwargs", "to_solve", "=", "lambda", "NTU1", ":", "_NTU_from_P_objective", "(", "NTU1", ",", "R1", ",", "P1", ",", "function", ",", "*", "*", "kwargs", ")", "return", "ridder", "(", "to_solve", ",", "NTU_min", ",", "NTU_max", ")" ]
Private function to solve the P-NTU method backwards, given the function to use, the upper and lower NTU bounds for consideration, and the desired P1 and R1 values.
[ "Private", "function", "to", "solve", "the", "P", "-", "NTU", "method", "backwards", "given", "the", "function", "to", "use", "the", "upper", "and", "lower", "NTU", "bounds", "for", "consideration", "and", "the", "desired", "P1", "and", "R1", "values", "." ]
python
train
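The same bracket-check-then-root-find shape, standalone, with SciPy's brentq standing in for ht's ridder and a textbook effectiveness relation as the forward function (all illustrative):

import math
from scipy.optimize import brentq

def solve_backwards(target, f, lo, hi):
    f_lo, f_hi = f(lo), f(hi)
    if not (min(f_lo, f_hi) <= target <= max(f_lo, f_hi)):
        raise ValueError('target %g not bracketed on [%g, %g]' % (target, lo, hi))
    return brentq(lambda x: f(x) - target, lo, hi)

# invert P = 1 - exp(-NTU); the answer is ln(2) for P = 0.5
NTU = solve_backwards(0.5, lambda n: 1.0 - math.exp(-n), 1e-8, 50.0)
assert abs(NTU - math.log(2)) < 1e-9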
oanda/v20-python
src/v20/account.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/account.py#L1323-L1391
def list( self, **kwargs ): """ Get a list of all Accounts authorized for the provided token. Args: Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/accounts' ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('accounts') is not None: parsed_body['accounts'] = [ self.ctx.account.AccountProperties.from_dict(d, self.ctx) for d in jbody.get('accounts') ] elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
[ "def", "list", "(", "self", ",", "*", "*", "kwargs", ")", ":", "request", "=", "Request", "(", "'GET'", ",", "'/v3/accounts'", ")", "response", "=", "self", ".", "ctx", ".", "request", "(", "request", ")", "if", "response", ".", "content_type", "is", "None", ":", "return", "response", "if", "not", "response", ".", "content_type", ".", "startswith", "(", "\"application/json\"", ")", ":", "return", "response", "jbody", "=", "json", ".", "loads", "(", "response", ".", "raw_body", ")", "parsed_body", "=", "{", "}", "#", "# Parse responses as defined by the API specification", "#", "if", "str", "(", "response", ".", "status", ")", "==", "\"200\"", ":", "if", "jbody", ".", "get", "(", "'accounts'", ")", "is", "not", "None", ":", "parsed_body", "[", "'accounts'", "]", "=", "[", "self", ".", "ctx", ".", "account", ".", "AccountProperties", ".", "from_dict", "(", "d", ",", "self", ".", "ctx", ")", "for", "d", "in", "jbody", ".", "get", "(", "'accounts'", ")", "]", "elif", "str", "(", "response", ".", "status", ")", "==", "\"401\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"405\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "#", "# Unexpected response status", "#", "else", ":", "parsed_body", "=", "jbody", "response", ".", "body", "=", "parsed_body", "return", "response" ]
Get a list of all Accounts authorized for the provided token. Args: Returns: v20.response.Response containing the results from submitting the request
[ "Get", "a", "list", "of", "all", "Accounts", "authorized", "for", "the", "provided", "token", "." ]
python
train
nickmckay/LiPD-utilities
Matlab/bagit.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Matlab/bagit.py#L366-L374
def is_valid(self, fast=False): """Returns validation success or failure as boolean. Optional fast parameter passed directly to validate(). """ try: self.validate(fast=fast) except BagError: return False return True
[ "def", "is_valid", "(", "self", ",", "fast", "=", "False", ")", ":", "try", ":", "self", ".", "validate", "(", "fast", "=", "fast", ")", "except", "BagError", ":", "return", "False", "return", "True" ]
Returns validation success or failure as boolean. Optional fast parameter passed directly to validate().
[ "Returns", "validation", "success", "or", "failure", "as", "boolean", ".", "Optional", "fast", "parameter", "passed", "directly", "to", "validate", "()", "." ]
python
train
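The exception-to-boolean adapter used here is a reusable shape; a standalone generalization:

def succeeds(fn, *exceptions):
    """Return True if fn() completes, False if it raises one of `exceptions`."""
    try:
        fn()
    except exceptions:
        return False
    return True

assert succeeds(lambda: int('3'), ValueError) is True
assert succeeds(lambda: int('x'), ValueError) is False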
justinabrahms/imhotep
imhotep/reporters/github.py
https://github.com/justinabrahms/imhotep/blob/c6dc365ef34505d7b6837187900e59b00e5fab08/imhotep/reporters/github.py#L97-L108
def post_comment(self, message): """ Comments on an issue, not on a particular line. """ report_url = ( 'https://api.github.com/repos/%s/issues/%s/comments' % (self.repo_name, self.pr_number) ) result = self.requester.post(report_url, {'body': message}) if result.status_code >= 400: log.error("Error posting comment to github. %s", result.json()) return result
[ "def", "post_comment", "(", "self", ",", "message", ")", ":", "report_url", "=", "(", "'https://api.github.com/repos/%s/issues/%s/comments'", "%", "(", "self", ".", "repo_name", ",", "self", ".", "pr_number", ")", ")", "result", "=", "self", ".", "requester", ".", "post", "(", "report_url", ",", "{", "'body'", ":", "message", "}", ")", "if", "result", ".", "status_code", ">=", "400", ":", "log", ".", "error", "(", "\"Error posting comment to github. %s\"", ",", "result", ".", "json", "(", ")", ")", "return", "result" ]
Comments on an issue, not on a particular line.
[ "Comments", "on", "an", "issue", "not", "on", "a", "particular", "line", "." ]
python
train
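A hedged standalone version with plain `requests` in place of the project's requester object (token, repo, and issue number are placeholders; the endpoint is GitHub's documented issue-comments API):

import logging
import requests

log = logging.getLogger(__name__)

def post_issue_comment(repo_name, pr_number, message, token):
    url = 'https://api.github.com/repos/%s/issues/%s/comments' % (repo_name, pr_number)
    result = requests.post(url, json={'body': message},
                           headers={'Authorization': 'token %s' % token})
    if result.status_code >= 400:    # surface 4xx/5xx bodies in the log
        log.error('Error posting comment to github. %s', result.json())
    return result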
bitesofcode/projexui
projexui/widgets/xviewwidget/xview.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xview.py#L44-L83
def xviewSlot(*typs, **opts): """ Defines a method as being a slot for the XView system. This will validate the method against the signal properties if it is triggered from the dispatcher, taking into account currency and grouping for the widget. You can specify the optional policy keyword to define the specific signal policy for this slot, otherwise it will use its parent view's policy. :param default | <variant> | default return value policy | <XView.SignalPolicy> || None :usage |from projexui.widgets.xviewwidget import xviewSlot | |class A(XView): | @xviewSlot() | def format( self ): | print 'test' """ default = opts.get('default') policy = opts.get('policy') if typs: typ_count = len(typs) else: typ_count = 0 def decorated(func): @wraps(func) def wrapped(*args, **kwds): if ( args and isinstance(args[0], XView) ): validated = args[0].validateSignal(policy) else: validated = True if ( validated ): new_args = args[:typ_count+1] return func(*new_args, **kwds) return default return wrapped return decorated
[ "def", "xviewSlot", "(", "*", "typs", ",", "*", "*", "opts", ")", ":", "default", "=", "opts", ".", "get", "(", "'default'", ")", "policy", "=", "opts", ".", "get", "(", "'policy'", ")", "if", "typs", ":", "typ_count", "=", "len", "(", "typs", ")", "else", ":", "typ_count", "=", "0", "def", "decorated", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "if", "(", "args", "and", "isinstance", "(", "args", "[", "0", "]", ",", "XView", ")", ")", ":", "validated", "=", "args", "[", "0", "]", ".", "validateSignal", "(", "policy", ")", "else", ":", "validated", "=", "True", "if", "(", "validated", ")", ":", "new_args", "=", "args", "[", ":", "typ_count", "+", "1", "]", "return", "func", "(", "*", "new_args", ",", "*", "*", "kwds", ")", "return", "default", "return", "wrapped", "return", "decorated" ]
Defines a method as being a slot for the XView system. This will validate the method against the signal properties if it is triggered from the dispatcher, taking into account currency and grouping for the widget. You can specify the optional policy keyword to define the specific signal policy for this slot, otherwise it will use its parent view's policy. :param default | <variant> | default return value policy | <XView.SignalPolicy> || None :usage |from projexui.widgets.xviewwidget import xviewSlot | |class A(XView): | @xviewSlot() | def format( self ): | print 'test'
[ "Defines", "a", "method", "as", "being", "a", "slot", "for", "the", "XView", "system", ".", "This", "will", "validate", "the", "method", "against", "the", "signal", "properties", "if", "it", "is", "triggered", "from", "the", "dispatcher", "taking", "into", "account", "currency", "and", "grouping", "for", "the", "widget", ".", "You", "can", "specify", "the", "optional", "policy", "keyword", "to", "define", "the", "specific", "signal", "policy", "for", "this", "slot", "otherwise", "it", "will", "use", "its", "parent", "view", "s", "policy", ".", ":", "param", "default", "|", "<variant", ">", "|", "default", "return", "value", "policy", "|", "<XView", ".", "SignalPolicy", ">", "||", "None", ":", "usage", "|from", "projexui", ".", "widgets", ".", "xviewwidget", "import", "xviewSlot", "|", "|class", "A", "(", "XView", ")", ":", "|" ]
python
train
Jaymon/endpoints
endpoints/http.py
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L434-L447
def parse_query(cls, query): """return name=val&name2=val2 strings into {name: val} dict""" if not query: return {} d = {} # https://docs.python.org/2/library/urlparse.html for k, kv in urlparse.parse_qs(query, True, strict_parsing=True).items(): #k = k.rstrip("[]") # strip out php type array designated variables if len(kv) > 1: d[k] = kv else: d[k] = kv[0] return d
[ "def", "parse_query", "(", "cls", ",", "query", ")", ":", "if", "not", "query", ":", "return", "{", "}", "d", "=", "{", "}", "# https://docs.python.org/2/library/urlparse.html", "for", "k", ",", "kv", "in", "urlparse", ".", "parse_qs", "(", "query", ",", "True", ",", "strict_parsing", "=", "True", ")", ".", "items", "(", ")", ":", "#k = k.rstrip(\"[]\") # strip out php type array designated variables", "if", "len", "(", "kv", ")", ">", "1", ":", "d", "[", "k", "]", "=", "kv", "else", ":", "d", "[", "k", "]", "=", "kv", "[", "0", "]", "return", "d" ]
return name=val&name2=val2 strings into {name: val} dict
[ "return", "name", "=", "val&name2", "=", "val2", "strings", "into", "{", "name", ":", "val", "}", "dict" ]
python
train
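The same behavior on Python 3, standalone, showing the unwrap-single-values rule (repeated keys stay lists; the positional True is keep_blank_values):

from urllib.parse import parse_qs    # Python 3 home of urlparse.parse_qs

def parse_query(query):
    if not query:
        return {}
    return {k: (kv if len(kv) > 1 else kv[0])
            for k, kv in parse_qs(query, True, strict_parsing=True).items()}

assert parse_query('a=1&b=2&b=3') == {'a': '1', 'b': ['2', '3']}
assert parse_query('') == {}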
spencerahill/aospy
aospy/calc.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L349-L363
def _compute_full_ts(self, data): """Perform calculation and create yearly timeseries at each point.""" # Get results at each desired timestep and spatial point. full_ts, dt = self._compute(data) # Vertically integrate. vert_types = ('vert_int', 'vert_av') if self.dtype_out_vert in vert_types and self.var.def_vert: dp = self._get_input_data(_DP_VARS[self.dtype_in_vert], self.start_date, self.end_date) full_ts = utils.vertcoord.int_dp_g(full_ts, dp) if self.dtype_out_vert == 'vert_av': ps = self._get_input_data(utils.vertcoord.ps, self.start_date, self.end_date) full_ts *= (GRAV_EARTH / ps) return full_ts, dt
[ "def", "_compute_full_ts", "(", "self", ",", "data", ")", ":", "# Get results at each desired timestep and spatial point.", "full_ts", ",", "dt", "=", "self", ".", "_compute", "(", "data", ")", "# Vertically integrate.", "vert_types", "=", "(", "'vert_int'", ",", "'vert_av'", ")", "if", "self", ".", "dtype_out_vert", "in", "vert_types", "and", "self", ".", "var", ".", "def_vert", ":", "dp", "=", "self", ".", "_get_input_data", "(", "_DP_VARS", "[", "self", ".", "dtype_in_vert", "]", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ")", "full_ts", "=", "utils", ".", "vertcoord", ".", "int_dp_g", "(", "full_ts", ",", "dp", ")", "if", "self", ".", "dtype_out_vert", "==", "'vert_av'", ":", "ps", "=", "self", ".", "_get_input_data", "(", "utils", ".", "vertcoord", ".", "ps", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ")", "full_ts", "*=", "(", "GRAV_EARTH", "/", "ps", ")", "return", "full_ts", ",", "dt" ]
Perform calculation and create yearly timeseries at each point.
[ "Perform", "calculation", "and", "create", "yearly", "timeseries", "at", "each", "point", "." ]
python
train
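Reading the two vertical reductions as formulas may help: with int_dp_g computing (1/g) * integral of f dp, the 'vert_av' branch multiplies by g/ps, so the result is the mass-weighted column mean (1/ps) * integral of f dp. A tiny numeric sketch under that reading (illustrative values, not aospy's API):

import numpy as np

g = 9.81
dp = np.array([300e2, 400e2, 300e2])  # pressure-layer thicknesses [Pa]
f = np.array([1.0, 2.0, 3.0])         # field on the same layers
ps = dp.sum()                         # surface pressure of the column

vert_int = (f * dp).sum() / g         # (1/g) * integral of f dp
vert_av = vert_int * g / ps           # mass-weighted column mean
assert abs(vert_av - (f * dp).sum() / ps) < 1e-12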
SheffieldML/GPy
GPy/inference/latent_function_inference/vardtc_md.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/inference/latent_function_inference/vardtc_md.py#L32-L171
def inference(self, kern, X, Z, likelihood, Y, indexD, output_dim, Y_metadata=None, Lm=None, dL_dKmm=None, Kuu_sigma=None): """ The first phase of inference: Compute: log-likelihood, dL_dKmm Cached intermediate results: Kmm, KmmInv, """ input_dim = Z.shape[0] uncertain_inputs = isinstance(X, VariationalPosterior) beta = 1./likelihood.variance if len(beta)==1: beta = np.zeros(output_dim)+beta beta_exp = np.zeros(indexD.shape[0]) for d in range(output_dim): beta_exp[indexD==d] = beta[d] psi0, psi1, psi2 = self.gatherPsiStat(kern, X, Z, Y, beta, uncertain_inputs) psi2_sum = (beta_exp[:,None,None]*psi2).sum(0)/output_dim #====================================================================== # Compute Common Components #====================================================================== Kmm = kern.K(Z).copy() if Kuu_sigma is not None: diag.add(Kmm, Kuu_sigma) else: diag.add(Kmm, self.const_jitter) Lm = jitchol(Kmm) logL = 0. dL_dthetaL = np.zeros(output_dim) dL_dKmm = np.zeros_like(Kmm) dL_dpsi0 = np.zeros_like(psi0) dL_dpsi1 = np.zeros_like(psi1) dL_dpsi2 = np.zeros_like(psi2) wv = np.empty((Kmm.shape[0],output_dim)) for d in range(output_dim): idx_d = indexD==d Y_d = Y[idx_d] N_d = Y_d.shape[0] beta_d = beta[d] psi2_d = psi2[idx_d].sum(0)*beta_d psi1Y = Y_d.T.dot(psi1[idx_d])*beta_d psi0_d = psi0[idx_d].sum()*beta_d YRY_d = np.square(Y_d).sum()*beta_d LmInvPsi2LmInvT = backsub_both_sides(Lm, psi2_d, 'right') Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT LL = jitchol(Lambda) LmLL = Lm.dot(LL) b = dtrtrs(LmLL, psi1Y.T)[0].T bbt = np.square(b).sum() v = dtrtrs(LmLL, b.T, trans=1)[0].T LLinvPsi1TYYTPsi1LLinvT = tdot(b.T) tmp = -backsub_both_sides(LL, LLinvPsi1TYYTPsi1LLinvT) dL_dpsi2R = backsub_both_sides(Lm, tmp+np.eye(input_dim))/2 logL_R = -N_d*np.log(beta_d) logL += -((N_d*log_2_pi+logL_R+psi0_d-np.trace(LmInvPsi2LmInvT))+YRY_d- bbt)/2. dL_dKmm += dL_dpsi2R - backsub_both_sides(Lm, LmInvPsi2LmInvT)/2 dL_dthetaL[d:d+1] = (YRY_d*beta_d + beta_d*psi0_d - N_d*beta_d)/2. - beta_d*(dL_dpsi2R*psi2_d).sum() - beta_d*np.trace(LLinvPsi1TYYTPsi1LLinvT) dL_dpsi0[idx_d] = -beta_d/2. dL_dpsi1[idx_d] = beta_d*np.dot(Y_d,v) dL_dpsi2[idx_d] = beta_d*dL_dpsi2R wv[:,d] = v LmInvPsi2LmInvT = backsub_both_sides(Lm, psi2_sum, 'right') Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT LL = jitchol(Lambda) LmLL = Lm.dot(LL) logdet_L = 2.*np.sum(np.log(np.diag(LL))) dL_dpsi2R_common = dpotri(LmLL)[0]/-2. dL_dpsi2 += dL_dpsi2R_common[None,:,:]*beta_exp[:,None,None] for d in range(output_dim): dL_dthetaL[d] += (dL_dpsi2R_common*psi2[indexD==d].sum(0)).sum()*-beta[d]*beta[d] dL_dKmm += dL_dpsi2R_common*output_dim logL += -output_dim*logdet_L/2. #====================================================================== # Compute dL_dKmm #====================================================================== # dL_dKmm = dL_dpsi2R - output_dim* backsub_both_sides(Lm, LmInvPsi2LmInvT)/2 #LmInv.T.dot(LmInvPsi2LmInvT).dot(LmInv)/2. #====================================================================== # Compute the Posterior distribution of inducing points p(u|Y) #====================================================================== LLInvLmT = dtrtrs(LL, Lm.T)[0] cov = tdot(LLInvLmT.T) wd_inv = backsub_both_sides(Lm, np.eye(input_dim)- backsub_both_sides(LL, np.identity(input_dim), transpose='left'), transpose='left') post = Posterior(woodbury_inv=wd_inv, woodbury_vector=wv, K=Kmm, mean=None, cov=cov, K_chol=Lm) #====================================================================== # Compute dL_dthetaL for uncertian input and non-heter noise #====================================================================== # for d in range(output_dim): # dL_dthetaL[d:d+1] += - beta[d]*beta[d]*(dL_dpsi2R[None,:,:] * psi2[indexD==d]/output_dim).sum() # dL_dthetaL += - (dL_dpsi2R[None,:,:] * psi2_sum*D beta*(dL_dpsi2R*psi2).sum() #====================================================================== # Compute dL_dpsi #====================================================================== if not uncertain_inputs: dL_dpsi1 += (psi1[:,None,:]*dL_dpsi2).sum(2)*2. if uncertain_inputs: grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dpsi0':dL_dpsi0, 'dL_dpsi1':dL_dpsi1, 'dL_dpsi2':dL_dpsi2, 'dL_dthetaL':dL_dthetaL} else: grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dKdiag':dL_dpsi0, 'dL_dKnm':dL_dpsi1, 'dL_dthetaL':dL_dthetaL} return post, logL, grad_dict
[ "def", "inference", "(", "self", ",", "kern", ",", "X", ",", "Z", ",", "likelihood", ",", "Y", ",", "indexD", ",", "output_dim", ",", "Y_metadata", "=", "None", ",", "Lm", "=", "None", ",", "dL_dKmm", "=", "None", ",", "Kuu_sigma", "=", "None", ")", ":", "input_dim", "=", "Z", ".", "shape", "[", "0", "]", "uncertain_inputs", "=", "isinstance", "(", "X", ",", "VariationalPosterior", ")", "beta", "=", "1.", "/", "likelihood", ".", "variance", "if", "len", "(", "beta", ")", "==", "1", ":", "beta", "=", "np", ".", "zeros", "(", "output_dim", ")", "+", "beta", "beta_exp", "=", "np", ".", "zeros", "(", "indexD", ".", "shape", "[", "0", "]", ")", "for", "d", "in", "range", "(", "output_dim", ")", ":", "beta_exp", "[", "indexD", "==", "d", "]", "=", "beta", "[", "d", "]", "psi0", ",", "psi1", ",", "psi2", "=", "self", ".", "gatherPsiStat", "(", "kern", ",", "X", ",", "Z", ",", "Y", ",", "beta", ",", "uncertain_inputs", ")", "psi2_sum", "=", "(", "beta_exp", "[", ":", ",", "None", ",", "None", "]", "*", "psi2", ")", ".", "sum", "(", "0", ")", "/", "output_dim", "#======================================================================", "# Compute Common Components", "#======================================================================", "Kmm", "=", "kern", ".", "K", "(", "Z", ")", ".", "copy", "(", ")", "if", "Kuu_sigma", "is", "not", "None", ":", "diag", ".", "add", "(", "Kmm", ",", "Kuu_sigma", ")", "else", ":", "diag", ".", "add", "(", "Kmm", ",", "self", ".", "const_jitter", ")", "Lm", "=", "jitchol", "(", "Kmm", ")", "logL", "=", "0.", "dL_dthetaL", "=", "np", ".", "zeros", "(", "output_dim", ")", "dL_dKmm", "=", "np", ".", "zeros_like", "(", "Kmm", ")", "dL_dpsi0", "=", "np", ".", "zeros_like", "(", "psi0", ")", "dL_dpsi1", "=", "np", ".", "zeros_like", "(", "psi1", ")", "dL_dpsi2", "=", "np", ".", "zeros_like", "(", "psi2", ")", "wv", "=", "np", ".", "empty", "(", "(", "Kmm", ".", "shape", "[", "0", "]", ",", "output_dim", ")", ")", "for", "d", "in", "range", "(", "output_dim", ")", ":", "idx_d", "=", "indexD", "==", "d", "Y_d", "=", "Y", "[", "idx_d", "]", "N_d", "=", "Y_d", ".", "shape", "[", "0", "]", "beta_d", "=", "beta", "[", "d", "]", "psi2_d", "=", "psi2", "[", "idx_d", "]", ".", "sum", "(", "0", ")", "*", "beta_d", "psi1Y", "=", "Y_d", ".", "T", ".", "dot", "(", "psi1", "[", "idx_d", "]", ")", "*", "beta_d", "psi0_d", "=", "psi0", "[", "idx_d", "]", ".", "sum", "(", ")", "*", "beta_d", "YRY_d", "=", "np", ".", "square", "(", "Y_d", ")", ".", "sum", "(", ")", "*", "beta_d", "LmInvPsi2LmInvT", "=", "backsub_both_sides", "(", "Lm", ",", "psi2_d", ",", "'right'", ")", "Lambda", "=", "np", ".", "eye", "(", "Kmm", ".", "shape", "[", "0", "]", ")", "+", "LmInvPsi2LmInvT", "LL", "=", "jitchol", "(", "Lambda", ")", "LmLL", "=", "Lm", ".", "dot", "(", "LL", ")", "b", "=", "dtrtrs", "(", "LmLL", ",", "psi1Y", ".", "T", ")", "[", "0", "]", ".", "T", "bbt", "=", "np", ".", "square", "(", "b", ")", ".", "sum", "(", ")", "v", "=", "dtrtrs", "(", "LmLL", ",", "b", ".", "T", ",", "trans", "=", "1", ")", "[", "0", "]", ".", "T", "LLinvPsi1TYYTPsi1LLinvT", "=", "tdot", "(", "b", ".", "T", ")", "tmp", "=", "-", "backsub_both_sides", "(", "LL", ",", "LLinvPsi1TYYTPsi1LLinvT", ")", "dL_dpsi2R", "=", "backsub_both_sides", "(", "Lm", ",", "tmp", "+", "np", ".", "eye", "(", "input_dim", ")", ")", "/", "2", "logL_R", "=", "-", "N_d", "*", "np", ".", "log", "(", "beta_d", ")", "logL", "+=", "-", "(", "(", "N_d", "*", "log_2_pi", "+", "logL_R", "+", "psi0_d", "-", "np", ".", "trace", "(", 
"LmInvPsi2LmInvT", ")", ")", "+", "YRY_d", "-", "bbt", ")", "/", "2.", "dL_dKmm", "+=", "dL_dpsi2R", "-", "backsub_both_sides", "(", "Lm", ",", "LmInvPsi2LmInvT", ")", "/", "2", "dL_dthetaL", "[", "d", ":", "d", "+", "1", "]", "=", "(", "YRY_d", "*", "beta_d", "+", "beta_d", "*", "psi0_d", "-", "N_d", "*", "beta_d", ")", "/", "2.", "-", "beta_d", "*", "(", "dL_dpsi2R", "*", "psi2_d", ")", ".", "sum", "(", ")", "-", "beta_d", "*", "np", ".", "trace", "(", "LLinvPsi1TYYTPsi1LLinvT", ")", "dL_dpsi0", "[", "idx_d", "]", "=", "-", "beta_d", "/", "2.", "dL_dpsi1", "[", "idx_d", "]", "=", "beta_d", "*", "np", ".", "dot", "(", "Y_d", ",", "v", ")", "dL_dpsi2", "[", "idx_d", "]", "=", "beta_d", "*", "dL_dpsi2R", "wv", "[", ":", ",", "d", "]", "=", "v", "LmInvPsi2LmInvT", "=", "backsub_both_sides", "(", "Lm", ",", "psi2_sum", ",", "'right'", ")", "Lambda", "=", "np", ".", "eye", "(", "Kmm", ".", "shape", "[", "0", "]", ")", "+", "LmInvPsi2LmInvT", "LL", "=", "jitchol", "(", "Lambda", ")", "LmLL", "=", "Lm", ".", "dot", "(", "LL", ")", "logdet_L", "=", "2.", "*", "np", ".", "sum", "(", "np", ".", "log", "(", "np", ".", "diag", "(", "LL", ")", ")", ")", "dL_dpsi2R_common", "=", "dpotri", "(", "LmLL", ")", "[", "0", "]", "/", "-", "2.", "dL_dpsi2", "+=", "dL_dpsi2R_common", "[", "None", ",", ":", ",", ":", "]", "*", "beta_exp", "[", ":", ",", "None", ",", "None", "]", "for", "d", "in", "range", "(", "output_dim", ")", ":", "dL_dthetaL", "[", "d", "]", "+=", "(", "dL_dpsi2R_common", "*", "psi2", "[", "indexD", "==", "d", "]", ".", "sum", "(", "0", ")", ")", ".", "sum", "(", ")", "*", "-", "beta", "[", "d", "]", "*", "beta", "[", "d", "]", "dL_dKmm", "+=", "dL_dpsi2R_common", "*", "output_dim", "logL", "+=", "-", "output_dim", "*", "logdet_L", "/", "2.", "#======================================================================", "# Compute dL_dKmm", "#======================================================================", "# dL_dKmm = dL_dpsi2R - output_dim* backsub_both_sides(Lm, LmInvPsi2LmInvT)/2 #LmInv.T.dot(LmInvPsi2LmInvT).dot(LmInv)/2.", "#======================================================================", "# Compute the Posterior distribution of inducing points p(u|Y)", "#======================================================================", "LLInvLmT", "=", "dtrtrs", "(", "LL", ",", "Lm", ".", "T", ")", "[", "0", "]", "cov", "=", "tdot", "(", "LLInvLmT", ".", "T", ")", "wd_inv", "=", "backsub_both_sides", "(", "Lm", ",", "np", ".", "eye", "(", "input_dim", ")", "-", "backsub_both_sides", "(", "LL", ",", "np", ".", "identity", "(", "input_dim", ")", ",", "transpose", "=", "'left'", ")", ",", "transpose", "=", "'left'", ")", "post", "=", "Posterior", "(", "woodbury_inv", "=", "wd_inv", ",", "woodbury_vector", "=", "wv", ",", "K", "=", "Kmm", ",", "mean", "=", "None", ",", "cov", "=", "cov", ",", "K_chol", "=", "Lm", ")", "#======================================================================", "# Compute dL_dthetaL for uncertian input and non-heter noise", "#======================================================================", "# for d in range(output_dim):", "# dL_dthetaL[d:d+1] += - beta[d]*beta[d]*(dL_dpsi2R[None,:,:] * psi2[indexD==d]/output_dim).sum()", "# dL_dthetaL += - (dL_dpsi2R[None,:,:] * psi2_sum*D beta*(dL_dpsi2R*psi2).sum()", "#======================================================================", "# Compute dL_dpsi", "#======================================================================", "if", "not", "uncertain_inputs", ":", "dL_dpsi1", "+=", "(", "psi1", "[", ":", 
",", "None", ",", ":", "]", "*", "dL_dpsi2", ")", ".", "sum", "(", "2", ")", "*", "2.", "if", "uncertain_inputs", ":", "grad_dict", "=", "{", "'dL_dKmm'", ":", "dL_dKmm", ",", "'dL_dpsi0'", ":", "dL_dpsi0", ",", "'dL_dpsi1'", ":", "dL_dpsi1", ",", "'dL_dpsi2'", ":", "dL_dpsi2", ",", "'dL_dthetaL'", ":", "dL_dthetaL", "}", "else", ":", "grad_dict", "=", "{", "'dL_dKmm'", ":", "dL_dKmm", ",", "'dL_dKdiag'", ":", "dL_dpsi0", ",", "'dL_dKnm'", ":", "dL_dpsi1", ",", "'dL_dthetaL'", ":", "dL_dthetaL", "}", "return", "post", ",", "logL", ",", "grad_dict" ]
The first phase of inference:
Compute: log-likelihood, dL_dKmm
Cached intermediate results: Kmm, KmmInv
[ "The", "first", "phase", "of", "inference", ":", "Compute", ":", "log", "-", "likelihood", "dL_dKmm" ]
python
train
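The inference routine above repeatedly whitens psi2 through backsub_both_sides before taking the inner Cholesky. A minimal NumPy sketch of that helper's assumed semantics (the convention is taken from GPy's linalg utilities and is an assumption here, not code shipped with this entry):

import numpy as np

# Hedged sketch, assuming GPy's convention: L is a lower Cholesky factor;
# 'right' computes L^{-1} X L^{-T} (the whitening applied to psi2 above)
# and 'left' computes L^{-T} X L^{-1}.
def backsub_both_sides(L, X, transpose='right'):
    if transpose == 'right':
        tmp = np.linalg.solve(L, X)          # L^{-1} X
        return np.linalg.solve(L, tmp.T).T   # (L^{-1} X) L^{-T}
    tmp = np.linalg.solve(L.T, X)            # L^{-T} X
    return np.linalg.solve(L.T, tmp.T).T     # (L^{-T} X) L^{-1}

# Quick self-check against explicit inverses on a small SPD matrix.
A = np.eye(3) + 0.1 * np.ones((3, 3))
L = np.linalg.cholesky(A)
X = np.diag([1.0, 2.0, 3.0])
Linv = np.linalg.inv(L)
assert np.allclose(backsub_both_sides(L, X, 'right'), Linv @ X @ Linv.T)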
uber/doubles
doubles/target.py
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/target.py#L96-L112
def hijack_attr(self, attr_name): """Hijack an attribute on the target object. Updates the underlying class and delegating the call to the instance. This allows specially-handled attributes like __call__, __enter__, and __exit__ to be mocked on a per-instance basis. :param str attr_name: the name of the attribute to hijack """ if not self._original_attr(attr_name): setattr( self.obj.__class__, attr_name, _proxy_class_method_to_instance( getattr(self.obj.__class__, attr_name, None), attr_name ), )
[ "def", "hijack_attr", "(", "self", ",", "attr_name", ")", ":", "if", "not", "self", ".", "_original_attr", "(", "attr_name", ")", ":", "setattr", "(", "self", ".", "obj", ".", "__class__", ",", "attr_name", ",", "_proxy_class_method_to_instance", "(", "getattr", "(", "self", ".", "obj", ".", "__class__", ",", "attr_name", ",", "None", ")", ",", "attr_name", ")", ",", ")" ]
Hijack an attribute on the target object, updating the underlying class and delegating the call to the instance. This allows specially-handled attributes like __call__, __enter__, and __exit__ to be mocked on a per-instance basis.

:param str attr_name: the name of the attribute to hijack
[ "Hijack", "an", "attribute", "on", "the", "target", "object", "." ]
python
train
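The hijack is needed because Python resolves special methods such as __call__ on the type, not the instance, so per-instance doubles must route through a class-level proxy. A self-contained illustration of the pattern (proxy_to_instance below is a hypothetical stand-in for _proxy_class_method_to_instance):

def proxy_to_instance(original, attr_name):
    # Class-level proxy: prefer a per-instance override, otherwise fall
    # back to the original class attribute (if there was one).
    def proxy(self, *args, **kwargs):
        override = self.__dict__.get('_double_' + attr_name)
        if override is not None:
            return override(*args, **kwargs)
        if original is None:
            raise TypeError('%s is not defined' % attr_name)
        return original(self, *args, **kwargs)
    return proxy

class Widget(object):
    pass

Widget.__call__ = proxy_to_instance(None, '__call__')
w = Widget()
w.__dict__['_double___call__'] = lambda: 'stubbed'
assert w() == 'stubbed'  # implicit __call__ lookup hits the class proxy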
UCL-INGI/INGInious
inginious/common/tasks.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/common/tasks.py#L132-L160
def check_answer(self, task_input, language): """ Verify the answers in task_input. Returns six values 1st: True the input is **currently** valid. (may become invalid after running the code), False else 2nd: True if the input needs to be run in the VM, False else 3rd: Main message, as a list (that can be join with \n or <br/> for example) 4th: Problem specific message, as a dictionnary (tuple of result/text) 5th: Number of subproblems that (already) contain errors. <= Number of subproblems 6th: Number of errors in MCQ problems. Not linked to the number of subproblems """ valid = True need_launch = False main_message = [] problem_messages = {} error_count = 0 multiple_choice_error_count = 0 for problem in self._problems: problem_is_valid, problem_main_message, problem_s_messages, problem_mc_error_count = problem.check_answer(task_input, language) if problem_is_valid is None: need_launch = True elif problem_is_valid == False: error_count += 1 valid = False if problem_main_message is not None: main_message.append(problem_main_message) if problem_s_messages is not None: problem_messages[problem.get_id()] = (("success" if problem_is_valid else "failed"), problem_s_messages) multiple_choice_error_count += problem_mc_error_count return valid, need_launch, main_message, problem_messages, error_count, multiple_choice_error_count
[ "def", "check_answer", "(", "self", ",", "task_input", ",", "language", ")", ":", "valid", "=", "True", "need_launch", "=", "False", "main_message", "=", "[", "]", "problem_messages", "=", "{", "}", "error_count", "=", "0", "multiple_choice_error_count", "=", "0", "for", "problem", "in", "self", ".", "_problems", ":", "problem_is_valid", ",", "problem_main_message", ",", "problem_s_messages", ",", "problem_mc_error_count", "=", "problem", ".", "check_answer", "(", "task_input", ",", "language", ")", "if", "problem_is_valid", "is", "None", ":", "need_launch", "=", "True", "elif", "problem_is_valid", "==", "False", ":", "error_count", "+=", "1", "valid", "=", "False", "if", "problem_main_message", "is", "not", "None", ":", "main_message", ".", "append", "(", "problem_main_message", ")", "if", "problem_s_messages", "is", "not", "None", ":", "problem_messages", "[", "problem", ".", "get_id", "(", ")", "]", "=", "(", "(", "\"success\"", "if", "problem_is_valid", "else", "\"failed\"", ")", ",", "problem_s_messages", ")", "multiple_choice_error_count", "+=", "problem_mc_error_count", "return", "valid", ",", "need_launch", ",", "main_message", ",", "problem_messages", ",", "error_count", ",", "multiple_choice_error_count" ]
Verify the answers in task_input. Returns six values
1st: True if the input is **currently** valid (it may become invalid after running the code), False otherwise
2nd: True if the input needs to be run in the VM, False otherwise
3rd: Main message, as a list (that can be joined with \n or <br/> for example)
4th: Problem-specific message, as a dictionary (tuple of result/text)
5th: Number of subproblems that (already) contain errors. <= Number of subproblems
6th: Number of errors in MCQ problems. Not linked to the number of subproblems
[ "Verify", "the", "answers", "in", "task_input", ".", "Returns", "six", "values", "1st", ":", "True", "the", "input", "is", "**", "currently", "**", "valid", ".", "(", "may", "become", "invalid", "after", "running", "the", "code", ")", "False", "else", "2nd", ":", "True", "if", "the", "input", "needs", "to", "be", "run", "in", "the", "VM", "False", "else", "3rd", ":", "Main", "message", "as", "a", "list", "(", "that", "can", "be", "join", "with", "\\", "n", "or", "<br", "/", ">", "for", "example", ")", "4th", ":", "Problem", "specific", "message", "as", "a", "dictionnary", "(", "tuple", "of", "result", "/", "text", ")", "5th", ":", "Number", "of", "subproblems", "that", "(", "already", ")", "contain", "errors", ".", "<", "=", "Number", "of", "subproblems", "6th", ":", "Number", "of", "errors", "in", "MCQ", "problems", ".", "Not", "linked", "to", "the", "number", "of", "subproblems" ]
python
train
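A small consumption sketch for the six-tuple contract documented above; the payload below is a dummy shaped like a failing single-subproblem submission (all values invented):

result = (False, False, ['One subproblem failed'],
          {'q1': ('failed', 'Try again')}, 1, 1)
valid, need_launch, main_message, problem_messages, error_count, mcq_errors = result
if need_launch:
    pass  # the submission must be graded inside the VM first
elif not valid:
    print('\n'.join(main_message))
    for problem_id, (status, messages) in problem_messages.items():
        print(problem_id, status, messages)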
CityOfZion/neo-python-core
neocore/IO/BinaryReader.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/IO/BinaryReader.py#L284-L295
def ReadVarString(self, max=sys.maxsize): """ Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator. Args: max (int): (Optional) maximum number of bytes to read. Returns: bytes: """ length = self.ReadVarInt(max) return self.unpack(str(length) + 's', length)
[ "def", "ReadVarString", "(", "self", ",", "max", "=", "sys", ".", "maxsize", ")", ":", "length", "=", "self", ".", "ReadVarInt", "(", "max", ")", "return", "self", ".", "unpack", "(", "str", "(", "length", ")", "+", "'s'", ",", "length", ")" ]
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator. Args: max (int): (Optional) maximum number of bytes to read. Returns: bytes:
[ "Similar", "to", "ReadString", "but", "expects", "a", "variable", "length", "indicator", "instead", "of", "the", "fixed", "1", "byte", "indicator", "." ]
python
train
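A standalone sketch of the variable-length prefix this reader consumes. The 0xFD/0xFE/0xFF escape scheme below is an assumption based on the var-int wire format commonly used by NEO, shown to make the length-then-payload shape concrete:

import io
import struct

def read_var_int(stream):
    # The first byte either holds the value directly (< 0xFD) or selects
    # a wider little-endian integer that follows.
    (first,) = struct.unpack('<B', stream.read(1))
    if first == 0xFD:
        return struct.unpack('<H', stream.read(2))[0]
    if first == 0xFE:
        return struct.unpack('<I', stream.read(4))[0]
    if first == 0xFF:
        return struct.unpack('<Q', stream.read(8))[0]
    return first

def read_var_string(stream):
    return stream.read(read_var_int(stream))

assert read_var_string(io.BytesIO(b'\x05hello')) == b'hello'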
pantsbuild/pants
src/python/pants/java/nailgun_client.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/nailgun_client.py#L110-L166
def _process_session(self): """Process the outputs of the nailgun session. :raises: :class:`NailgunProtocol.ProcessStreamTimeout` if a timeout set from a signal handler with .set_exit_timeout() completes. :raises: :class:`Exception` if the session completes before the timeout, the `reason` argument to .set_exit_timeout() will be raised.""" try: for chunk_type, payload in self.iter_chunks(self._sock, return_bytes=True, timeout_object=self): # TODO(#6579): assert that we have at this point received all the chunk types in # ChunkType.REQUEST_TYPES, then require PID and PGRP (exactly once?), and then allow any of # ChunkType.EXECUTION_TYPES. if chunk_type == ChunkType.STDOUT: self._write_flush(self._stdout, payload) elif chunk_type == ChunkType.STDERR: self._write_flush(self._stderr, payload) elif chunk_type == ChunkType.EXIT: self._write_flush(self._stdout) self._write_flush(self._stderr) return int(payload) elif chunk_type == ChunkType.PID: self.remote_pid = int(payload) self.remote_process_cmdline = psutil.Process(self.remote_pid).cmdline() if self._remote_pid_callback: self._remote_pid_callback(self.remote_pid) elif chunk_type == ChunkType.PGRP: self.remote_pgrp = int(payload) if self._remote_pgrp_callback: self._remote_pgrp_callback(self.remote_pgrp) elif chunk_type == ChunkType.START_READING_INPUT: self._maybe_start_input_writer() else: raise self.ProtocolError('received unexpected chunk {} -> {}'.format(chunk_type, payload)) except NailgunProtocol.ProcessStreamTimeout as e: assert(self.remote_pid is not None) # NB: We overwrite the process title in the pantsd-runner process, which causes it to have an # argv with lots of empty spaces for some reason. We filter those out and pretty-print the # rest here. filtered_remote_cmdline = safe_shlex_join( arg for arg in self.remote_process_cmdline if arg != '') logger.warning( "timed out when attempting to gracefully shut down the remote client executing \"{}\". " "sending SIGKILL to the remote client at pid: {}. message: {}" .format(filtered_remote_cmdline, self.remote_pid, e)) finally: # Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in # NailgunProtocol.iter_chunks(). This ensures the NailgunStreamWriter is always stopped. self._maybe_stop_input_writer() # If an asynchronous error was set at any point (such as in a signal handler), we want to make # sure we clean up the remote process before exiting with error. if self._exit_reason: if self.remote_pgrp: safe_kill(self.remote_pgrp, signal.SIGKILL) if self.remote_pid: safe_kill(self.remote_pid, signal.SIGKILL) raise self._exit_reason
[ "def", "_process_session", "(", "self", ")", ":", "try", ":", "for", "chunk_type", ",", "payload", "in", "self", ".", "iter_chunks", "(", "self", ".", "_sock", ",", "return_bytes", "=", "True", ",", "timeout_object", "=", "self", ")", ":", "# TODO(#6579): assert that we have at this point received all the chunk types in", "# ChunkType.REQUEST_TYPES, then require PID and PGRP (exactly once?), and then allow any of", "# ChunkType.EXECUTION_TYPES.", "if", "chunk_type", "==", "ChunkType", ".", "STDOUT", ":", "self", ".", "_write_flush", "(", "self", ".", "_stdout", ",", "payload", ")", "elif", "chunk_type", "==", "ChunkType", ".", "STDERR", ":", "self", ".", "_write_flush", "(", "self", ".", "_stderr", ",", "payload", ")", "elif", "chunk_type", "==", "ChunkType", ".", "EXIT", ":", "self", ".", "_write_flush", "(", "self", ".", "_stdout", ")", "self", ".", "_write_flush", "(", "self", ".", "_stderr", ")", "return", "int", "(", "payload", ")", "elif", "chunk_type", "==", "ChunkType", ".", "PID", ":", "self", ".", "remote_pid", "=", "int", "(", "payload", ")", "self", ".", "remote_process_cmdline", "=", "psutil", ".", "Process", "(", "self", ".", "remote_pid", ")", ".", "cmdline", "(", ")", "if", "self", ".", "_remote_pid_callback", ":", "self", ".", "_remote_pid_callback", "(", "self", ".", "remote_pid", ")", "elif", "chunk_type", "==", "ChunkType", ".", "PGRP", ":", "self", ".", "remote_pgrp", "=", "int", "(", "payload", ")", "if", "self", ".", "_remote_pgrp_callback", ":", "self", ".", "_remote_pgrp_callback", "(", "self", ".", "remote_pgrp", ")", "elif", "chunk_type", "==", "ChunkType", ".", "START_READING_INPUT", ":", "self", ".", "_maybe_start_input_writer", "(", ")", "else", ":", "raise", "self", ".", "ProtocolError", "(", "'received unexpected chunk {} -> {}'", ".", "format", "(", "chunk_type", ",", "payload", ")", ")", "except", "NailgunProtocol", ".", "ProcessStreamTimeout", "as", "e", ":", "assert", "(", "self", ".", "remote_pid", "is", "not", "None", ")", "# NB: We overwrite the process title in the pantsd-runner process, which causes it to have an", "# argv with lots of empty spaces for some reason. We filter those out and pretty-print the", "# rest here.", "filtered_remote_cmdline", "=", "safe_shlex_join", "(", "arg", "for", "arg", "in", "self", ".", "remote_process_cmdline", "if", "arg", "!=", "''", ")", "logger", ".", "warning", "(", "\"timed out when attempting to gracefully shut down the remote client executing \\\"{}\\\". \"", "\"sending SIGKILL to the remote client at pid: {}. message: {}\"", ".", "format", "(", "filtered_remote_cmdline", ",", "self", ".", "remote_pid", ",", "e", ")", ")", "finally", ":", "# Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in", "# NailgunProtocol.iter_chunks(). This ensures the NailgunStreamWriter is always stopped.", "self", ".", "_maybe_stop_input_writer", "(", ")", "# If an asynchronous error was set at any point (such as in a signal handler), we want to make", "# sure we clean up the remote process before exiting with error.", "if", "self", ".", "_exit_reason", ":", "if", "self", ".", "remote_pgrp", ":", "safe_kill", "(", "self", ".", "remote_pgrp", ",", "signal", ".", "SIGKILL", ")", "if", "self", ".", "remote_pid", ":", "safe_kill", "(", "self", ".", "remote_pid", ",", "signal", ".", "SIGKILL", ")", "raise", "self", ".", "_exit_reason" ]
Process the outputs of the nailgun session.

:raises: :class:`NailgunProtocol.ProcessStreamTimeout` if a timeout set from a signal handler with .set_exit_timeout() completes.
:raises: :class:`Exception` if the session completes before the timeout; in that case the `reason` argument passed to .set_exit_timeout() is raised.
[ "Process", "the", "outputs", "of", "the", "nailgun", "session", "." ]
python
train
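A standalone sketch of the cmdline pretty-printing step in the timeout handler. safe_shlex_join is a pants helper, so the sketch substitutes the stdlib's shlex.quote to show the same drop-empty-then-quote idea (the argv content is invented):

import shlex

# The process-title rewrite leaves empty argv slots; drop them, then
# shell-quote what remains so the warning reads as a copy-pastable line.
cmdline = ['./pants', '', 'run', 'src/python/app:bin', '']
print(' '.join(shlex.quote(arg) for arg in cmdline if arg != ''))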
gem/oq-engine
openquake/commonlib/logictree.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/logictree.py#L661-L688
def parse_branchset(self, branchset_node, depth, number, validate): """ Create :class:`BranchSet` object using data in ``branchset_node``. :param branchset_node: ``etree.Element`` object with tag "logicTreeBranchSet". :param depth: The sequential number of branchset's branching level, based on 0. :param number: Index number of this branchset inside branching level, based on 0. :param validate: Whether or not filters defined in branchset and the branchset itself should be validated. :returns: An instance of :class:`BranchSet` with filters applied but with no branches (they're attached in :meth:`parse_branches`). """ uncertainty_type = branchset_node.attrib.get('uncertaintyType') filters = dict((filtername, branchset_node.attrib.get(filtername)) for filtername in self.FILTERS if filtername in branchset_node.attrib) if validate: self.validate_filters(branchset_node, uncertainty_type, filters) filters = self.parse_filters(branchset_node, uncertainty_type, filters) branchset = BranchSet(uncertainty_type, filters) if validate: self.validate_branchset(branchset_node, depth, number, branchset) return branchset
[ "def", "parse_branchset", "(", "self", ",", "branchset_node", ",", "depth", ",", "number", ",", "validate", ")", ":", "uncertainty_type", "=", "branchset_node", ".", "attrib", ".", "get", "(", "'uncertaintyType'", ")", "filters", "=", "dict", "(", "(", "filtername", ",", "branchset_node", ".", "attrib", ".", "get", "(", "filtername", ")", ")", "for", "filtername", "in", "self", ".", "FILTERS", "if", "filtername", "in", "branchset_node", ".", "attrib", ")", "if", "validate", ":", "self", ".", "validate_filters", "(", "branchset_node", ",", "uncertainty_type", ",", "filters", ")", "filters", "=", "self", ".", "parse_filters", "(", "branchset_node", ",", "uncertainty_type", ",", "filters", ")", "branchset", "=", "BranchSet", "(", "uncertainty_type", ",", "filters", ")", "if", "validate", ":", "self", ".", "validate_branchset", "(", "branchset_node", ",", "depth", ",", "number", ",", "branchset", ")", "return", "branchset" ]
Create a :class:`BranchSet` object using data in ``branchset_node``.

:param branchset_node:
    ``etree.Element`` object with tag "logicTreeBranchSet".
:param depth:
    The zero-based sequential number of the branchset's branching level.
:param number:
    Zero-based index of this branchset inside its branching level.
:param validate:
    Whether or not filters defined in the branchset and the branchset itself should be validated.
:returns:
    An instance of :class:`BranchSet` with filters applied but with no branches (they're attached in :meth:`parse_branches`).
[ "Create", ":", "class", ":", "BranchSet", "object", "using", "data", "in", "branchset_node", "." ]
python
train
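A toy, self-contained version of the attribute-harvesting step above. The element text and the FILTERS tuple are hypothetical; the real class takes its filter names from the logic-tree schema:

import xml.etree.ElementTree as ET

node = ET.fromstring(
    '<logicTreeBranchSet uncertaintyType="gmpeModel"'
    ' applyToTectonicRegionType="Active Shallow Crust"/>')
FILTERS = ('applyToTectonicRegionType', 'applyToSources')
uncertainty_type = node.attrib.get('uncertaintyType')
filters = {name: node.attrib.get(name)
           for name in FILTERS if name in node.attrib}
assert filters == {'applyToTectonicRegionType': 'Active Shallow Crust'}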
RLBot/RLBot
src/main/python/rlbot/utils/class_importer.py
https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/utils/class_importer.py#L65-L95
def load_external_module(python_file): """ Returns the loaded module. All of its newly added dependencies are removed from sys.path after load. """ # There's a special case where python_file may be pointing at the base agent definition here in the framework. # This is sometimes done as a default and we want to allow it. Short-circuit the logic because # loading it as if it's an external class is a real mess. if os.path.abspath(python_file) == os.path.abspath(inspect.getfile(BaseAgent)): return BaseAgent, BaseAgent.__module__ if not os.path.isfile(python_file): raise FileNotFoundError(f"Could not find file {python_file}!") dir_name = os.path.dirname(python_file) module_name = os.path.splitext(os.path.basename(python_file))[0] keys_before = set(sys.modules.keys()) # Temporarily modify the sys.path while we load the module so that the module can use import statements naturally sys.path.insert(0, dir_name) loaded_module = importlib.import_module(module_name) # Clean up the changes to sys.path and sys.modules to avoid collisions with other external classes and to # prepare for the next reload. added = set(sys.modules.keys()).difference(keys_before) del sys.path[0] for key in added: del sys.modules[key] return loaded_module
[ "def", "load_external_module", "(", "python_file", ")", ":", "# There's a special case where python_file may be pointing at the base agent definition here in the framework.", "# This is sometimes done as a default and we want to allow it. Short-circuit the logic because", "# loading it as if it's an external class is a real mess.", "if", "os", ".", "path", ".", "abspath", "(", "python_file", ")", "==", "os", ".", "path", ".", "abspath", "(", "inspect", ".", "getfile", "(", "BaseAgent", ")", ")", ":", "return", "BaseAgent", ",", "BaseAgent", ".", "__module__", "if", "not", "os", ".", "path", ".", "isfile", "(", "python_file", ")", ":", "raise", "FileNotFoundError", "(", "f\"Could not find file {python_file}!\"", ")", "dir_name", "=", "os", ".", "path", ".", "dirname", "(", "python_file", ")", "module_name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "python_file", ")", ")", "[", "0", "]", "keys_before", "=", "set", "(", "sys", ".", "modules", ".", "keys", "(", ")", ")", "# Temporarily modify the sys.path while we load the module so that the module can use import statements naturally", "sys", ".", "path", ".", "insert", "(", "0", ",", "dir_name", ")", "loaded_module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "# Clean up the changes to sys.path and sys.modules to avoid collisions with other external classes and to", "# prepare for the next reload.", "added", "=", "set", "(", "sys", ".", "modules", ".", "keys", "(", ")", ")", ".", "difference", "(", "keys_before", ")", "del", "sys", ".", "path", "[", "0", "]", "for", "key", "in", "added", ":", "del", "sys", ".", "modules", "[", "key", "]", "return", "loaded_module" ]
Returns the loaded module. Any modules newly imported as dependencies are removed from sys.modules after load, and the temporary sys.path entry is dropped.
[ "Returns", "the", "loaded", "module", ".", "All", "of", "its", "newly", "added", "dependencies", "are", "removed", "from", "sys", ".", "path", "after", "load", "." ]
python
train
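A hedged variation on the load-then-clean-up dance above, expressed as a context manager so sys.path and sys.modules are restored even when the import itself raises (names here are illustrative, not RLBot API):

import importlib
import os
import sys
from contextlib import contextmanager

@contextmanager
def isolated_import(python_file):
    dir_name = os.path.dirname(os.path.abspath(python_file))
    module_name = os.path.splitext(os.path.basename(python_file))[0]
    keys_before = set(sys.modules)
    sys.path.insert(0, dir_name)
    try:
        yield importlib.import_module(module_name)
    finally:
        # Undo both mutations no matter what happened inside the block.
        sys.path.remove(dir_name)
        for key in set(sys.modules) - keys_before:
            del sys.modules[key]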
saltstack/salt
salt/modules/bsd_shadow.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bsd_shadow.py#L49-L103
def info(name): ''' Return information for the specified user CLI Example: .. code-block:: bash salt '*' shadow.info someuser ''' try: data = pwd.getpwnam(name) ret = { 'name': data.pw_name, 'passwd': data.pw_passwd} except KeyError: return { 'name': '', 'passwd': ''} if not isinstance(name, six.string_types): name = six.text_type(name) if ':' in name: raise SaltInvocationError('Invalid username \'{0}\''.format(name)) if __salt__['cmd.has_exec']('pw'): change, expire = __salt__['cmd.run_stdout']( ['pw', 'usershow', '-n', name], python_shell=False).split(':')[5:7] elif __grains__['kernel'] in ('NetBSD', 'OpenBSD'): try: with salt.utils.files.fopen('/etc/master.passwd', 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('{0}:'.format(name)): key = line.split(':') change, expire = key[5:7] ret['passwd'] = six.text_type(key[1]) break except IOError: change = expire = None else: change = expire = None try: ret['change'] = int(change) except ValueError: pass try: ret['expire'] = int(expire) except ValueError: pass return ret
[ "def", "info", "(", "name", ")", ":", "try", ":", "data", "=", "pwd", ".", "getpwnam", "(", "name", ")", "ret", "=", "{", "'name'", ":", "data", ".", "pw_name", ",", "'passwd'", ":", "data", ".", "pw_passwd", "}", "except", "KeyError", ":", "return", "{", "'name'", ":", "''", ",", "'passwd'", ":", "''", "}", "if", "not", "isinstance", "(", "name", ",", "six", ".", "string_types", ")", ":", "name", "=", "six", ".", "text_type", "(", "name", ")", "if", "':'", "in", "name", ":", "raise", "SaltInvocationError", "(", "'Invalid username \\'{0}\\''", ".", "format", "(", "name", ")", ")", "if", "__salt__", "[", "'cmd.has_exec'", "]", "(", "'pw'", ")", ":", "change", ",", "expire", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "[", "'pw'", ",", "'usershow'", ",", "'-n'", ",", "name", "]", ",", "python_shell", "=", "False", ")", ".", "split", "(", "':'", ")", "[", "5", ":", "7", "]", "elif", "__grains__", "[", "'kernel'", "]", "in", "(", "'NetBSD'", ",", "'OpenBSD'", ")", ":", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/etc/master.passwd'", ",", "'r'", ")", "as", "fp_", ":", "for", "line", "in", "fp_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "if", "line", ".", "startswith", "(", "'{0}:'", ".", "format", "(", "name", ")", ")", ":", "key", "=", "line", ".", "split", "(", "':'", ")", "change", ",", "expire", "=", "key", "[", "5", ":", "7", "]", "ret", "[", "'passwd'", "]", "=", "six", ".", "text_type", "(", "key", "[", "1", "]", ")", "break", "except", "IOError", ":", "change", "=", "expire", "=", "None", "else", ":", "change", "=", "expire", "=", "None", "try", ":", "ret", "[", "'change'", "]", "=", "int", "(", "change", ")", "except", "ValueError", ":", "pass", "try", ":", "ret", "[", "'expire'", "]", "=", "int", "(", "expire", ")", "except", "ValueError", ":", "pass", "return", "ret" ]
Return information for the specified user CLI Example: .. code-block:: bash salt '*' shadow.info someuser
[ "Return", "information", "for", "the", "specified", "user" ]
python
train
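For reference, the key[5:7] slice above maps onto BSD's master.passwd column order (name, password, uid, gid, class, change, expire, gecos, home, shell). A toy parse over a made-up line:

# name:password:uid:gid:class:change:expire:gecos:home:shell
line = 'alice:*:1001:1001::1893456000:0:Alice:/home/alice:/bin/sh'
key = line.split(':')
change, expire = key[5:7]
assert (change, expire) == ('1893456000', '0')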
tjvr/kurt
kurt/__init__.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L2241-L2252
def _convert(self, format): """Return a new Image instance with the given format. Returns self if the format is already the same. """ if self.format == format: return self else: image = Image(self.pil_image) image._format = format return image
[ "def", "_convert", "(", "self", ",", "format", ")", ":", "if", "self", ".", "format", "==", "format", ":", "return", "self", "else", ":", "image", "=", "Image", "(", "self", ".", "pil_image", ")", "image", ".", "_format", "=", "format", "return", "image" ]
Return a new Image instance with the given format. Returns self if the format is already the same.
[ "Return", "a", "new", "Image", "instance", "with", "the", "given", "format", "." ]
python
train
abseil/abseil-py
absl/logging/__init__.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L367-L380
def log_every_n(level, msg, n, *args): """Logs 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: int, the absl logging level at which to log. msg: str, the message to be logged. n: int, the number of times this should be called before it is logged. *args: The args to be substitued into the msg. """ count = _get_next_log_count_per_token(get_absl_logger().findCaller()) log_if(level, msg, not (count % n), *args)
[ "def", "log_every_n", "(", "level", ",", "msg", ",", "n", ",", "*", "args", ")", ":", "count", "=", "_get_next_log_count_per_token", "(", "get_absl_logger", "(", ")", ".", "findCaller", "(", ")", ")", "log_if", "(", "level", ",", "msg", ",", "not", "(", "count", "%", "n", ")", ",", "*", "args", ")" ]
Logs 'msg % args' at level 'level' once per 'n' times.

Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe.

Args:
  level: int, the absl logging level at which to log.
  msg: str, the message to be logged.
  n: int, the number of times this should be called before it is logged.
  *args: The args to be substituted into the msg.
[ "Logs", "msg", "%", "args", "at", "level", "level", "once", "per", "n", "times", "." ]
python
train
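Usage sketch for the throttled logger above: given the count % n test, the message fires on the 1st, (n+1)st, (2n+1)st, ... call from each call site:

from absl import logging

for i in range(25):
    # Emitted only for i = 0, 10, 20 (the 1st, 11th and 21st call here).
    logging.log_every_n(logging.INFO, 'processed %d items', 10, i)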
pantsbuild/pex
pex/third_party/__init__.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/third_party/__init__.py#L244-L267
def install(cls, uninstallable, prefix, path_items, root=None, warning=None): """Install an importer for modules found under ``path_items`` at the given import ``prefix``. :param bool uninstallable: ``True`` if the installed importer should be uninstalled and any imports it performed be un-imported when ``uninstall`` is called. :param str prefix: The import prefix the installed importer will be responsible for. :param path_items: The paths relative to ``root`` containing modules to expose for import under ``prefix``. :param str root: The root path of the distribution containing the vendored code. NB: This is the the path to the pex code, which serves as the root under which code is vendored at ``pex/vendor/_vendored``. :param str warning: An optional warning to emit if any imports are made through the installed importer. :return: """ root = cls._abs_root(root) importables = tuple(cls._iter_importables(root=root, path_items=path_items, prefix=prefix)) vendor_importer = cls(root=root, importables=importables, uninstallable=uninstallable, warning=warning) sys.meta_path.insert(0, vendor_importer) _tracer().log('Installed {}'.format(vendor_importer), V=3) return vendor_importer
[ "def", "install", "(", "cls", ",", "uninstallable", ",", "prefix", ",", "path_items", ",", "root", "=", "None", ",", "warning", "=", "None", ")", ":", "root", "=", "cls", ".", "_abs_root", "(", "root", ")", "importables", "=", "tuple", "(", "cls", ".", "_iter_importables", "(", "root", "=", "root", ",", "path_items", "=", "path_items", ",", "prefix", "=", "prefix", ")", ")", "vendor_importer", "=", "cls", "(", "root", "=", "root", ",", "importables", "=", "importables", ",", "uninstallable", "=", "uninstallable", ",", "warning", "=", "warning", ")", "sys", ".", "meta_path", ".", "insert", "(", "0", ",", "vendor_importer", ")", "_tracer", "(", ")", ".", "log", "(", "'Installed {}'", ".", "format", "(", "vendor_importer", ")", ",", "V", "=", "3", ")", "return", "vendor_importer" ]
Install an importer for modules found under ``path_items`` at the given import ``prefix``.

:param bool uninstallable: ``True`` if the installed importer should be uninstalled and any imports it performed be un-imported when ``uninstall`` is called.
:param str prefix: The import prefix the installed importer will be responsible for.
:param path_items: The paths relative to ``root`` containing modules to expose for import under ``prefix``.
:param str root: The root path of the distribution containing the vendored code. NB: This is the path to the pex code, which serves as the root under which code is vendored at ``pex/vendor/_vendored``.
:param str warning: An optional warning to emit if any imports are made through the installed importer.
:return:
[ "Install", "an", "importer", "for", "modules", "found", "under", "path_items", "at", "the", "given", "import", "prefix", "." ]
python
train
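A minimal illustration of the sys.meta_path mechanism the installer above plugs into: a finder inserted at index 0 is consulted first and declines by returning None. The prefix and directory are hypothetical, and real prefix remapping (as pex does) needs more bookkeeping than this:

import sys
from importlib.machinery import PathFinder

class PrefixFinder(object):
    """Serve imports under one top-level prefix from a dedicated directory."""

    def __init__(self, prefix, search_path):
        self.prefix = prefix
        self.search_path = search_path

    def find_spec(self, fullname, path=None, target=None):
        if fullname.split('.')[0] != self.prefix:
            return None  # decline; later meta_path entries get a chance
        return PathFinder.find_spec(fullname, [self.search_path])

sys.meta_path.insert(0, PrefixFinder('vendored_pkg', '/tmp/vendored'))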
FPGAwars/apio
apio/commands/upgrade.py
https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/commands/upgrade.py#L16-L32
def cli(ctx): """Check the latest Apio version.""" current_version = get_distribution('apio').version latest_version = get_pypi_latest_version() if latest_version is None: ctx.exit(1) if latest_version == current_version: click.secho('You\'re up-to-date!\nApio {} is currently the ' 'newest version available.'.format(latest_version), fg='green') else: click.secho('You\'re not updated\nPlease execute ' '`pip install -U apio` to upgrade.', fg="yellow")
[ "def", "cli", "(", "ctx", ")", ":", "current_version", "=", "get_distribution", "(", "'apio'", ")", ".", "version", "latest_version", "=", "get_pypi_latest_version", "(", ")", "if", "latest_version", "is", "None", ":", "ctx", ".", "exit", "(", "1", ")", "if", "latest_version", "==", "current_version", ":", "click", ".", "secho", "(", "'You\\'re up-to-date!\\nApio {} is currently the '", "'newest version available.'", ".", "format", "(", "latest_version", ")", ",", "fg", "=", "'green'", ")", "else", ":", "click", ".", "secho", "(", "'You\\'re not updated\\nPlease execute '", "'`pip install -U apio` to upgrade.'", ",", "fg", "=", "\"yellow\"", ")" ]
Check the latest Apio version.
[ "Check", "the", "latest", "Apio", "version", "." ]
python
train
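One design note on the check above: plain string equality detects "different", not "older", so a locally newer development build would also be told to upgrade. A hedged sketch of an ordering-aware comparison using the third-party packaging library (an extra dependency, not something apio uses here):

from packaging.version import Version

current_version = '0.8.0'   # hypothetical values
latest_version = '0.9.0'
if Version(latest_version) > Version(current_version):
    print('A newer release exists; `pip install -U apio` will upgrade.')
else:
    print("You're up-to-date!")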
pazz/alot
alot/db/manager.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/manager.py#L424-L445
def add_message(self, path, tags=None, afterwards=None): """ Adds a file to the notmuch index. :param path: path to the file :type path: str :param tags: tagstrings to add :type tags: list of str :param afterwards: callback to trigger after adding :type afterwards: callable or None """ tags = tags or [] if self.ro: raise DatabaseROError() if not is_subdir_of(path, self.path): msg = 'message path %s ' % path msg += ' is not below notmuchs ' msg += 'root path (%s)' % self.path raise DatabaseError(msg) else: self.writequeue.append(('add', afterwards, path, tags))
[ "def", "add_message", "(", "self", ",", "path", ",", "tags", "=", "None", ",", "afterwards", "=", "None", ")", ":", "tags", "=", "tags", "or", "[", "]", "if", "self", ".", "ro", ":", "raise", "DatabaseROError", "(", ")", "if", "not", "is_subdir_of", "(", "path", ",", "self", ".", "path", ")", ":", "msg", "=", "'message path %s '", "%", "path", "msg", "+=", "' is not below notmuchs '", "msg", "+=", "'root path (%s)'", "%", "self", ".", "path", "raise", "DatabaseError", "(", "msg", ")", "else", ":", "self", ".", "writequeue", ".", "append", "(", "(", "'add'", ",", "afterwards", ",", "path", ",", "tags", ")", ")" ]
Adds a file to the notmuch index. :param path: path to the file :type path: str :param tags: tagstrings to add :type tags: list of str :param afterwards: callback to trigger after adding :type afterwards: callable or None
[ "Adds", "a", "file", "to", "the", "notmuch", "index", "." ]
python
train
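is_subdir_of is an alot helper; a common shape for such a containment check, shown as a hedged standalone sketch rather than the project's actual implementation:

import os

def is_subdir_of(path, directory):
    # Resolve symlinks first so "below the notmuch root" means the real tree.
    path = os.path.realpath(path)
    directory = os.path.realpath(directory)
    return os.path.commonpath([path, directory]) == directory

assert is_subdir_of('/mail/inbox/cur/msg', '/mail')
assert not is_subdir_of('/tmp/msg', '/mail')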
The-Politico/politico-civic-entity
entity/models/person.py
https://github.com/The-Politico/politico-civic-entity/blob/318cd14c407e8e650374f784e692923798eacd81/entity/models/person.py#L62-L86
def save(self, *args, **kwargs): """ **uid**: :code:`person:{slug}` """ if not self.full_name: self.full_name = '{0}{1}{2}'.format( self.first_name, '{}'.format( ' ' + self.middle_name + ' ' if self.middle_name else ' ', ), self.last_name, '{}'.format(' ' + self.suffix if self.suffix else '') ) self.slug = uuslug( self.full_name, instance=self, max_length=100, separator='-', start_no=2 ) if not self.uid: self.uid = 'person:{}'.format(self.slug) super(Person, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "full_name", ":", "self", ".", "full_name", "=", "'{0}{1}{2}'", ".", "format", "(", "self", ".", "first_name", ",", "'{}'", ".", "format", "(", "' '", "+", "self", ".", "middle_name", "+", "' '", "if", "self", ".", "middle_name", "else", "' '", ",", ")", ",", "self", ".", "last_name", ",", "'{}'", ".", "format", "(", "' '", "+", "self", ".", "suffix", "if", "self", ".", "suffix", "else", "''", ")", ")", "self", ".", "slug", "=", "uuslug", "(", "self", ".", "full_name", ",", "instance", "=", "self", ",", "max_length", "=", "100", ",", "separator", "=", "'-'", ",", "start_no", "=", "2", ")", "if", "not", "self", ".", "uid", ":", "self", ".", "uid", "=", "'person:{}'", ".", "format", "(", "self", ".", "slug", ")", "super", "(", "Person", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
**uid**: :code:`person:{slug}`
[ "**", "uid", "**", ":", ":", "code", ":", "person", ":", "{", "slug", "}" ]
python
train
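The nested format calls above amount to joining whichever name parts are present; a compact equivalent sketch with placeholder names:

first, middle, last, suffix = 'Ada', None, 'Lovelace', None

# Keep only the parts that are set, single-space separated.
full_name = ' '.join(part for part in (first, middle, last) if part)
if suffix:
    full_name += ' ' + suffix
assert full_name == 'Ada Lovelace'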
lablup/backend.ai-client-py
src/ai/backend/client/cli/config.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/config.py#L9-L23
def config(): ''' Shows the current configuration. ''' config = get_config() print('Client version: {0}'.format(click.style(__version__, bold=True))) print('API endpoint: {0}'.format(click.style(str(config.endpoint), bold=True))) print('API version: {0}'.format(click.style(config.version, bold=True))) print('Access key: "{0}"'.format(click.style(config.access_key, bold=True))) masked_skey = config.secret_key[:6] + ('*' * 24) + config.secret_key[-10:] print('Secret key: "{0}"'.format(click.style(masked_skey, bold=True))) print('Signature hash type: {0}'.format( click.style(config.hash_type, bold=True))) print('Skip SSL certificate validation? {0}'.format( click.style(str(config.skip_sslcert_validation), bold=True)))
[ "def", "config", "(", ")", ":", "config", "=", "get_config", "(", ")", "print", "(", "'Client version: {0}'", ".", "format", "(", "click", ".", "style", "(", "__version__", ",", "bold", "=", "True", ")", ")", ")", "print", "(", "'API endpoint: {0}'", ".", "format", "(", "click", ".", "style", "(", "str", "(", "config", ".", "endpoint", ")", ",", "bold", "=", "True", ")", ")", ")", "print", "(", "'API version: {0}'", ".", "format", "(", "click", ".", "style", "(", "config", ".", "version", ",", "bold", "=", "True", ")", ")", ")", "print", "(", "'Access key: \"{0}\"'", ".", "format", "(", "click", ".", "style", "(", "config", ".", "access_key", ",", "bold", "=", "True", ")", ")", ")", "masked_skey", "=", "config", ".", "secret_key", "[", ":", "6", "]", "+", "(", "'*'", "*", "24", ")", "+", "config", ".", "secret_key", "[", "-", "10", ":", "]", "print", "(", "'Secret key: \"{0}\"'", ".", "format", "(", "click", ".", "style", "(", "masked_skey", ",", "bold", "=", "True", ")", ")", ")", "print", "(", "'Signature hash type: {0}'", ".", "format", "(", "click", ".", "style", "(", "config", ".", "hash_type", ",", "bold", "=", "True", ")", ")", ")", "print", "(", "'Skip SSL certificate validation? {0}'", ".", "format", "(", "click", ".", "style", "(", "str", "(", "config", ".", "skip_sslcert_validation", ")", ",", "bold", "=", "True", ")", ")", ")" ]
Shows the current configuration.
[ "Shows", "the", "current", "configuration", "." ]
python
train
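The secret-key masking above keeps the first 6 and last 10 characters and pads a fixed run of 24 stars in between; a quick standalone check with a fake key:

secret_key = 'abcdef0123456789abcdef0123456789'  # fake 32-char key
masked = secret_key[:6] + ('*' * 24) + secret_key[-10:]
assert masked.startswith('abcdef') and masked.endswith('0123456789')
assert '*' * 24 in masked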
karel-brinda/rnftools
rnftools/rnfformat/FqCreator.py
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/FqCreator.py#L49-L87
def flush_read_tuple(self): """Flush the internal buffer of reads. """ if not self.is_empty(): suffix_comment_buffer = [] if self._info_simulator is not None: suffix_comment_buffer.append(self._info_simulator) if self._info_reads_in_tuple: # todo: orientation (FF, FR, etc.) # orientation="".join([]) suffix_comment_buffer.append("reads-in-tuple:{}".format(len(self.seqs_bases))) if len(suffix_comment_buffer) != 0: suffix_comment = "[{}]".format(",".join(suffix_comment_buffer)) else: suffix_comment = "" rnf_name = self._rnf_profile.get_rnf_name( rnftools.rnfformat.ReadTuple( segments=self.segments, read_tuple_id=self.current_read_tuple_id, suffix=suffix_comment, ) ) fq_reads = [ os.linesep.join( [ "@{rnf_name}{read_suffix}".format( rnf_name=rnf_name, read_suffix="/{}".format(str(i + 1)) if len(self.seqs_bases) > 1 else "", ), self.seqs_bases[i], "+", self.seqs_qualities[i], ] ) for i in range(len(self.seqs_bases)) ] self._fq_file.write(os.linesep.join(fq_reads)) self._fq_file.write(os.linesep) self.empty()
[ "def", "flush_read_tuple", "(", "self", ")", ":", "if", "not", "self", ".", "is_empty", "(", ")", ":", "suffix_comment_buffer", "=", "[", "]", "if", "self", ".", "_info_simulator", "is", "not", "None", ":", "suffix_comment_buffer", ".", "append", "(", "self", ".", "_info_simulator", ")", "if", "self", ".", "_info_reads_in_tuple", ":", "# todo: orientation (FF, FR, etc.)", "# orientation=\"\".join([])", "suffix_comment_buffer", ".", "append", "(", "\"reads-in-tuple:{}\"", ".", "format", "(", "len", "(", "self", ".", "seqs_bases", ")", ")", ")", "if", "len", "(", "suffix_comment_buffer", ")", "!=", "0", ":", "suffix_comment", "=", "\"[{}]\"", ".", "format", "(", "\",\"", ".", "join", "(", "suffix_comment_buffer", ")", ")", "else", ":", "suffix_comment", "=", "\"\"", "rnf_name", "=", "self", ".", "_rnf_profile", ".", "get_rnf_name", "(", "rnftools", ".", "rnfformat", ".", "ReadTuple", "(", "segments", "=", "self", ".", "segments", ",", "read_tuple_id", "=", "self", ".", "current_read_tuple_id", ",", "suffix", "=", "suffix_comment", ",", ")", ")", "fq_reads", "=", "[", "os", ".", "linesep", ".", "join", "(", "[", "\"@{rnf_name}{read_suffix}\"", ".", "format", "(", "rnf_name", "=", "rnf_name", ",", "read_suffix", "=", "\"/{}\"", ".", "format", "(", "str", "(", "i", "+", "1", ")", ")", "if", "len", "(", "self", ".", "seqs_bases", ")", ">", "1", "else", "\"\"", ",", ")", ",", "self", ".", "seqs_bases", "[", "i", "]", ",", "\"+\"", ",", "self", ".", "seqs_qualities", "[", "i", "]", ",", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "seqs_bases", ")", ")", "]", "self", ".", "_fq_file", ".", "write", "(", "os", ".", "linesep", ".", "join", "(", "fq_reads", ")", ")", "self", ".", "_fq_file", ".", "write", "(", "os", ".", "linesep", ")", "self", ".", "empty", "(", ")" ]
Flush the internal buffer of reads.
[ "Flush", "the", "internal", "buffer", "of", "reads", "." ]
python
train
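For orientation, each flushed read above becomes one standard four-line FASTQ record, and the /1, /2 suffix is appended only when the tuple holds more than one read. A toy record with invented values:

import os

rnf_name = 'sim_read_0000000001'  # hypothetical RNF read-tuple name

record = os.linesep.join([
    '@{}/1'.format(rnf_name),  # header; suffix only for multi-read tuples
    'ACGTACGT',                # bases
    '+',                       # separator line
    'IIIIIIII',                # qualities, one symbol per base
])
print(record)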
Kane610/deconz
pydeconz/websocket.py
https://github.com/Kane610/deconz/blob/8a9498dbbc8c168d4a081173ad6c3b1e17fffdf6/pydeconz/websocket.py#L88-L92
def retry(self): """Retry to connect to deCONZ.""" self.state = STATE_STARTING self.loop.call_later(RETRY_TIMER, self.start) _LOGGER.debug('Reconnecting to deCONZ in %i.', RETRY_TIMER)
[ "def", "retry", "(", "self", ")", ":", "self", ".", "state", "=", "STATE_STARTING", "self", ".", "loop", ".", "call_later", "(", "RETRY_TIMER", ",", "self", ".", "start", ")", "_LOGGER", ".", "debug", "(", "'Reconnecting to deCONZ in %i.'", ",", "RETRY_TIMER", ")" ]
Retry to connect to deCONZ.
[ "Retry", "to", "connect", "to", "deCONZ", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/sdist.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/command/sdist.py#L62-L83
def externals_finder(dirname, filename): """Find any 'svn:externals' directories""" found = False f = open(filename,'rt') for line in iter(f.readline, ''): # can't use direct iter! parts = line.split() if len(parts)==2: kind,length = parts data = f.read(int(length)) if kind=='K' and data=='svn:externals': found = True elif kind=='V' and found: f.close() break else: f.close() return for line in data.splitlines(): parts = line.split() if parts: yield joinpath(dirname, parts[0])
[ "def", "externals_finder", "(", "dirname", ",", "filename", ")", ":", "found", "=", "False", "f", "=", "open", "(", "filename", ",", "'rt'", ")", "for", "line", "in", "iter", "(", "f", ".", "readline", ",", "''", ")", ":", "# can't use direct iter!", "parts", "=", "line", ".", "split", "(", ")", "if", "len", "(", "parts", ")", "==", "2", ":", "kind", ",", "length", "=", "parts", "data", "=", "f", ".", "read", "(", "int", "(", "length", ")", ")", "if", "kind", "==", "'K'", "and", "data", "==", "'svn:externals'", ":", "found", "=", "True", "elif", "kind", "==", "'V'", "and", "found", ":", "f", ".", "close", "(", ")", "break", "else", ":", "f", ".", "close", "(", ")", "return", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "parts", "=", "line", ".", "split", "(", ")", "if", "parts", ":", "yield", "joinpath", "(", "dirname", ",", "parts", "[", "0", "]", ")" ]
Find any 'svn:externals' directories
[ "Find", "any", "svn", ":", "externals", "directories" ]
python
test
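A toy walk-through of the K/V record format the finder above parses: each 'K <len>' or 'V <len>' header line is followed by exactly <len> bytes of payload. The file content here is invented; real input comes from Subversion's property-file metadata:

import io

text = 'K 13\nsvn:externals\nV 20\nvendor http://x/repo\n'
f = io.StringIO(text)
found = False
data = ''
for line in iter(f.readline, ''):
    parts = line.split()
    if len(parts) == 2:
        kind, length = parts
        data = f.read(int(length))  # payload has exactly `length` bytes
        if kind == 'K' and data == 'svn:externals':
            found = True
        elif kind == 'V' and found:
            break
assert data == 'vendor http://x/repo'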
pyamg/pyamg
pyamg/multilevel.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/multilevel.py#L275-L316
def aspreconditioner(self, cycle='V'): """Create a preconditioner using this multigrid cycle. Parameters ---------- cycle : {'V','W','F','AMLI'} Type of multigrid cycle to perform in each iteration. Returns ------- precond : LinearOperator Preconditioner suitable for the iterative solvers in defined in the scipy.sparse.linalg module (e.g. cg, gmres) and any other solver that uses the LinearOperator interface. Refer to the LinearOperator documentation in scipy.sparse.linalg See Also -------- multilevel_solver.solve, scipy.sparse.linalg.LinearOperator Examples -------- >>> from pyamg.aggregation import smoothed_aggregation_solver >>> from pyamg.gallery import poisson >>> from scipy.sparse.linalg import cg >>> import scipy as sp >>> A = poisson((100, 100), format='csr') # matrix >>> b = sp.rand(A.shape[0]) # random RHS >>> ml = smoothed_aggregation_solver(A) # AMG solver >>> M = ml.aspreconditioner(cycle='V') # preconditioner >>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG """ from scipy.sparse.linalg import LinearOperator shape = self.levels[0].A.shape dtype = self.levels[0].A.dtype def matvec(b): return self.solve(b, maxiter=1, cycle=cycle, tol=1e-12) return LinearOperator(shape, matvec, dtype=dtype)
[ "def", "aspreconditioner", "(", "self", ",", "cycle", "=", "'V'", ")", ":", "from", "scipy", ".", "sparse", ".", "linalg", "import", "LinearOperator", "shape", "=", "self", ".", "levels", "[", "0", "]", ".", "A", ".", "shape", "dtype", "=", "self", ".", "levels", "[", "0", "]", ".", "A", ".", "dtype", "def", "matvec", "(", "b", ")", ":", "return", "self", ".", "solve", "(", "b", ",", "maxiter", "=", "1", ",", "cycle", "=", "cycle", ",", "tol", "=", "1e-12", ")", "return", "LinearOperator", "(", "shape", ",", "matvec", ",", "dtype", "=", "dtype", ")" ]
Create a preconditioner using this multigrid cycle.

Parameters
----------
cycle : {'V','W','F','AMLI'}
    Type of multigrid cycle to perform in each iteration.

Returns
-------
precond : LinearOperator
    Preconditioner suitable for the iterative solvers defined in the scipy.sparse.linalg module (e.g. cg, gmres) and any other solver that uses the LinearOperator interface. Refer to the LinearOperator documentation in scipy.sparse.linalg

See Also
--------
multilevel_solver.solve, scipy.sparse.linalg.LinearOperator

Examples
--------
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import scipy as sp
>>> A = poisson((100, 100), format='csr')  # matrix
>>> b = sp.rand(A.shape[0])  # random RHS
>>> ml = smoothed_aggregation_solver(A)  # AMG solver
>>> M = ml.aspreconditioner(cycle='V')  # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M)  # solve with CG
[ "Create", "a", "preconditioner", "using", "this", "multigrid", "cycle", "." ]
python
train
saltstack/salt
salt/runners/f5.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/f5.py#L330-L345
def check_member_pool(lb, member, pool_name): ''' Check a pool member exists in a specific pool CLI Examples: .. code-block:: bash salt-run f5.check_member_pool load_balancer 10.0.0.1 my_pool ''' if __opts__['load_balancers'].get(lb, None): (username, password) = list(__opts__['load_balancers'][lb].values()) else: raise Exception('Unable to find `{0}` load balancer'.format(lb)) F5 = F5Mgmt(lb, username, password) return F5.check_member_pool(member, pool_name)
[ "def", "check_member_pool", "(", "lb", ",", "member", ",", "pool_name", ")", ":", "if", "__opts__", "[", "'load_balancers'", "]", ".", "get", "(", "lb", ",", "None", ")", ":", "(", "username", ",", "password", ")", "=", "list", "(", "__opts__", "[", "'load_balancers'", "]", "[", "lb", "]", ".", "values", "(", ")", ")", "else", ":", "raise", "Exception", "(", "'Unable to find `{0}` load balancer'", ".", "format", "(", "lb", ")", ")", "F5", "=", "F5Mgmt", "(", "lb", ",", "username", ",", "password", ")", "return", "F5", ".", "check_member_pool", "(", "member", ",", "pool_name", ")" ]
Check a pool member exists in a specific pool CLI Examples: .. code-block:: bash salt-run f5.check_member_pool load_balancer 10.0.0.1 my_pool
[ "Check", "a", "pool", "member", "exists", "in", "a", "specific", "pool" ]
python
train
tsnaomi/finnsyll
finnsyll/prev/v06.py
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v06.py#L148-L173
def syllabify(word): '''Syllabify the given word, whether simplex or complex.''' word = split(word) # detect any non-delimited compounds compound = True if re.search(r'-| |\.', word) else False syllabify = _syllabify_compound if compound else _syllabify syll, rules = syllabify(word) yield syll, rules n = 3 if 'T4' in rules: yield syllabify(word, T4=False) n -= 1 if 'e' in rules: yield syllabify(word, T1E=False) n -= 1 if 'e' in rules and 'T4' in rules: yield syllabify(word, T4=False, T1E=False) n -= 1 # yield empty syllabifications and rules for n in range(7): yield '', ''
[ "def", "syllabify", "(", "word", ")", ":", "word", "=", "split", "(", "word", ")", "# detect any non-delimited compounds", "compound", "=", "True", "if", "re", ".", "search", "(", "r'-| |\\.'", ",", "word", ")", "else", "False", "syllabify", "=", "_syllabify_compound", "if", "compound", "else", "_syllabify", "syll", ",", "rules", "=", "syllabify", "(", "word", ")", "yield", "syll", ",", "rules", "n", "=", "3", "if", "'T4'", "in", "rules", ":", "yield", "syllabify", "(", "word", ",", "T4", "=", "False", ")", "n", "-=", "1", "if", "'e'", "in", "rules", ":", "yield", "syllabify", "(", "word", ",", "T1E", "=", "False", ")", "n", "-=", "1", "if", "'e'", "in", "rules", "and", "'T4'", "in", "rules", ":", "yield", "syllabify", "(", "word", ",", "T4", "=", "False", ",", "T1E", "=", "False", ")", "n", "-=", "1", "# yield empty syllabifications and rules", "for", "n", "in", "range", "(", "7", ")", ":", "yield", "''", ",", "''" ]
Syllabify the given word, whether simplex or complex.
[ "Syllabify", "the", "given", "word", "whether", "simplex", "or", "complex", "." ]
python
train
intel-analytics/BigDL
pyspark/bigdl/optim/optimizer.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L914-L923
def set_traindata(self, training_rdd, batch_size): """ Set new training dataset, for optimizer reuse :param training_rdd: the training dataset :param batch_size: training batch size :return: """ callBigDlFunc(self.bigdl_type, "setTrainData", self.value, training_rdd, batch_size)
[ "def", "set_traindata", "(", "self", ",", "training_rdd", ",", "batch_size", ")", ":", "callBigDlFunc", "(", "self", ".", "bigdl_type", ",", "\"setTrainData\"", ",", "self", ".", "value", ",", "training_rdd", ",", "batch_size", ")" ]
Set a new training dataset, for optimizer reuse

:param training_rdd: the training dataset
:param batch_size: training batch size
:return:
[ "Set", "new", "training", "dataset", "for", "optimizer", "reuse" ]
python
test
hayalasalah/adhan.py
adhan/calculations.py
https://github.com/hayalasalah/adhan.py/blob/a7c080ba48f70be9801f048451d2c91a7d579602/adhan/calculations.py#L81-L102
def sun_declination(day): """Compute the declination angle of the sun for the given date. Uses the Spencer Formula (found at http://www.illustratingshadows.com/www-formulae-collection.pdf) :param day: The datetime.date to compute the declination angle for :returns: The angle, in degrees, of the angle of declination """ day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal() day_angle = 2 * pi * day_of_year / 365 declination_radians = sum([ 0.006918, 0.001480*sin(3*day_angle), 0.070257*sin(day_angle), 0.000907*sin(2*day_angle), -0.399912*cos(day_angle), -0.006758*cos(2*day_angle), -0.002697*cos(3*day_angle), ]) return degrees(declination_radians)
[ "def", "sun_declination", "(", "day", ")", ":", "day_of_year", "=", "day", ".", "toordinal", "(", ")", "-", "date", "(", "day", ".", "year", ",", "1", ",", "1", ")", ".", "toordinal", "(", ")", "day_angle", "=", "2", "*", "pi", "*", "day_of_year", "/", "365", "declination_radians", "=", "sum", "(", "[", "0.006918", ",", "0.001480", "*", "sin", "(", "3", "*", "day_angle", ")", ",", "0.070257", "*", "sin", "(", "day_angle", ")", ",", "0.000907", "*", "sin", "(", "2", "*", "day_angle", ")", ",", "-", "0.399912", "*", "cos", "(", "day_angle", ")", ",", "-", "0.006758", "*", "cos", "(", "2", "*", "day_angle", ")", ",", "-", "0.002697", "*", "cos", "(", "3", "*", "day_angle", ")", ",", "]", ")", "return", "degrees", "(", "declination_radians", ")" ]
Compute the declination angle of the sun for the given date. Uses the Spencer Formula (found at http://www.illustratingshadows.com/www-formulae-collection.pdf) :param day: The datetime.date to compute the declination angle for :returns: The angle, in degrees, of the angle of declination
[ "Compute", "the", "declination", "angle", "of", "the", "sun", "for", "the", "given", "date", "." ]
python
train
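A quick worked check of the Spencer formula above: near an equinox the declination should be close to 0°, and near the June solstice close to +23.4°. A minimal standalone sketch reimplementing the same series for illustration:

from datetime import date
from math import pi, sin, cos, degrees

def spencer_declination(day):
    # Same Spencer series as the record above, reimplemented standalone.
    n = day.toordinal() - date(day.year, 1, 1).toordinal()
    g = 2 * pi * n / 365  # fractional "day angle" in radians
    rad = (0.006918 - 0.399912 * cos(g) + 0.070257 * sin(g)
           - 0.006758 * cos(2 * g) + 0.000907 * sin(2 * g)
           - 0.002697 * cos(3 * g) + 0.001480 * sin(3 * g))
    return degrees(rad)

print(round(spencer_declination(date(2020, 6, 21)), 1))  # ~23.5 (June solstice)
print(round(spencer_declination(date(2020, 3, 20)), 1))  # ~0.0 (spring equinox)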
mohamedattahri/PyXMLi
pyxmli/__init__.py
https://github.com/mohamedattahri/PyXMLi/blob/a81a245be822d62f1a20c734ca14b42c786ae81e/pyxmli/__init__.py#L256-L264
def to_string(self, indent="", newl="", addindent=""):
    '''
    Returns a string representation of the XMLi element.
    @return: str
    '''
    buf = StringIO()
    self.to_xml().writexml(buf, indent=indent, addindent=addindent,
                           newl=newl)
    return buf.getvalue()
[ "def", "to_string", "(", "self", ",", "indent", "=", "\"\"", ",", "newl", "=", "\"\"", ",", "addindent", "=", "\"\"", ")", ":", "buf", "=", "StringIO", "(", ")", "self", ".", "to_xml", "(", ")", ".", "writexml", "(", "buf", ",", "indent", "=", "indent", ",", "addindent", "=", "addindent", ",", "newl", "=", "newl", ")", "return", "buf", ".", "getvalue", "(", ")" ]
Returns a string representation of the XMLi element. @return: str
[ "Returns", "a", "string", "representation", "of", "the", "XMLi", "element", "." ]
python
train
sammchardy/python-kucoin
kucoin/client.py
https://github.com/sammchardy/python-kucoin/blob/a4cacde413804784bd313f27a0ad37234888be29/kucoin/client.py#L1484-L1521
def get_fiat_prices(self, base=None, symbol=None):
    """Get fiat price for currency

    https://docs.kucoin.com/#get-fiat-price

    :param base: (optional) Fiat,eg.USD,EUR, default is USD.
    :type base: string
    :param symbol: (optional) Cryptocurrencies.For multiple cyrptocurrencies,
        please separate them with comma one by one. default is all
    :type symbol: string

    .. code:: python

        prices = client.get_fiat_prices()

    :returns: ApiResponse

    .. code:: python

        {
            "BTC": "3911.28000000",
            "ETH": "144.55492453",
            "LTC": "48.45888179",
            "KCS": "0.45546856"
        }

    :raises: KucoinResponseException, KucoinAPIException
    """
    data = {}
    if base is not None:
        data['base'] = base
    if symbol is not None:
        data['currencies'] = symbol

    return self._get('prices', False, data=data)
[ "def", "get_fiat_prices", "(", "self", ",", "base", "=", "None", ",", "symbol", "=", "None", ")", ":", "data", "=", "{", "}", "if", "base", "is", "not", "None", ":", "data", "[", "'base'", "]", "=", "base", "if", "symbol", "is", "not", "None", ":", "data", "[", "'currencies'", "]", "=", "symbol", "return", "self", ".", "_get", "(", "'prices'", ",", "False", ",", "data", "=", "data", ")" ]
Get fiat price for currency https://docs.kucoin.com/#get-fiat-price :param base: (optional) Fiat,eg.USD,EUR, default is USD. :type base: string :param symbol: (optional) Cryptocurrencies.For multiple cyrptocurrencies, please separate them with comma one by one. default is all :type symbol: string .. code:: python prices = client.get_fiat_prices() :returns: ApiResponse .. code:: python { "BTC": "3911.28000000", "ETH": "144.55492453", "LTC": "48.45888179", "KCS": "0.45546856" } :raises: KucoinResponseException, KucoinAPIException
[ "Get", "fiat", "price", "for", "currency" ]
python
train
boriel/zxbasic
arch/zx48k/backend/__pload.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__pload.py#L22-L49
def _paddr(ins):
    """ Returns code sequence which points to local variable or
    parameter (HL)
    """
    output = []
    oper = ins.quad[1]
    indirect = (oper[0] == '*')
    if indirect:
        oper = oper[1:]

    I = int(oper)
    if I >= 0:
        I += 4  # Return Address + "push IX"

    output.append('push ix')
    output.append('pop hl')
    output.append('ld de, %i' % I)
    output.append('add hl, de')

    if indirect:
        output.append('ld e, (hl)')
        output.append('inc hl')
        output.append('ld h, (hl)')
        output.append('ld l, e')

    output.append('push hl')
    return output
[ "def", "_paddr", "(", "ins", ")", ":", "output", "=", "[", "]", "oper", "=", "ins", ".", "quad", "[", "1", "]", "indirect", "=", "(", "oper", "[", "0", "]", "==", "'*'", ")", "if", "indirect", ":", "oper", "=", "oper", "[", "1", ":", "]", "I", "=", "int", "(", "oper", ")", "if", "I", ">=", "0", ":", "I", "+=", "4", "# Return Address + \"push IX\"", "output", ".", "append", "(", "'push ix'", ")", "output", ".", "append", "(", "'pop hl'", ")", "output", ".", "append", "(", "'ld de, %i'", "%", "I", ")", "output", ".", "append", "(", "'add hl, de'", ")", "if", "indirect", ":", "output", ".", "append", "(", "'ld e, (hl)'", ")", "output", ".", "append", "(", "'inc hl'", ")", "output", ".", "append", "(", "'ld h, (hl)'", ")", "output", ".", "append", "(", "'ld l, e'", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output" ]
Returns code sequence which points to local variable or parameter (HL)
[ "Returns", "code", "sequence", "which", "points", "to", "local", "variable", "or", "parameter", "(", "HL", ")" ]
python
train
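To make the Z80 sequence above concrete: for a non-negative offset the routine biases by 4 bytes (the return address plus the saved IX) before emitting the pointer arithmetic on HL. A hedged sketch of the emitted assembly — `FakeIns` is a hypothetical stand-in for the real quad-based instruction object, whose layout is inferred from the code:

class FakeIns:
    # Minimal stand-in: _paddr only reads ins.quad[1].
    def __init__(self, oper):
        self.quad = (None, oper)

print(_paddr(FakeIns('2')))
# -> ['push ix', 'pop hl', 'ld de, 6', 'add hl, de', 'push hl']
# i.e. HL = IX + 2 (local offset) + 4 (return address + pushed IX)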
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1592-L1622
def pretty_print(self, objlist):
    '''Pretty print the result of s3walk. Here we calculate the maximum
       width of each column and align them.
    '''

    def normalize_time(timestamp):
        '''Normalize the timestamp format for pretty print.'''
        if timestamp is None:
            return ' ' * 16
        return TIMESTAMP_FORMAT % (timestamp.year, timestamp.month,
                                   timestamp.day, timestamp.hour,
                                   timestamp.minute)

    cwidth = [0, 0, 0]
    format = '%%%ds %%%ds %%-%ds'

    # Calculate maximum width for each column.
    result = []
    for obj in objlist:
        last_modified = normalize_time(obj['last_modified'])
        size = str(obj['size']) if not obj['is_dir'] else 'DIR'
        name = obj['name']
        item = (last_modified, size, name)
        for i, value in enumerate(item):
            if cwidth[i] < len(value):
                cwidth[i] = len(value)
        result.append(item)

    # Format output.
    for item in result:
        text = (format % tuple(cwidth)) % item
        message('%s', text.rstrip())
[ "def", "pretty_print", "(", "self", ",", "objlist", ")", ":", "def", "normalize_time", "(", "timestamp", ")", ":", "'''Normalize the timestamp format for pretty print.'''", "if", "timestamp", "is", "None", ":", "return", "' '", "*", "16", "return", "TIMESTAMP_FORMAT", "%", "(", "timestamp", ".", "year", ",", "timestamp", ".", "month", ",", "timestamp", ".", "day", ",", "timestamp", ".", "hour", ",", "timestamp", ".", "minute", ")", "cwidth", "=", "[", "0", ",", "0", ",", "0", "]", "format", "=", "'%%%ds %%%ds %%-%ds'", "# Calculate maximum width for each column.", "result", "=", "[", "]", "for", "obj", "in", "objlist", ":", "last_modified", "=", "normalize_time", "(", "obj", "[", "'last_modified'", "]", ")", "size", "=", "str", "(", "obj", "[", "'size'", "]", ")", "if", "not", "obj", "[", "'is_dir'", "]", "else", "'DIR'", "name", "=", "obj", "[", "'name'", "]", "item", "=", "(", "last_modified", ",", "size", ",", "name", ")", "for", "i", ",", "value", "in", "enumerate", "(", "item", ")", ":", "if", "cwidth", "[", "i", "]", "<", "len", "(", "value", ")", ":", "cwidth", "[", "i", "]", "=", "len", "(", "value", ")", "result", ".", "append", "(", "item", ")", "# Format output.", "for", "item", "in", "result", ":", "text", "=", "(", "format", "%", "tuple", "(", "cwidth", ")", ")", "%", "item", "message", "(", "'%s'", ",", "text", ".", "rstrip", "(", ")", ")" ]
Pretty print the result of s3walk. Here we calculate the maximum width of each column and align them.
[ "Pretty", "print", "the", "result", "of", "s3walk", ".", "Here", "we", "calculate", "the", "maximum", "width", "of", "each", "column", "and", "align", "them", "." ]
python
test
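The two-pass alignment idea in `pretty_print` — measure every cell first, then build a `%Ns`-style format string from the measured widths — works standalone too. A minimal sketch of the same technique, detached from s4cmd (the sample rows are illustrative):

rows = [('2019-01-01 10:00', '1024', 'a.txt'),
        ('2019-01-02 09:30', 'DIR', 'logs')]

# First pass: maximum width per column.
widths = [max(len(cell) for cell in col) for col in zip(*rows)]

# Second pass: right-align the first two columns, left-align the name.
fmt = '%%%ds  %%%ds  %%-%ds' % tuple(widths)
for row in rows:
    print((fmt % row).rstrip())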
inasafe/inasafe
safe/gui/tools/wizard/step_kw33_multi_classifications.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw33_multi_classifications.py#L391-L469
def show_current_state(self):
    """Setup the UI for QTextEdit to show the current state."""
    right_panel_heading = QLabel(tr('Status'))
    right_panel_heading.setFont(big_font)
    right_panel_heading.setSizePolicy(
        QSizePolicy.Maximum, QSizePolicy.Maximum)
    self.right_layout.addWidget(right_panel_heading)

    message = m.Message()
    if self.layer_mode == layer_mode_continuous:
        title = tr('Thresholds')
    else:
        title = tr('Value maps')
    message.add(m.Heading(title, **INFO_STYLE))

    for i in range(len(self.exposures)):
        message.add(m.Text(self.exposure_labels[i]))

        classification = self.get_classification(
            self.exposure_combo_boxes[i])

        if self.layer_mode == layer_mode_continuous:
            thresholds = self.thresholds.get(self.exposures[i]['key'])
            if not thresholds or not classification:
                message.add(m.Paragraph(tr('No classifications set.')))
                continue
            table = m.Table(
                style_class='table table-condensed table-striped')
            header = m.Row()
            header.add(m.Cell(tr('Class name')))
            header.add(m.Cell(tr('Minimum')))
            header.add(m.Cell(tr('Maximum')))
            table.add(header)
            classes = classification.get('classes')
            # Sort by value, put the lowest first
            classes = sorted(classes, key=lambda k: k['value'])
            for the_class in classes:
                threshold = thresholds[classification['key']]['classes'][
                    the_class['key']]
                row = m.Row()
                row.add(m.Cell(the_class['name']))
                row.add(m.Cell(threshold[0]))
                row.add(m.Cell(threshold[1]))
                table.add(row)
        else:
            value_maps = self.value_maps.get(self.exposures[i]['key'])
            if not value_maps or not classification:
                message.add(m.Paragraph(tr('No classifications set.')))
                continue
            table = m.Table(
                style_class='table table-condensed table-striped')
            header = m.Row()
            header.add(m.Cell(tr('Class name')))
            header.add(m.Cell(tr('Value')))
            table.add(header)
            classes = classification.get('classes')
            # Sort by value, put the lowest first
            classes = sorted(classes, key=lambda k: k['value'])
            for the_class in classes:
                value_map = value_maps[classification['key']][
                    'classes'].get(the_class['key'], [])
                row = m.Row()
                row.add(m.Cell(the_class['name']))
                row.add(m.Cell(', '.join([str(v) for v in value_map])))
                table.add(row)
        message.add(table)

    # status_text_edit = QTextBrowser(None)
    status_text_edit = QWebView(None)
    status_text_edit.setSizePolicy(
        QSizePolicy.Ignored, QSizePolicy.Ignored)

    status_text_edit.page().mainFrame().setScrollBarPolicy(
        Qt.Horizontal, Qt.ScrollBarAlwaysOff)
    html_string = html_header() + message.to_html() + html_footer()
    status_text_edit.setHtml(html_string)
    self.right_layout.addWidget(status_text_edit)
[ "def", "show_current_state", "(", "self", ")", ":", "right_panel_heading", "=", "QLabel", "(", "tr", "(", "'Status'", ")", ")", "right_panel_heading", ".", "setFont", "(", "big_font", ")", "right_panel_heading", ".", "setSizePolicy", "(", "QSizePolicy", ".", "Maximum", ",", "QSizePolicy", ".", "Maximum", ")", "self", ".", "right_layout", ".", "addWidget", "(", "right_panel_heading", ")", "message", "=", "m", ".", "Message", "(", ")", "if", "self", ".", "layer_mode", "==", "layer_mode_continuous", ":", "title", "=", "tr", "(", "'Thresholds'", ")", "else", ":", "title", "=", "tr", "(", "'Value maps'", ")", "message", ".", "add", "(", "m", ".", "Heading", "(", "title", ",", "*", "*", "INFO_STYLE", ")", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "exposures", ")", ")", ":", "message", ".", "add", "(", "m", ".", "Text", "(", "self", ".", "exposure_labels", "[", "i", "]", ")", ")", "classification", "=", "self", ".", "get_classification", "(", "self", ".", "exposure_combo_boxes", "[", "i", "]", ")", "if", "self", ".", "layer_mode", "==", "layer_mode_continuous", ":", "thresholds", "=", "self", ".", "thresholds", ".", "get", "(", "self", ".", "exposures", "[", "i", "]", "[", "'key'", "]", ")", "if", "not", "thresholds", "or", "not", "classification", ":", "message", ".", "add", "(", "m", ".", "Paragraph", "(", "tr", "(", "'No classifications set.'", ")", ")", ")", "continue", "table", "=", "m", ".", "Table", "(", "style_class", "=", "'table table-condensed table-striped'", ")", "header", "=", "m", ".", "Row", "(", ")", "header", ".", "add", "(", "m", ".", "Cell", "(", "tr", "(", "'Class name'", ")", ")", ")", "header", ".", "add", "(", "m", ".", "Cell", "(", "tr", "(", "'Minimum'", ")", ")", ")", "header", ".", "add", "(", "m", ".", "Cell", "(", "tr", "(", "'Maximum'", ")", ")", ")", "table", ".", "add", "(", "header", ")", "classes", "=", "classification", ".", "get", "(", "'classes'", ")", "# Sort by value, put the lowest first", "classes", "=", "sorted", "(", "classes", ",", "key", "=", "lambda", "k", ":", "k", "[", "'value'", "]", ")", "for", "the_class", "in", "classes", ":", "threshold", "=", "thresholds", "[", "classification", "[", "'key'", "]", "]", "[", "'classes'", "]", "[", "the_class", "[", "'key'", "]", "]", "row", "=", "m", ".", "Row", "(", ")", "row", ".", "add", "(", "m", ".", "Cell", "(", "the_class", "[", "'name'", "]", ")", ")", "row", ".", "add", "(", "m", ".", "Cell", "(", "threshold", "[", "0", "]", ")", ")", "row", ".", "add", "(", "m", ".", "Cell", "(", "threshold", "[", "1", "]", ")", ")", "table", ".", "add", "(", "row", ")", "else", ":", "value_maps", "=", "self", ".", "value_maps", ".", "get", "(", "self", ".", "exposures", "[", "i", "]", "[", "'key'", "]", ")", "if", "not", "value_maps", "or", "not", "classification", ":", "message", ".", "add", "(", "m", ".", "Paragraph", "(", "tr", "(", "'No classifications set.'", ")", ")", ")", "continue", "table", "=", "m", ".", "Table", "(", "style_class", "=", "'table table-condensed table-striped'", ")", "header", "=", "m", ".", "Row", "(", ")", "header", ".", "add", "(", "m", ".", "Cell", "(", "tr", "(", "'Class name'", ")", ")", ")", "header", ".", "add", "(", "m", ".", "Cell", "(", "tr", "(", "'Value'", ")", ")", ")", "table", ".", "add", "(", "header", ")", "classes", "=", "classification", ".", "get", "(", "'classes'", ")", "# Sort by value, put the lowest first", "classes", "=", "sorted", "(", "classes", ",", "key", "=", "lambda", "k", ":", "k", "[", "'value'", "]", ")", "for", "the_class", "in", 
"classes", ":", "value_map", "=", "value_maps", "[", "classification", "[", "'key'", "]", "]", "[", "'classes'", "]", ".", "get", "(", "the_class", "[", "'key'", "]", ",", "[", "]", ")", "row", "=", "m", ".", "Row", "(", ")", "row", ".", "add", "(", "m", ".", "Cell", "(", "the_class", "[", "'name'", "]", ")", ")", "row", ".", "add", "(", "m", ".", "Cell", "(", "', '", ".", "join", "(", "[", "str", "(", "v", ")", "for", "v", "in", "value_map", "]", ")", ")", ")", "table", ".", "add", "(", "row", ")", "message", ".", "add", "(", "table", ")", "# status_text_edit = QTextBrowser(None)", "status_text_edit", "=", "QWebView", "(", "None", ")", "status_text_edit", ".", "setSizePolicy", "(", "QSizePolicy", ".", "Ignored", ",", "QSizePolicy", ".", "Ignored", ")", "status_text_edit", ".", "page", "(", ")", ".", "mainFrame", "(", ")", ".", "setScrollBarPolicy", "(", "Qt", ".", "Horizontal", ",", "Qt", ".", "ScrollBarAlwaysOff", ")", "html_string", "=", "html_header", "(", ")", "+", "message", ".", "to_html", "(", ")", "+", "html_footer", "(", ")", "status_text_edit", ".", "setHtml", "(", "html_string", ")", "self", ".", "right_layout", ".", "addWidget", "(", "status_text_edit", ")" ]
Setup the UI for QTextEdit to show the current state.
[ "Setup", "the", "UI", "for", "QTextEdit", "to", "show", "the", "current", "state", "." ]
python
train
CivicSpleen/ambry
ambry/orm/file.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/file.py#L90-L107
def unpacked_contents(self):
    """

    :return:
    """
    from nbformat import read
    import msgpack

    if self.mime_type == 'text/plain':
        return self.contents.decode('utf-8')
    elif self.mime_type == 'application/msgpack':
        # FIXME: Note: I'm not sure that encoding='utf-8' will not break old data.
        # We need utf-8 to make python3 to work. (kazbek)
        # return msgpack.unpackb(self.contents)
        return msgpack.unpackb(self.contents, encoding='utf-8')
    else:
        return self.contents
[ "def", "unpacked_contents", "(", "self", ")", ":", "from", "nbformat", "import", "read", "import", "msgpack", "if", "self", ".", "mime_type", "==", "'text/plain'", ":", "return", "self", ".", "contents", ".", "decode", "(", "'utf-8'", ")", "elif", "self", ".", "mime_type", "==", "'application/msgpack'", ":", "# FIXME: Note: I'm not sure that encoding='utf-8' will not break old data.", "# We need utf-8 to make python3 to work. (kazbek)", "# return msgpack.unpackb(self.contents)", "return", "msgpack", ".", "unpackb", "(", "self", ".", "contents", ",", "encoding", "=", "'utf-8'", ")", "else", ":", "return", "self", ".", "contents" ]
:return:
[ ":", "return", ":" ]
python
train
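For context on the `encoding='utf-8'` flag in the record above: it tells older msgpack versions to decode byte strings to `str`; the kwarg was removed in msgpack >= 1.0, where `raw=False` plays that role. A small round-trip sketch against the modern API (sample data is illustrative):

import msgpack

packed = msgpack.packb({'key': 'värde'}, use_bin_type=True)
print(msgpack.unpackb(packed, raw=False))  # {'key': 'värde'} as str, not bytes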
goldhand/django-nupages
nupages/views.py
https://github.com/goldhand/django-nupages/blob/4e54fae7e057f9530c22dc30c03812fd660cb7f4/nupages/views.py#L87-L96
def get_template_names(self):
    '''
    Looks for a custom_template value and prepends it to template_names
    if it exists otherwise 'nupages/page_detail.html' is used
    '''
    template_names = super(PageDetail, self).get_template_names()
    if self.get_object().custom_template:
        # there is a custom template, insert it before 'template_name'
        template_names.insert(0, self.get_object().custom_template)
    return template_names
[ "def", "get_template_names", "(", "self", ")", ":", "template_names", "=", "super", "(", "PageDetail", ",", "self", ")", ".", "get_template_names", "(", ")", "if", "self", ".", "get_object", "(", ")", ".", "custom_template", ":", "# there is a custom template, insert it before 'template_name'", "template_names", ".", "insert", "(", "0", ",", "self", ".", "get_object", "(", ")", ".", "custom_template", ")", "return", "template_names" ]
Looks for a custom_template value and prepends it to template_names if it exists otherwise 'nupages/page_detail.html' is used
[ "Looks", "for", "a", "custom_template", "value", "and", "prepends", "it", "to", "template_names", "if", "it", "exists", "otherwise", "nupages", "/", "page_detail", ".", "html", "is", "used" ]
python
train
rbaier/python-urltools
urltools/urltools.py
https://github.com/rbaier/python-urltools/blob/76bf599aeb4cb463df8e38367aa40a7d8ec7d9a1/urltools/urltools.py#L134-L147
def encode(url):
    """Encode URL."""
    parts = extract(url)
    return construct(URL(parts.scheme, parts.username, parts.password,
                         _idna_encode(parts.subdomain),
                         _idna_encode(parts.domain),
                         _idna_encode(parts.tld),
                         parts.port,
                         quote(parts.path.encode('utf-8')),
                         _encode_query(parts.query),
                         quote(parts.fragment.encode('utf-8')),
                         None))
[ "def", "encode", "(", "url", ")", ":", "parts", "=", "extract", "(", "url", ")", "return", "construct", "(", "URL", "(", "parts", ".", "scheme", ",", "parts", ".", "username", ",", "parts", ".", "password", ",", "_idna_encode", "(", "parts", ".", "subdomain", ")", ",", "_idna_encode", "(", "parts", ".", "domain", ")", ",", "_idna_encode", "(", "parts", ".", "tld", ")", ",", "parts", ".", "port", ",", "quote", "(", "parts", ".", "path", ".", "encode", "(", "'utf-8'", ")", ")", ",", "_encode_query", "(", "parts", ".", "query", ")", ",", "quote", "(", "parts", ".", "fragment", ".", "encode", "(", "'utf-8'", ")", ")", ",", "None", ")", ")" ]
Encode URL.
[ "Encode", "URL", "." ]
python
train
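The split in `encode` matters because hostnames and paths use different escaping schemes: the `_idna_encode` helpers apply IDNA (punycode) per hostname part, while path, query, and fragment get percent-encoding. A quick illustration of the two schemes with the standard library — this mirrors the idea, not urltools' exact helpers:

from urllib.parse import quote

print('müller'.encode('idna'))                # b'xn--mller-kva' (hostname label)
print(quote('/pfad/müller'.encode('utf-8')))  # '/pfad/m%C3%BCller' (path)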
ARMmbed/icetea
icetea_lib/tools/file/SessionFiles.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/file/SessionFiles.py#L75-L96
def read_file(self, filepath=None, filename=None):
    """
    Tries to read JSON content from filename and convert it to a dict.

    :param filepath: Path where the file is
    :param filename: File name
    :return: Dictionary read from the file
    :raises EnvironmentError, ValueError
    """
    name = filename if filename else self.filename
    path = filepath if filepath else self.filepath
    name = self._ends_with(name, ".json")
    path = self._ends_with(path, os.path.sep)
    try:
        return self._read_json(path, name)
    except EnvironmentError as error:
        self.logger.error("Error while opening or reading the file: {}".format(error))
        raise
    except ValueError as error:
        self.logger.error("File contents cannot be decoded to JSON: {}".format(error))
        raise
[ "def", "read_file", "(", "self", ",", "filepath", "=", "None", ",", "filename", "=", "None", ")", ":", "name", "=", "filename", "if", "filename", "else", "self", ".", "filename", "path", "=", "filepath", "if", "filepath", "else", "self", ".", "filepath", "name", "=", "self", ".", "_ends_with", "(", "name", ",", "\".json\"", ")", "path", "=", "self", ".", "_ends_with", "(", "path", ",", "os", ".", "path", ".", "sep", ")", "try", ":", "return", "self", ".", "_read_json", "(", "path", ",", "name", ")", "except", "EnvironmentError", "as", "error", ":", "self", ".", "logger", ".", "error", "(", "\"Error while opening or reading the file: {}\"", ".", "format", "(", "error", ")", ")", "raise", "except", "ValueError", "as", "error", ":", "self", ".", "logger", ".", "error", "(", "\"File contents cannot be decoded to JSON: {}\"", ".", "format", "(", "error", ")", ")", "raise" ]
Tries to read JSON content from filename and convert it to a dict. :param filepath: Path where the file is :param filename: File name :return: Dictionary read from the file :raises EnvironmentError, ValueError
[ "Tries", "to", "read", "JSON", "content", "from", "filename", "and", "convert", "it", "to", "a", "dict", "." ]
python
train
ornlneutronimaging/ImagingReso
ImagingReso/_utilities.py
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/_utilities.py#L202-L261
def formula_to_dictionary(formula='', thickness=np.NaN, density=np.NaN, database='ENDF_VII'):
    """create dictionary based on formula given

    Parameters:
    ===========
    formula: string
       ex: 'AgCo2'
       ex: 'Ag'
    thickness: float (in mm) default is np.NaN
    density: float (in g/cm3) default is np.NaN
    database: string (default is ENDV_VIII). Database where to look for elements

    Raises:
    =======
    ValueError if one of the element is missing from the database

    Return:
    =======
    the dictionary of the elements passed
      ex: {'AgCo2': {'elements': ['Ag','Co'],
                     'stoichiometric_ratio': [1,2],
                     'thickness': {'value': thickness, 'units': 'mm'},
                     'density': {'value': density, 'units': 'g/cm3'},
                     'molar_mass': {'value': np.nan, 'units': 'g/mol'},
                     }
    """
    if '.' in formula:
        raise ValueError("formula '{}' is invalid, containing symbol '{}' !".format(formula, '.'))

    _formula_parsed = re.findall(r'([A-Z][a-z]*)(\d*)', formula)
    if len(_formula_parsed) == 0:
        raise ValueError("formula '{}' is invalid !".format(formula))

    # _dictionary = {}
    _elements_array = []
    _atomic_ratio_array = []
    for _element in _formula_parsed:
        [_single_element, _atomic_ratio] = list(_element)
        if not is_element_in_database(element=_single_element, database=database):
            raise ValueError("element '{}' not found in the database '{}'!".format(_single_element, database))
        if _atomic_ratio == '':
            _atomic_ratio = 1
        _atomic_ratio_array.append(int(_atomic_ratio))
        _elements_array.append(_single_element)

    _dict = {formula: {'elements': _elements_array,
                       'stoichiometric_ratio': _atomic_ratio_array,
                       'thickness': {'value': thickness, 'units': 'mm'},
                       'density': {'value': density, 'units': 'g/cm3'},
                       'molar_mass': {'value': np.nan, 'units': 'g/mol'}
                       }
             }
    return _dict
[ "def", "formula_to_dictionary", "(", "formula", "=", "''", ",", "thickness", "=", "np", ".", "NaN", ",", "density", "=", "np", ".", "NaN", ",", "database", "=", "'ENDF_VII'", ")", ":", "if", "'.'", "in", "formula", ":", "raise", "ValueError", "(", "\"formula '{}' is invalid, containing symbol '{}' !\"", ".", "format", "(", "formula", ",", "'.'", ")", ")", "_formula_parsed", "=", "re", ".", "findall", "(", "r'([A-Z][a-z]*)(\\d*)'", ",", "formula", ")", "if", "len", "(", "_formula_parsed", ")", "==", "0", ":", "raise", "ValueError", "(", "\"formula '{}' is invalid !\"", ".", "format", "(", "formula", ")", ")", "# _dictionary = {}", "_elements_array", "=", "[", "]", "_atomic_ratio_array", "=", "[", "]", "for", "_element", "in", "_formula_parsed", ":", "[", "_single_element", ",", "_atomic_ratio", "]", "=", "list", "(", "_element", ")", "if", "not", "is_element_in_database", "(", "element", "=", "_single_element", ",", "database", "=", "database", ")", ":", "raise", "ValueError", "(", "\"element '{}' not found in the database '{}'!\"", ".", "format", "(", "_single_element", ",", "database", ")", ")", "if", "_atomic_ratio", "==", "''", ":", "_atomic_ratio", "=", "1", "_atomic_ratio_array", ".", "append", "(", "int", "(", "_atomic_ratio", ")", ")", "_elements_array", ".", "append", "(", "_single_element", ")", "_dict", "=", "{", "formula", ":", "{", "'elements'", ":", "_elements_array", ",", "'stoichiometric_ratio'", ":", "_atomic_ratio_array", ",", "'thickness'", ":", "{", "'value'", ":", "thickness", ",", "'units'", ":", "'mm'", "}", ",", "'density'", ":", "{", "'value'", ":", "density", ",", "'units'", ":", "'g/cm3'", "}", ",", "'molar_mass'", ":", "{", "'value'", ":", "np", ".", "nan", ",", "'units'", ":", "'g/mol'", "}", "}", "}", "return", "_dict" ]
create dictionary based on formula given Parameters: =========== formula: string ex: 'AgCo2' ex: 'Ag' thickness: float (in mm) default is np.NaN density: float (in g/cm3) default is np.NaN database: string (default is ENDV_VIII). Database where to look for elements Raises: ======= ValueError if one of the element is missing from the database Return: ======= the dictionary of the elements passed ex: {'AgCo2': {'elements': ['Ag','Co'], 'stoichiometric_ratio': [1,2], 'thickness': {'value': thickness, 'units': 'mm'}, 'density': {'value': density, 'units': 'g/cm3'}, 'molar_mass': {'value': np.nan, 'units': 'g/mol'}, }
[ "create", "dictionary", "based", "on", "formula", "given", "Parameters", ":", "===========", "formula", ":", "string", "ex", ":", "AgCo2", "ex", ":", "Ag", "thickness", ":", "float", "(", "in", "mm", ")", "default", "is", "np", ".", "NaN", "density", ":", "float", "(", "in", "g", "/", "cm3", ")", "default", "is", "np", ".", "NaN", "database", ":", "string", "(", "default", "is", "ENDV_VIII", ")", ".", "Database", "where", "to", "look", "for", "elements", "Raises", ":", "=======", "ValueError", "if", "one", "of", "the", "element", "is", "missing", "from", "the", "database", "Return", ":", "=======", "the", "dictionary", "of", "the", "elements", "passed", "ex", ":", "{", "AgCo2", ":", "{", "elements", ":", "[", "Ag", "Co", "]", "stoichiometric_ratio", ":", "[", "1", "2", "]", "thickness", ":", "{", "value", ":", "thickness", "units", ":", "mm", "}", "density", ":", "{", "value", ":", "density", "units", ":", "g", "/", "cm3", "}", "molar_mass", ":", "{", "value", ":", "np", ".", "nan", "units", ":", "g", "/", "mol", "}", "}" ]
python
train
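The regex in the record above does the heavy lifting: each element symbol is one uppercase letter plus optional lowercase letters, followed by an optional count. A quick check of how it tokenizes a formula:

import re

print(re.findall(r'([A-Z][a-z]*)(\d*)', 'AgCo2'))
# -> [('Ag', ''), ('Co', '2')]  -- an empty count defaults to 1 in the function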
objectrocket/python-client
objectrocket/acls.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/acls.py#L78-L90
def get(self, instance, acl):
    """Get an ACL by ID belonging to the instance specified by name.

    :param str instance: The name of the instance from which to fetch the ACL.
    :param str acl: The ID of the ACL to fetch.
    :returns: An :py:class:`Acl` object, or None if ACL does not exist.
    :rtype: :py:class:`Acl`
    """
    base_url = self._url.format(instance=instance)
    url = '{base}{aclid}/'.format(base=base_url, aclid=acl)
    response = requests.get(url, **self._default_request_kwargs)
    data = self._get_response_data(response)
    return self._concrete_acl(data)
[ "def", "get", "(", "self", ",", "instance", ",", "acl", ")", ":", "base_url", "=", "self", ".", "_url", ".", "format", "(", "instance", "=", "instance", ")", "url", "=", "'{base}{aclid}/'", ".", "format", "(", "base", "=", "base_url", ",", "aclid", "=", "acl", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "*", "*", "self", ".", "_default_request_kwargs", ")", "data", "=", "self", ".", "_get_response_data", "(", "response", ")", "return", "self", ".", "_concrete_acl", "(", "data", ")" ]
Get an ACL by ID belonging to the instance specified by name. :param str instance: The name of the instance from which to fetch the ACL. :param str acl: The ID of the ACL to fetch. :returns: An :py:class:`Acl` object, or None if ACL does not exist. :rtype: :py:class:`Acl`
[ "Get", "an", "ACL", "by", "ID", "belonging", "to", "the", "instance", "specified", "by", "name", "." ]
python
train
koalalorenzo/python-digitalocean
digitalocean/baseapi.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/baseapi.py#L151-L165
def get_timeout(self):
    """
    Checks if any timeout for the requests to DigitalOcean is required.
    To set a timeout, use the REQUEST_TIMEOUT_ENV_VAR environment
    variable.
    """
    timeout_str = os.environ.get(REQUEST_TIMEOUT_ENV_VAR)
    if timeout_str:
        try:
            return float(timeout_str)
        except:
            self._log.error('Failed parsing the request read timeout of '
                            '"%s". Please use a valid float number!' %
                            timeout_str)
    return None
[ "def", "get_timeout", "(", "self", ")", ":", "timeout_str", "=", "os", ".", "environ", ".", "get", "(", "REQUEST_TIMEOUT_ENV_VAR", ")", "if", "timeout_str", ":", "try", ":", "return", "float", "(", "timeout_str", ")", "except", ":", "self", ".", "_log", ".", "error", "(", "'Failed parsing the request read timeout of '", "'\"%s\". Please use a valid float number!'", "%", "timeout_str", ")", "return", "None" ]
Checks if any timeout for the requests to DigitalOcean is required. To set a timeout, use the REQUEST_TIMEOUT_ENV_VAR environment variable.
[ "Checks", "if", "any", "timeout", "for", "the", "requests", "to", "DigitalOcean", "is", "required", ".", "To", "set", "a", "timeout", "use", "the", "REQUEST_TIMEOUT_ENV_VAR", "environment", "variable", "." ]
python
valid
lipoja/URLExtract
urlextract/urlextract_core.py
https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L557-L568
def find_urls(self, text, only_unique=False):
    """
    Find all URLs in given text.

    :param str text: text where we want to find URLs
    :param bool only_unique: return only unique URLs
    :return: list of URLs found in text
    :rtype: list
    """
    urls = self.gen_urls(text)
    urls = OrderedDict.fromkeys(urls) if only_unique else urls
    return list(urls)
[ "def", "find_urls", "(", "self", ",", "text", ",", "only_unique", "=", "False", ")", ":", "urls", "=", "self", ".", "gen_urls", "(", "text", ")", "urls", "=", "OrderedDict", ".", "fromkeys", "(", "urls", ")", "if", "only_unique", "else", "urls", "return", "list", "(", "urls", ")" ]
Find all URLs in given text. :param str text: text where we want to find URLs :param bool only_unique: return only unique URLs :return: list of URLs found in text :rtype: list
[ "Find", "all", "URLs", "in", "given", "text", "." ]
python
train
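The deduplication trick above, `OrderedDict.fromkeys(...)`, keeps only the first occurrence of each URL while preserving order. In isolation:

from collections import OrderedDict

urls = ['http://a.example', 'http://b.example', 'http://a.example']
print(list(OrderedDict.fromkeys(urls)))
# -> ['http://a.example', 'http://b.example']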
Robpol86/libnl
example_scan_access_points.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/example_scan_access_points.py#L289-L319
def print_table(data):
    """Print the table of detected SSIDs and their data to screen.

    Positional arguments:
    data -- list of dictionaries.
    """
    table = AsciiTable([COLUMNS])
    table.justify_columns[2] = 'right'
    table.justify_columns[3] = 'right'
    table.justify_columns[4] = 'right'
    table_data = list()
    for row_in in data:
        row_out = [
            str(row_in.get('ssid', '')).replace('\0', ''),
            str(row_in.get('security', '')),
            str(row_in.get('channel', '')),
            str(row_in.get('frequency', '')),
            str(row_in.get('signal', '')),
            str(row_in.get('bssid', '')),
        ]
        if row_out[3]:
            row_out[3] += ' MHz'
        if row_out[4]:
            row_out[4] += ' dBm'
        table_data.append(row_out)
    sort_by_column = [c.lower() for c in COLUMNS].index(OPTIONS['--key'].lower())
    table_data.sort(key=lambda c: c[sort_by_column], reverse=OPTIONS['--reverse'])
    table.table_data.extend(table_data)
    print(table.table)
[ "def", "print_table", "(", "data", ")", ":", "table", "=", "AsciiTable", "(", "[", "COLUMNS", "]", ")", "table", ".", "justify_columns", "[", "2", "]", "=", "'right'", "table", ".", "justify_columns", "[", "3", "]", "=", "'right'", "table", ".", "justify_columns", "[", "4", "]", "=", "'right'", "table_data", "=", "list", "(", ")", "for", "row_in", "in", "data", ":", "row_out", "=", "[", "str", "(", "row_in", ".", "get", "(", "'ssid'", ",", "''", ")", ")", ".", "replace", "(", "'\\0'", ",", "''", ")", ",", "str", "(", "row_in", ".", "get", "(", "'security'", ",", "''", ")", ")", ",", "str", "(", "row_in", ".", "get", "(", "'channel'", ",", "''", ")", ")", ",", "str", "(", "row_in", ".", "get", "(", "'frequency'", ",", "''", ")", ")", ",", "str", "(", "row_in", ".", "get", "(", "'signal'", ",", "''", ")", ")", ",", "str", "(", "row_in", ".", "get", "(", "'bssid'", ",", "''", ")", ")", ",", "]", "if", "row_out", "[", "3", "]", ":", "row_out", "[", "3", "]", "+=", "' MHz'", "if", "row_out", "[", "4", "]", ":", "row_out", "[", "4", "]", "+=", "' dBm'", "table_data", ".", "append", "(", "row_out", ")", "sort_by_column", "=", "[", "c", ".", "lower", "(", ")", "for", "c", "in", "COLUMNS", "]", ".", "index", "(", "OPTIONS", "[", "'--key'", "]", ".", "lower", "(", ")", ")", "table_data", ".", "sort", "(", "key", "=", "lambda", "c", ":", "c", "[", "sort_by_column", "]", ",", "reverse", "=", "OPTIONS", "[", "'--reverse'", "]", ")", "table", ".", "table_data", ".", "extend", "(", "table_data", ")", "print", "(", "table", ".", "table", ")" ]
Print the table of detected SSIDs and their data to screen. Positional arguments: data -- list of dictionaries.
[ "Print", "the", "table", "of", "detected", "SSIDs", "and", "their", "data", "to", "screen", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/list_types.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/list_types.py#L277-L281
def process_result_value(self, value: Optional[str],
                         dialect: Dialect) -> List[int]:
    """Convert things on the way from the database to Python."""
    retval = self._dbstr_to_intlist(value)
    return retval
[ "def", "process_result_value", "(", "self", ",", "value", ":", "Optional", "[", "str", "]", ",", "dialect", ":", "Dialect", ")", "->", "List", "[", "int", "]", ":", "retval", "=", "self", ".", "_dbstr_to_intlist", "(", "value", ")", "return", "retval" ]
Convert things on the way from the database to Python.
[ "Convert", "things", "on", "the", "way", "from", "the", "database", "to", "Python", "." ]
python
train
shaypal5/utilp
utilp/classes/classes.py
https://github.com/shaypal5/utilp/blob/932abaf8ccfd06557632b7dbebc7775da1de8430/utilp/classes/classes.py#L273-L278
def tqdm(self, iterable, **kwargs):
    """Wraps the given iterable with a tqdm progress bar if this logger
    is set to verbose. Otherwise, returns the iterable unchanged."""
    if 'disable' in kwargs:
        kwargs.pop('disable')
    return tqdm(iterable, disable=not self.verbose, **kwargs)
[ "def", "tqdm", "(", "self", ",", "iterable", ",", "*", "*", "kwargs", ")", ":", "if", "'disable'", "in", "kwargs", ":", "kwargs", ".", "pop", "(", "'disable'", ")", "return", "tqdm", "(", "iterable", ",", "disable", "=", "not", "self", ".", "verbose", ",", "*", "*", "kwargs", ")" ]
Wraps the given iterable with a tqdm progress bar if this logger is set to verbose. Otherwise, returns the iterable unchanged.
[ "Wraps", "the", "given", "iterable", "with", "a", "tqdm", "progress", "bar", "if", "this", "logger", "is", "set", "to", "verbose", ".", "Otherwise", "returns", "the", "iterable", "unchanged", "." ]
python
train
robotpy/pyfrc
lib/pyfrc/sim/field/elements.py
https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/field/elements.py#L36-L53
def move(self, v):
    """v is a tuple of x/y coordinates to move object"""
    # rotate movement vector according to the angle of the object
    vx, vy = v
    vx, vy = (
        vx * math.cos(self.angle) - vy * math.sin(self.angle),
        vx * math.sin(self.angle) + vy * math.cos(self.angle),
    )

    def _move(xy):
        x, y = xy
        return x + vx, y + vy

    # TODO: detect other objects in the way, stop movement appropriately
    self.pts = [p for p in map(lambda x: _move(x), self.pts)]
    self.center = _move(self.center)
[ "def", "move", "(", "self", ",", "v", ")", ":", "# rotate movement vector according to the angle of the object", "vx", ",", "vy", "=", "v", "vx", ",", "vy", "=", "(", "vx", "*", "math", ".", "cos", "(", "self", ".", "angle", ")", "-", "vy", "*", "math", ".", "sin", "(", "self", ".", "angle", ")", ",", "vx", "*", "math", ".", "sin", "(", "self", ".", "angle", ")", "+", "vy", "*", "math", ".", "cos", "(", "self", ".", "angle", ")", ",", ")", "def", "_move", "(", "xy", ")", ":", "x", ",", "y", "=", "xy", "return", "x", "+", "vx", ",", "y", "+", "vy", "# TODO: detect other objects in the way, stop movement appropriately", "self", ".", "pts", "=", "[", "p", "for", "p", "in", "map", "(", "lambda", "x", ":", "_move", "(", "x", ")", ",", "self", ".", "pts", ")", "]", "self", ".", "center", "=", "_move", "(", "self", ".", "center", ")" ]
v is a tuple of x/y coordinates to move object
[ "v", "is", "a", "tuple", "of", "x", "/", "y", "coordinates", "to", "move", "object" ]
python
train
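The coordinate transform in `move` is a plain 2D rotation of the movement vector by the object's heading before translating: (vx', vy') = (vx cos θ − vy sin θ, vx sin θ + vy cos θ). Worked standalone:

import math

def rotate(vx, vy, angle):
    # Same rotation-matrix product used in move() above.
    return (vx * math.cos(angle) - vy * math.sin(angle),
            vx * math.sin(angle) + vy * math.cos(angle))

print(rotate(1.0, 0.0, math.pi / 2))  # ~(0.0, 1.0): "forward" becomes "up"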
OCR-D/core
ocrd/ocrd/cli/workspace.py
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/cli/workspace.py#L267-L272
def workspace_backup_add(ctx):
    """
    Create a new backup
    """
    backup_manager = WorkspaceBackupManager(Workspace(
        ctx.resolver, directory=ctx.directory,
        mets_basename=ctx.mets_basename,
        automatic_backup=ctx.automatic_backup))
    backup_manager.add()
[ "def", "workspace_backup_add", "(", "ctx", ")", ":", "backup_manager", "=", "WorkspaceBackupManager", "(", "Workspace", "(", "ctx", ".", "resolver", ",", "directory", "=", "ctx", ".", "directory", ",", "mets_basename", "=", "ctx", ".", "mets_basename", ",", "automatic_backup", "=", "ctx", ".", "automatic_backup", ")", ")", "backup_manager", ".", "add", "(", ")" ]
Create a new backup
[ "Create", "a", "new", "backup" ]
python
train
mitsei/dlkit
dlkit/records/assessment/edx/item_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/edx/item_records.py#L313-L324
def get_solution(self, parameters=None):
    """stub"""
    if not self.has_solution():
        raise IllegalState()
    try:
        if not self.get_text('python_script'):
            return self.get_text('solution').text
        if not parameters:
            parameters = self.get_parameters()
        return self._get_parameterized_text(parameters)
    except Exception:
        return self.get_text('solution').text
[ "def", "get_solution", "(", "self", ",", "parameters", "=", "None", ")", ":", "if", "not", "self", ".", "has_solution", "(", ")", ":", "raise", "IllegalState", "(", ")", "try", ":", "if", "not", "self", ".", "get_text", "(", "'python_script'", ")", ":", "return", "self", ".", "get_text", "(", "'solution'", ")", ".", "text", "if", "not", "parameters", ":", "parameters", "=", "self", ".", "get_parameters", "(", ")", "return", "self", ".", "_get_parameterized_text", "(", "parameters", ")", "except", "Exception", ":", "return", "self", ".", "get_text", "(", "'solution'", ")", ".", "text" ]
stub
[ "stub" ]
python
train
inasafe/inasafe
safe/messaging/item/horizontal_rule.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/messaging/item/horizontal_rule.py#L42-L55
def to_html(self, **kwargs):
    """Render as html

    :returns: the html representation (<hr>)
    :rtype: str

    We pass the kwargs on to the base class so an exception is raised
    if invalid keywords were passed. See:

    http://stackoverflow.com/questions/13124961/
    how-to-pass-arguments-efficiently-kwargs-in-python
    """
    super(HorizontalRule, self).__init__(**kwargs)
    return '<hr%s/>\n' % self.html_attributes()
[ "def", "to_html", "(", "self", ",", "*", "*", "kwargs", ")", ":", "super", "(", "HorizontalRule", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "return", "'<hr%s/>\\n'", "%", "self", ".", "html_attributes", "(", ")" ]
Render as html :returns: the html representation (<hr>) :rtype: str We pass the kwargs on to the base class so an exception is raised if invalid keywords were passed. See: http://stackoverflow.com/questions/13124961/ how-to-pass-arguments-efficiently-kwargs-in-python
[ "Render", "as", "html" ]
python
train
keras-rl/keras-rl
rl/policy.py
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L314-L346
def select_action(self, q_values):
    """Return the selected action

    # Arguments
        q_values (np.ndarray): List of the estimations of Q for each action

    # Returns
        Selection action
    """
    # We can't use BGE during testing, since we don't have access to the
    # action_counts at the end of training.
    assert self.agent.training, "BoltzmannGumbelQPolicy should only be used for training, not testing"

    assert q_values.ndim == 1, q_values.ndim
    q_values = q_values.astype('float64')

    # If we are starting training, we should reset the action_counts.
    # Otherwise, action_counts should already be initialized, since we
    # always do so when we begin training.
    if self.agent.step == 0:
        self.action_counts = np.ones(q_values.shape)
    assert self.action_counts is not None, self.agent.step
    assert self.action_counts.shape == q_values.shape, (self.action_counts.shape, q_values.shape)

    beta = self.C/np.sqrt(self.action_counts)
    Z = np.random.gumbel(size=q_values.shape)

    perturbation = beta * Z
    perturbed_q_values = q_values + perturbation
    action = np.argmax(perturbed_q_values)

    self.action_counts[action] += 1
    return action
[ "def", "select_action", "(", "self", ",", "q_values", ")", ":", "# We can't use BGE during testing, since we don't have access to the", "# action_counts at the end of training.", "assert", "self", ".", "agent", ".", "training", ",", "\"BoltzmannGumbelQPolicy should only be used for training, not testing\"", "assert", "q_values", ".", "ndim", "==", "1", ",", "q_values", ".", "ndim", "q_values", "=", "q_values", ".", "astype", "(", "'float64'", ")", "# If we are starting training, we should reset the action_counts.", "# Otherwise, action_counts should already be initialized, since we", "# always do so when we begin training.", "if", "self", ".", "agent", ".", "step", "==", "0", ":", "self", ".", "action_counts", "=", "np", ".", "ones", "(", "q_values", ".", "shape", ")", "assert", "self", ".", "action_counts", "is", "not", "None", ",", "self", ".", "agent", ".", "step", "assert", "self", ".", "action_counts", ".", "shape", "==", "q_values", ".", "shape", ",", "(", "self", ".", "action_counts", ".", "shape", ",", "q_values", ".", "shape", ")", "beta", "=", "self", ".", "C", "/", "np", ".", "sqrt", "(", "self", ".", "action_counts", ")", "Z", "=", "np", ".", "random", ".", "gumbel", "(", "size", "=", "q_values", ".", "shape", ")", "perturbation", "=", "beta", "*", "Z", "perturbed_q_values", "=", "q_values", "+", "perturbation", "action", "=", "np", ".", "argmax", "(", "perturbed_q_values", ")", "self", ".", "action_counts", "[", "action", "]", "+=", "1", "return", "action" ]
Return the selected action # Arguments q_values (np.ndarray): List of the estimations of Q for each action # Returns Selection action
[ "Return", "the", "selected", "action" ]
python
train
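The policy above is Boltzmann–Gumbel exploration: an argmax over Q-values plus Gumbel noise scaled per action by β_a = C/√(N_a), so rarely tried actions get larger perturbations. The core lines in isolation (sample values are illustrative):

import numpy as np

q = np.array([1.0, 0.9, 0.5])
counts = np.array([50.0, 5.0, 1.0])  # times each action was chosen
C = 1.0

beta = C / np.sqrt(counts)           # per-action exploration scale
perturbed = q + beta * np.random.gumbel(size=q.shape)
action = int(np.argmax(perturbed))   # under-explored actions win more often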
awslabs/aws-sam-cli
samcli/commands/local/generate_event/event_generation.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/generate_event/event_generation.py#L45-L63
def get_command(self, ctx, cmd_name):
    """
    gets the subcommands under the service name

    Parameters
    ----------
    ctx : Context
        the context object passed into the method
    cmd_name : str
        the service name

    Returns
    -------
    EventTypeSubCommand:
        returns subcommand if successful, None if not.
    """
    if cmd_name not in self.all_cmds:
        return None
    return EventTypeSubCommand(self.events_lib, cmd_name, self.all_cmds[cmd_name])
[ "def", "get_command", "(", "self", ",", "ctx", ",", "cmd_name", ")", ":", "if", "cmd_name", "not", "in", "self", ".", "all_cmds", ":", "return", "None", "return", "EventTypeSubCommand", "(", "self", ".", "events_lib", ",", "cmd_name", ",", "self", ".", "all_cmds", "[", "cmd_name", "]", ")" ]
gets the subcommands under the service name Parameters ---------- ctx : Context the context object passed into the method cmd_name : str the service name Returns ------- EventTypeSubCommand: returns subcommand if successful, None if not.
[ "gets", "the", "subcommands", "under", "the", "service", "name" ]
python
train
kubernetes-client/python
kubernetes/client/apis/node_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/node_v1beta1_api.py#L823-L846
def replace_runtime_class(self, name, body, **kwargs):
    """
    replace the specified RuntimeClass
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_runtime_class(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the RuntimeClass (required)
    :param V1beta1RuntimeClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be
        persisted. An invalid or unrecognized dryRun directive will result in an
        error response and no further processing of the request. Valid values
        are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor
        or entity that is making these changes. The value must be less than or
        128 characters long, and only contain printable characters, as defined
        by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1beta1RuntimeClass
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_runtime_class_with_http_info(name, body, **kwargs)
    else:
        (data) = self.replace_runtime_class_with_http_info(name, body, **kwargs)
        return data
[ "def", "replace_runtime_class", "(", "self", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "replace_runtime_class_with_http_info", "(", "name", ",", "body", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "replace_runtime_class_with_http_info", "(", "name", ",", "body", ",", "*", "*", "kwargs", ")", "return", "data" ]
replace the specified RuntimeClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_runtime_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the RuntimeClass (required) :param V1beta1RuntimeClass body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1beta1RuntimeClass If the method is called asynchronously, returns the request thread.
[ "replace", "the", "specified", "RuntimeClass", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "replace_runtime_class", "(", "name", "body", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
Yelp/kafka-utils
kafka_utils/kafka_corruption_check/main.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L321-L347
def check_files_on_host(java_home, host, files, batch_size):
    """Check the files on the host. Files are grouped together in groups
    of batch_size files. The dump class will be executed on each batch,
    sequentially.

    :param java_home: the JAVA_HOME of the broker
    :type java_home: str
    :param host: the host where the tool will be executed
    :type host: str
    :param files: the list of files to be analyzed
    :type files: list of str
    :param batch_size: the size of each batch
    :type batch_size: int
    """
    with closing(ssh_client(host)) as ssh:
        for i, batch in enumerate(chunks(files, batch_size)):
            command = check_corrupted_files_cmd(java_home, batch)
            _, stdout, stderr = ssh.exec_command(command)
            report_stderr(host, stderr)
            print(
                "  {host}: file {n_file} of {total}".format(
                    host=host,
                    n_file=(i * DEFAULT_BATCH_SIZE),
                    total=len(files),
                )
            )
            parse_output(host, stdout)
[ "def", "check_files_on_host", "(", "java_home", ",", "host", ",", "files", ",", "batch_size", ")", ":", "with", "closing", "(", "ssh_client", "(", "host", ")", ")", "as", "ssh", ":", "for", "i", ",", "batch", "in", "enumerate", "(", "chunks", "(", "files", ",", "batch_size", ")", ")", ":", "command", "=", "check_corrupted_files_cmd", "(", "java_home", ",", "batch", ")", "_", ",", "stdout", ",", "stderr", "=", "ssh", ".", "exec_command", "(", "command", ")", "report_stderr", "(", "host", ",", "stderr", ")", "print", "(", "\" {host}: file {n_file} of {total}\"", ".", "format", "(", "host", "=", "host", ",", "n_file", "=", "(", "i", "*", "DEFAULT_BATCH_SIZE", ")", ",", "total", "=", "len", "(", "files", ")", ",", ")", ")", "parse_output", "(", "host", ",", "stdout", ")" ]
Check the files on the host. Files are grouped together in groups of batch_size files. The dump class will be executed on each batch, sequentially. :param java_home: the JAVA_HOME of the broker :type java_home: str :param host: the host where the tool will be executed :type host: str :param files: the list of files to be analyzed :type files: list of str :param batch_size: the size of each batch :type batch_size: int
[ "Check", "the", "files", "on", "the", "host", ".", "Files", "are", "grouped", "together", "in", "groups", "of", "batch_size", "files", ".", "The", "dump", "class", "will", "be", "executed", "on", "each", "batch", "sequentially", "." ]
python
train
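The `chunks` helper the record relies on is a standard slicing generator; a minimal sketch of one compatible implementation (the real kafka-utils helper may differ):

def chunks(seq, size):
    """Yield successive size-sized slices of seq."""
    for start in range(0, len(seq), size):
        yield seq[start:start + size]

print(list(chunks(['f1', 'f2', 'f3', 'f4', 'f5'], 2)))
# -> [['f1', 'f2'], ['f3', 'f4'], ['f5']]

Note that the progress line in the record multiplies the batch index by DEFAULT_BATCH_SIZE rather than by the batch_size argument, so the printed file count drifts whenever a non-default batch size is passed.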
Robin8Put/pmes
ams/views.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/ams/views.py#L592-L718
async def post(self):
    """ Funds from account to given address.
        1. Verify signature
        2. Freeze senders amount.
        3. Request to withdraw server.
        4. Call balances sub_frozen method.

    Accepts:
        - message [dict]:
            - coinid [string]
            - amount [integer]
            - address [string]
            - timestamp [float]
            - recvWindow [float]
        - public_key
        - signature

    Returns:
        - message [dict]:
            - coinid [string]
            - amount [integer]
            - address [string]
            - timestamp [float]
            - recvWindow [float]
        - public_key
        - signature
        - txid [string]
    """
    # Sign-verifying functional
    if settings.SIGNATURE_VERIFICATION:
        super().verify()
    logging.debug("\n\n[+] -- Withdraw debugging")

    # Get data from requests body
    data = json.loads(self.request.body)
    public_key = data.get("public_key")
    signature = data.get("signature")
    if isinstance(data.get("message"), str):
        message = json.loads(data["message"])
    elif isinstance(data.get("message"), dict):
        message = data["message"]

    # Get data from signed message
    coinid = message.get("coinid")
    amount = message.get("amount")
    address = message.get("address")
    timestamp = message.get("timestamp")
    recvWindow = message.get("recvWindow")

    #
    if not all([coinid, amount, address, public_key, signature, timestamp, recvWindow]):
        data.update({"error": 400, "reason": "Missed required fields. "})
        self.set_status(400)
        self.write(data)
        raise tornado.web.Finish
    logging.debug(data)

    # Get account
    account = await self.account.getaccountdata(public_key=public_key)
    if "error" in account.keys():
        data.update(account)
        self.set_status(404)
        self.write(data)
        raise tornado.web.Finish
    logging.debug("\n Senders account")
    logging.debug(account)

    # Request to balance and call freeze method
    fee = await self.account.withdraw_fee(coinid)
    freeze = await self.account.balance.freeze(uid=account["id"], coinid=coinid,
                                               amount=amount + fee)
    logging.debug("\n Frozen balance")
    logging.debug(freeze)
    if "error" in freeze.keys():
        data.update(freeze)
        self.set_status(freeze["error"])
        self.write(data)
        raise tornado.web.Finish

    # Request to withdraw server
    txid = await self.account.withdraw(amount=amount, coinid=coinid, address=address)
    logging.debug("\n Withdraw server response")
    logging.debug(txid)

    # Check if txid exists
    if "error" in txid.keys():
        await self.account.balance.unfreeze(uid=account["id"], coinid=coinid,
                                            amount=amount + fee)
        data.update(txid)
        self.set_status(500)
        self.write(data)
        raise tornado.web.Finish

    # Add balance to recepient
    #add_active = await self.account.balance.add_active(address=address, coinid=coinid,
    #                                                   amount=amount)
    #if "error" in add_active.keys():
    #    await self.account.balance.unfreeze(uid=account["id"], coinid=coinid,
    #                                        amount=amount + fee)
    #    data.update(add_active)
    #    self.set_status(add_active["error"])
    #    self.write(data)
    #    raise tornado.web.Finish

    # Submit amount from frozen balance
    sub_frozen = await self.account.balance.sub_frozen(uid=account["id"], coinid=coinid,
                                                       amount=amount + fee)
    if "error" in sub_frozen.keys():
        data.update(sub_frozen)
        self.set_status(sub_frozen["error"])
        self.write(data)
        raise tornado.web.Finish
    logging.debug("\n Sub frozen")
    logging.debug(sub_frozen)

    await self.account.save_transaction(txid=txid.get("txid"), coinid=coinid,
                                        amount=amount, address=address)

    # Return txid
    data.update(txid)
    self.write(data)
[ "async", "def", "post", "(", "self", ")", ":", "# Sign-verifying functional", "if", "settings", ".", "SIGNATURE_VERIFICATION", ":", "super", "(", ")", ".", "verify", "(", ")", "logging", ".", "debug", "(", "\"\\n\\n[+] -- Withdraw debugging\"", ")", "# Get data from requests body", "data", "=", "json", ".", "loads", "(", "self", ".", "request", ".", "body", ")", "public_key", "=", "data", ".", "get", "(", "\"public_key\"", ")", "signature", "=", "data", ".", "get", "(", "\"signature\"", ")", "if", "isinstance", "(", "data", ".", "get", "(", "\"message\"", ")", ",", "str", ")", ":", "message", "=", "json", ".", "loads", "(", "data", "[", "\"message\"", "]", ")", "elif", "isinstance", "(", "data", ".", "get", "(", "\"message\"", ")", ",", "dict", ")", ":", "message", "=", "data", "[", "\"message\"", "]", "# Get data from signed message", "coinid", "=", "message", ".", "get", "(", "\"coinid\"", ")", "amount", "=", "message", ".", "get", "(", "\"amount\"", ")", "address", "=", "message", ".", "get", "(", "\"address\"", ")", "timestamp", "=", "message", ".", "get", "(", "\"timestamp\"", ")", "recvWindow", "=", "message", ".", "get", "(", "\"recvWindow\"", ")", "# ", "if", "not", "all", "(", "[", "coinid", ",", "amount", ",", "address", ",", "public_key", ",", "signature", ",", "timestamp", ",", "recvWindow", "]", ")", ":", "data", ".", "update", "(", "{", "\"error\"", ":", "400", ",", "\"reason\"", ":", "\"Missed required fields. \"", "}", ")", "self", ".", "set_status", "(", "400", ")", "self", ".", "write", "(", "data", ")", "raise", "tornado", ".", "web", ".", "Finish", "logging", ".", "debug", "(", "data", ")", "# Get account", "account", "=", "await", "self", ".", "account", ".", "getaccountdata", "(", "public_key", "=", "public_key", ")", "if", "\"error\"", "in", "account", ".", "keys", "(", ")", ":", "data", ".", "update", "(", "account", ")", "self", ".", "set_status", "(", "404", ")", "self", ".", "write", "(", "data", ")", "raise", "tornado", ".", "web", ".", "Finish", "logging", ".", "debug", "(", "\"\\n Senders account\"", ")", "logging", ".", "debug", "(", "account", ")", "# Request to balance and call freeze method", "fee", "=", "await", "self", ".", "account", ".", "withdraw_fee", "(", "coinid", ")", "freeze", "=", "await", "self", ".", "account", ".", "balance", ".", "freeze", "(", "uid", "=", "account", "[", "\"id\"", "]", ",", "coinid", "=", "coinid", ",", "amount", "=", "amount", "+", "fee", ")", "logging", ".", "debug", "(", "\"\\n Frozen balance\"", ")", "logging", ".", "debug", "(", "freeze", ")", "if", "\"error\"", "in", "freeze", ".", "keys", "(", ")", ":", "data", ".", "update", "(", "freeze", ")", "self", ".", "set_status", "(", "freeze", "[", "\"error\"", "]", ")", "self", ".", "write", "(", "data", ")", "raise", "tornado", ".", "web", ".", "Finish", "# Request to withdraw server", "txid", "=", "await", "self", ".", "account", ".", "withdraw", "(", "amount", "=", "amount", ",", "coinid", "=", "coinid", ",", "address", "=", "address", ")", "logging", ".", "debug", "(", "\"\\n Withdraw server response\"", ")", "logging", ".", "debug", "(", "txid", ")", "# Check if txid exists", "if", "\"error\"", "in", "txid", ".", "keys", "(", ")", ":", "await", "self", ".", "account", ".", "balance", ".", "unfreeze", "(", "uid", "=", "account", "[", "\"id\"", "]", ",", "coinid", "=", "coinid", ",", "amount", "=", "amount", "+", "fee", ")", "data", ".", "update", "(", "txid", ")", "self", ".", "set_status", "(", "500", ")", "self", ".", "write", "(", "data", ")", "raise", "tornado", 
".", "web", ".", "Finish", "# Add balance to recepient", "#add_active = await self.account.balance.add_active(address=address, coinid=coinid,", "#\t\t\t\t\t\t\t\t\t\t\t\t\tamount=amount)", "#if \"error\" in add_active.keys():", "#\tawait self.account.balance.unfreeze(uid=account[\"id\"], coinid=coinid,", "#\t\t\t\t\t\t\t\t\t\t\t\tamount=amount + fee)", "#\tdata.update(add_active)", "#\tself.set_status(add_active[\"error\"])", "#\tself.write(data)", "#\traise tornado.web.Finish", "# Submit amount from frozen balance", "sub_frozen", "=", "await", "self", ".", "account", ".", "balance", ".", "sub_frozen", "(", "uid", "=", "account", "[", "\"id\"", "]", ",", "coinid", "=", "coinid", ",", "amount", "=", "amount", "+", "fee", ")", "if", "\"error\"", "in", "sub_frozen", ".", "keys", "(", ")", ":", "data", ".", "update", "(", "sub_frozen", ")", "self", ".", "set_status", "(", "sub_frozen", "[", "\"error\"", "]", ")", "self", ".", "write", "(", "data", ")", "raise", "tornado", ".", "web", ".", "Finish", "logging", ".", "debug", "(", "\"\\n Sub frozen\"", ")", "logging", ".", "debug", "(", "sub_frozen", ")", "await", "self", ".", "account", ".", "save_transaction", "(", "txid", "=", "txid", ".", "get", "(", "\"txid\"", ")", ",", "coinid", "=", "coinid", ",", "amount", "=", "amount", ",", "address", "=", "address", ")", "# Return txid", "data", ".", "update", "(", "txid", ")", "self", ".", "write", "(", "data", ")" ]
Withdraw funds from account to given address.

1. Verify signature
2. Freeze sender's amount.
3. Request to withdraw server.
4. Call balance's sub_frozen method.

Accepts:
- message [dict]:
    - coinid [string]
    - amount [integer]
    - address [string]
    - timestamp [float]
    - recvWindow [float]
- public_key
- signature

Returns:
- message [dict]:
    - coinid [string]
    - amount [integer]
    - address [string]
    - timestamp [float]
    - recvWindow [float]
- public_key
- signature
- txid [string]
[ "Funds", "from", "account", "to", "given", "address", ".", "1", ".", "Verify", "signature", "2", ".", "Freeze", "senders", "amount", ".", "3", ".", "Request", "to", "withdraw", "server", ".", "4", ".", "Call", "balances", "sub_frozen", "method", "." ]
python
train
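A short client-side sketch may help clarify the request shape this withdraw handler expects. Everything below is an assumption for illustration: the host, port, and endpoint path are invented, the field values are placeholders, and a real service additionally verifies the signature when SIGNATURE_VERIFICATION is enabled.

import json
import time

import requests  # assumed available; any HTTP client works the same way

# Placeholder values; a real call needs a valid key pair and signature.
message = {
    "coinid": "BTC",
    "amount": 100,
    "address": "1ExampleDestinationAddress",
    "timestamp": time.time(),
    "recvWindow": 5000.0,
}
payload = {
    "message": json.dumps(message),  # the handler accepts a JSON string or a dict
    "public_key": "<hex-encoded public key>",
    "signature": "<signature over the message>",
}
response = requests.post("http://localhost:8888/api/withdraw", json=payload)
print(response.json())  # on success, echoes the request fields plus "txid"

Note that the handler freezes amount + fee before calling the withdraw server and unfreezes it if the withdraw fails, so a client only ever sees the final txid or an error payload.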
jazzband/django-ddp
dddp/alea.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/alea.py#L110-L134
def seed(self, values): """Seed internal state from supplied values.""" if not values: # Meteor uses epoch seconds as the seed if no args supplied, we use # a much more secure seed by default to avoid hash collisions. seed_ids = [int, str, random, self, values, self.__class__] random.shuffle(seed_ids) values = list(map(id, seed_ids)) + [time.time(), os.urandom(512)] mash = Mash() self.c = 1 self.s0 = mash(' ') self.s1 = mash(' ') self.s2 = mash(' ') for val in values: self.s0 -= mash(val) if self.s0 < 0: self.s0 += 1 self.s1 -= mash(val) if self.s1 < 0: self.s1 += 1 self.s2 -= mash(val) if self.s2 < 0: self.s2 += 1
[ "def", "seed", "(", "self", ",", "values", ")", ":", "if", "not", "values", ":", "# Meteor uses epoch seconds as the seed if no args supplied, we use", "# a much more secure seed by default to avoid hash collisions.", "seed_ids", "=", "[", "int", ",", "str", ",", "random", ",", "self", ",", "values", ",", "self", ".", "__class__", "]", "random", ".", "shuffle", "(", "seed_ids", ")", "values", "=", "list", "(", "map", "(", "id", ",", "seed_ids", ")", ")", "+", "[", "time", ".", "time", "(", ")", ",", "os", ".", "urandom", "(", "512", ")", "]", "mash", "=", "Mash", "(", ")", "self", ".", "c", "=", "1", "self", ".", "s0", "=", "mash", "(", "' '", ")", "self", ".", "s1", "=", "mash", "(", "' '", ")", "self", ".", "s2", "=", "mash", "(", "' '", ")", "for", "val", "in", "values", ":", "self", ".", "s0", "-=", "mash", "(", "val", ")", "if", "self", ".", "s0", "<", "0", ":", "self", ".", "s0", "+=", "1", "self", ".", "s1", "-=", "mash", "(", "val", ")", "if", "self", ".", "s1", "<", "0", ":", "self", ".", "s1", "+=", "1", "self", ".", "s2", "-=", "mash", "(", "val", ")", "if", "self", ".", "s2", "<", "0", ":", "self", ".", "s2", "+=", "1" ]
Seed internal state from supplied values.
[ "Seed", "internal", "state", "from", "supplied", "values", "." ]
python
test
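The interesting part of seed() is the wrap-around subtraction that keeps each state component inside [0, 1). Below is a stand-alone sketch of that update with a stand-in mash function; the real Mash class lives in dddp/alea.py and is not reproduced here.

import hashlib

def toy_mash(val):
    """Stand-in for Mash: map any value to a float in [0, 1)."""
    digest = hashlib.sha256(repr(val).encode()).digest()
    return int.from_bytes(digest[:7], 'big') / float(1 << 56)

def fold(state, val, mash=toy_mash):
    """One component update from seed(): subtract, then wrap into [0, 1)."""
    state -= mash(val)
    if state < 0:
        state += 1
    return state

s0 = fold(fold(0.5, 'alpha'), 'beta')
assert 0.0 <= s0 < 1.0

The default-seed branch mixes object ids with time.time() and os.urandom(512), so two unseeded instances are very unlikely to start from the same state.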
saltstack/salt
salt/modules/portage_config.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/portage_config.py#L176-L200
def _unify_keywords(): ''' Merge /etc/portage/package.keywords and /etc/portage/package.accept_keywords. ''' old_path = BASE_PATH.format('keywords') if os.path.exists(old_path): if os.path.isdir(old_path): for triplet in salt.utils.path.os_walk(old_path): for file_name in triplet[2]: file_path = '{0}/{1}'.format(triplet[0], file_name) with salt.utils.files.fopen(file_path) as fh_: for line in fh_: line = salt.utils.stringutils.to_unicode(line).strip() if line and not line.startswith('#'): append_to_package_conf( 'accept_keywords', string=line) shutil.rmtree(old_path) else: with salt.utils.files.fopen(old_path) as fh_: for line in fh_: line = salt.utils.stringutils.to_unicode(line).strip() if line and not line.startswith('#'): append_to_package_conf('accept_keywords', string=line) os.remove(old_path)
[ "def", "_unify_keywords", "(", ")", ":", "old_path", "=", "BASE_PATH", ".", "format", "(", "'keywords'", ")", "if", "os", ".", "path", ".", "exists", "(", "old_path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "old_path", ")", ":", "for", "triplet", "in", "salt", ".", "utils", ".", "path", ".", "os_walk", "(", "old_path", ")", ":", "for", "file_name", "in", "triplet", "[", "2", "]", ":", "file_path", "=", "'{0}/{1}'", ".", "format", "(", "triplet", "[", "0", "]", ",", "file_name", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "file_path", ")", "as", "fh_", ":", "for", "line", "in", "fh_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", ".", "strip", "(", ")", "if", "line", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "append_to_package_conf", "(", "'accept_keywords'", ",", "string", "=", "line", ")", "shutil", ".", "rmtree", "(", "old_path", ")", "else", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "old_path", ")", "as", "fh_", ":", "for", "line", "in", "fh_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", ".", "strip", "(", ")", "if", "line", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "append_to_package_conf", "(", "'accept_keywords'", ",", "string", "=", "line", ")", "os", ".", "remove", "(", "old_path", ")" ]
Merge /etc/portage/package.keywords and /etc/portage/package.accept_keywords.
[ "Merge", "/", "etc", "/", "portage", "/", "package", ".", "keywords", "and", "/", "etc", "/", "portage", "/", "package", ".", "accept_keywords", "." ]
python
train
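The record above follows a common migration pattern: drain a legacy file or directory into a new destination, then delete the old path. A simplified stand-alone version using only the standard library (the paths and the append callback are illustrative, not salt's API):

import os
import shutil

def unify(old_path, append_line):
    """Feed every non-comment line under old_path to append_line, then remove it."""
    if not os.path.exists(old_path):
        return
    if os.path.isdir(old_path):
        for root, _dirs, files in os.walk(old_path):
            for name in files:
                with open(os.path.join(root, name)) as fh:
                    for line in fh:
                        line = line.strip()
                        if line and not line.startswith('#'):
                            append_line(line)
        shutil.rmtree(old_path)
    else:
        with open(old_path) as fh:
            for line in fh:
                line = line.strip()
                if line and not line.startswith('#'):
                    append_line(line)
        os.remove(old_path)

For instance, unify('/etc/portage/package.keywords', print) would list the lines that the real function appends to package.accept_keywords.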
saltstack/salt
salt/modules/syslog_ng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/syslog_ng.py#L975-L1009
def stop(name=None): ''' Kills syslog-ng. This function is intended to be used from the state module. Users shouldn't use this function, if the service module is available on their system. If :mod:`syslog_ng.set_config_file <salt.modules.syslog_ng.set_binary_path>` is called before, this function will use the set binary path. CLI Example: .. code-block:: bash salt '*' syslog_ng.stop ''' pids = __salt__['ps.pgrep'](pattern='syslog-ng') if not pids: return _format_state_result(name, result=False, comment='Syslog-ng is not running') if __opts__.get('test', False): comment = 'Syslog_ng state module will kill {0} pids' return _format_state_result(name, result=None, comment=comment) res = __salt__['ps.pkill']('syslog-ng') killed_pids = res['killed'] if killed_pids == pids: changes = {'old': killed_pids, 'new': []} return _format_state_result(name, result=True, changes=changes) else: return _format_state_result(name, result=False)
[ "def", "stop", "(", "name", "=", "None", ")", ":", "pids", "=", "__salt__", "[", "'ps.pgrep'", "]", "(", "pattern", "=", "'syslog-ng'", ")", "if", "not", "pids", ":", "return", "_format_state_result", "(", "name", ",", "result", "=", "False", ",", "comment", "=", "'Syslog-ng is not running'", ")", "if", "__opts__", ".", "get", "(", "'test'", ",", "False", ")", ":", "comment", "=", "'Syslog_ng state module will kill {0} pids'", "return", "_format_state_result", "(", "name", ",", "result", "=", "None", ",", "comment", "=", "comment", ")", "res", "=", "__salt__", "[", "'ps.pkill'", "]", "(", "'syslog-ng'", ")", "killed_pids", "=", "res", "[", "'killed'", "]", "if", "killed_pids", "==", "pids", ":", "changes", "=", "{", "'old'", ":", "killed_pids", ",", "'new'", ":", "[", "]", "}", "return", "_format_state_result", "(", "name", ",", "result", "=", "True", ",", "changes", "=", "changes", ")", "else", ":", "return", "_format_state_result", "(", "name", ",", "result", "=", "False", ")" ]
Kills syslog-ng. This function is intended to be used from the state module. Users shouldn't use this function, if the service module is available on their system. If :mod:`syslog_ng.set_config_file <salt.modules.syslog_ng.set_binary_path>` is called before, this function will use the set binary path. CLI Example: .. code-block:: bash salt '*' syslog_ng.stop
[ "Kills", "syslog", "-", "ng", ".", "This", "function", "is", "intended", "to", "be", "used", "from", "the", "state", "module", "." ]
python
train
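Beyond the CLI example in the docstring, the function can be driven from Python through salt's local client. This is a hedged sketch: it assumes a running salt master and an existing minion id 'syslog-host', both invented for the example.

import salt.client

local = salt.client.LocalClient()
# Returns {minion_id: state-style result dict with 'result', 'comment', 'changes'}
result = local.cmd('syslog-host', 'syslog_ng.stop')
print(result)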
kvh/ramp
ramp/estimators/base.py
https://github.com/kvh/ramp/blob/8618ce673e49b95f40c9659319c3cb72281dacac/ramp/estimators/base.py#L44-L48
def predict_maxprob(self, x, **kwargs): """ Most likely value. Generally equivalent to predict. """ return self.base_estimator_.predict(x.values, **kwargs)
[ "def", "predict_maxprob", "(", "self", ",", "x", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "base_estimator_", ".", "predict", "(", "x", ".", "values", ",", "*", "*", "kwargs", ")" ]
Most likely value. Generally equivalent to predict.
[ "Most", "likely", "value", ".", "Generally", "equivalent", "to", "predict", "." ]
python
train
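For point estimators, predict_maxprob is a pass-through to the wrapped estimator's predict on the DataFrame's underlying values. A minimal stand-in wrapper (not ramp's real base class) makes the delegation concrete:

import pandas as pd
from sklearn.linear_model import LinearRegression

class ToyWrapper:
    """Stand-in for ramp's estimator wrapper; only mirrors the delegation."""
    def __init__(self, base_estimator):
        self.base_estimator_ = base_estimator

    def predict_maxprob(self, x, **kwargs):
        return self.base_estimator_.predict(x.values, **kwargs)

X = pd.DataFrame({'a': [0.0, 1.0, 2.0]})
y = [0.0, 2.0, 4.0]
wrapper = ToyWrapper(LinearRegression().fit(X.values, y))
print(wrapper.predict_maxprob(X))  # identical to a plain predict here

For probabilistic estimators the two could diverge, which is presumably why the docstring hedges with "generally".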
cgoldberg/sauceclient
sauceclient.py
https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L135-L140
def get_subaccount_info(self): """Get information about a sub account.""" method = 'GET' endpoint = '/rest/v1/users/{}/subaccounts'.format( self.client.sauce_username) return self.client.request(method, endpoint)
[ "def", "get_subaccount_info", "(", "self", ")", ":", "method", "=", "'GET'", "endpoint", "=", "'/rest/v1/users/{}/subaccounts'", ".", "format", "(", "self", ".", "client", ".", "sauce_username", ")", "return", "self", ".", "client", ".", "request", "(", "method", ",", "endpoint", ")" ]
Get information about a sub account.
[ "Get", "information", "about", "a", "sub", "account", "." ]
python
train
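A hypothetical usage sketch for the method above; the credentials are placeholders and the `account` attribute name on SauceClient is an assumption about how the class is exposed, not confirmed by the record.

from sauceclient import SauceClient

# Placeholder credentials; the `account` attribute name is an assumption.
sauce = SauceClient('my-username', 'my-access-key')
info = sauce.account.get_subaccount_info()
print(info)  # hits GET /rest/v1/users/my-username/subaccounts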
mcs07/PubChemPy
pubchempy.py
https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L1241-L1250
def compounds_to_frame(compounds, properties=None): """Construct a pandas :class:`~pandas.DataFrame` from a list of :class:`~pubchempy.Compound` objects. Optionally specify a list of the desired :class:`~pubchempy.Compound` properties. """ import pandas as pd if isinstance(compounds, Compound): compounds = [compounds] properties = set(properties) | set(['cid']) if properties else None return pd.DataFrame.from_records([c.to_dict(properties) for c in compounds], index='cid')
[ "def", "compounds_to_frame", "(", "compounds", ",", "properties", "=", "None", ")", ":", "import", "pandas", "as", "pd", "if", "isinstance", "(", "compounds", ",", "Compound", ")", ":", "compounds", "=", "[", "compounds", "]", "properties", "=", "set", "(", "properties", ")", "|", "set", "(", "[", "'cid'", "]", ")", "if", "properties", "else", "None", "return", "pd", ".", "DataFrame", ".", "from_records", "(", "[", "c", ".", "to_dict", "(", "properties", ")", "for", "c", "in", "compounds", "]", ",", "index", "=", "'cid'", ")" ]
Construct a pandas :class:`~pandas.DataFrame` from a list of :class:`~pubchempy.Compound` objects. Optionally specify a list of the desired :class:`~pubchempy.Compound` properties.
[ "Construct", "a", "pandas", ":", "class", ":", "~pandas", ".", "DataFrame", "from", "a", "list", "of", ":", "class", ":", "~pubchempy", ".", "Compound", "objects", "." ]
python
train
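A usage sketch for compounds_to_frame; it requires pandas installed and live network access to PubChem, and the property names are standard Compound attributes.

import pubchempy as pcp

compounds = pcp.get_compounds('aspirin', 'name')  # live PubChem lookup
df = pcp.compounds_to_frame(compounds, properties=['isomeric_smiles', 'xlogp'])
print(df)  # DataFrame indexed by cid with the requested property columns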
pavelsof/ipalint
ipalint/report.py
https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/report.py#L69-L81
def _get_report(self, with_line_nums=True): """ Returns a report which includes each distinct error only once, together with a list of the input lines where the error occurs. The latter will be omitted if flag is set to False. Helper for the get_report method. """ templ = '{} ← {}' if with_line_nums else '{}' return '\n'.join([ templ.format(error.string, ','.join(map(str, sorted(set(lines))))) for error, lines in self.errors.items()])
[ "def", "_get_report", "(", "self", ",", "with_line_nums", "=", "True", ")", ":", "templ", "=", "'{} ← {}' i", " w", "th_line_nums e", "se '", "}'", "return", "'\\n'", ".", "join", "(", "[", "templ", ".", "format", "(", "error", ".", "string", ",", "','", ".", "join", "(", "map", "(", "str", ",", "sorted", "(", "set", "(", "lines", ")", ")", ")", ")", ")", "for", "error", ",", "lines", "in", "self", ".", "errors", ".", "items", "(", ")", "]", ")" ]
Returns a report which includes each distinct error only once, together with a list of the input lines where the error occurs. The latter will be omitted if flag is set to False. Helper for the get_report method.
[ "Returns", "a", "report", "which", "includes", "each", "distinct", "error", "only", "once", "together", "with", "a", "list", "of", "the", "input", "lines", "where", "the", "error", "occurs", ".", "The", "latter", "will", "be", "omitted", "if", "flag", "is", "set", "to", "False", "." ]
python
train
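The formatting trick in _get_report is the join over sorted, de-duplicated line numbers per distinct error. A stand-alone sketch with simplified stand-in error objects (the real ipalint error type is not reproduced here):

from collections import namedtuple

Error = namedtuple('Error', 'string')

# error -> list of (possibly repeated, unordered) line numbers
errors = {
    Error('unknown IPA character'): [7, 2, 7],
    Error('stray whitespace'): [4],
}

report = '\n'.join(
    '{} ← {}'.format(err.string, ','.join(map(str, sorted(set(lines)))))
    for err, lines in errors.items())
print(report)
# unknown IPA character ← 2,7
# stray whitespace ← 4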