def set_value(self, field_name, value):
""" sets an attribute value for a given field name """
if field_name in self.fields:
            if value is not None:
                self._dict['attributes'][field_name] = _unicode_convert(value)
        elif field_name.upper() in ['SHAPE', 'SHAPE@', 'GEOMETRY']:
if isinstance(value, dict):
if 'geometry' in value:
self._dict['geometry'] = value['geometry']
elif any(k in value.keys() for k in ['x','y','points','paths','rings', 'spatialReference']):
self._dict['geometry'] = value
elif isinstance(value, AbstractGeometry):
self._dict['geometry'] = value.asDictionary
elif arcpyFound:
if isinstance(value, arcpy.Geometry) and \
value.type == self.geometryType:
                    self._dict['geometry'] = json.loads(value.JSON)
self._geom = None
self._geom = self.geometry
else:
return False
self._json = json.dumps(self._dict, default=_date_handler)
return True
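
# A minimal sketch (not from the original source) of how the dict branch above
# recognizes esri-JSON geometries by their keys; the sample geometry is assumed:
geom = {'x': -122.4, 'y': 37.8, 'spatialReference': {'wkid': 4326}}
is_geometry = any(k in geom for k in
                  ['x', 'y', 'points', 'paths', 'rings', 'spatialReference'])
print(is_geometry)  # True -> set_value would store geom as the feature geometry
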
def n_tanks(Q_plant, sed_inputs=sed_dict):
"""Return the number of sedimentation tanks required for a given flow rate.
Parameters
----------
Q_plant : float
Total plant flow rate
sed_inputs : dict
        A dictionary of all the constant inputs needed for sedimentation tank
        calculations; the default values can be found in sed.yaml.
Returns
-------
int
Number of sedimentation tanks required for a given flow rate.
Examples
--------
    >>> from aide_design.play import *
>>>
"""
q = q_tank(sed_inputs).magnitude
    return int(np.ceil(Q_plant / q))
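
# A worked example of the sizing arithmetic (values assumed for illustration;
# the real per-tank capacity comes from q_tank() and sed.yaml):
import numpy as np

q_tank_capacity = 60.0   # L/s per tank, assumed
Q_plant = 400.0          # L/s total plant flow, assumed
print(int(np.ceil(Q_plant / q_tank_capacity)))  # 7 tanks: 400/60 = 6.67, rounded up
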
def generate_cloudformation_args(stack_name, parameters, tags, template,
capabilities=DEFAULT_CAPABILITIES,
change_set_type=None,
service_role=None,
stack_policy=None,
change_set_name=None):
"""Used to generate the args for common cloudformation API interactions.
This is used for create_stack/update_stack/create_change_set calls in
cloudformation.
Args:
stack_name (str): The fully qualified stack name in Cloudformation.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
        template (:class:`stacker.providers.base.Template`): The template
object.
capabilities (list, optional): A list of capabilities to use when
updating Cloudformation.
change_set_type (str, optional): An optional change set type to use
with create_change_set.
service_role (str, optional): An optional service role to use when
interacting with Cloudformation.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
change_set_name (str, optional): An optional change set name to use
with create_change_set.
Returns:
dict: A dictionary of arguments to be used in the Cloudformation API
call.
"""
args = {
"StackName": stack_name,
"Parameters": parameters,
"Tags": tags,
"Capabilities": capabilities,
}
if service_role:
args["RoleARN"] = service_role
if change_set_name:
args["ChangeSetName"] = change_set_name
if change_set_type:
args["ChangeSetType"] = change_set_type
if template.url:
args["TemplateURL"] = template.url
else:
args["TemplateBody"] = template.body
# When creating args for CreateChangeSet, don't include the stack policy,
# since ChangeSets don't support it.
if not change_set_name:
args.update(generate_stack_policy_args(stack_policy))
return args
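
# A usage sketch (not from the original source; assumes the surrounding stacker
# module so DEFAULT_CAPABILITIES exists). SimpleNamespace stands in for the
# Template object since only .url and .body are consulted here; passing
# change_set_name skips the stack-policy merge at the end.
from types import SimpleNamespace

template = SimpleNamespace(url=None, body='{"Resources": {}}')
args = generate_cloudformation_args(
    stack_name="my-stack",
    parameters=[{"ParameterKey": "Env", "ParameterValue": "prod"}],
    tags=[{"Key": "team", "Value": "infra"}],
    template=template,
    change_set_name="my-change-set",
)
# args now holds TemplateBody (template.url is falsy) plus ChangeSetName.
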
def _update_awareness(self):
"""Make sure all metabolites and genes that are associated with
this reaction are aware of it.
"""
for x in self._metabolites:
x._reaction.add(self)
for x in self._genes:
x._reaction.add(self)
def count_words(text, to_lower=True, delimiters=DEFAULT_DELIMITERS):
"""
If `text` is an SArray of strings or an SArray of lists of strings, the
    occurrences of each word are counted for each row in the SArray.
If `text` is an SArray of dictionaries, the keys are tokenized and the
values are the counts. Counts for the same word, in the same row, are
added together.
This output is commonly known as the "bag-of-words" representation of text
data.
Parameters
----------
text : SArray[str | dict | list]
SArray of type: string, dict or list.
to_lower : bool, optional
If True, all strings are converted to lower case before counting.
delimiters : list[str], None, optional
Input strings are tokenized using `delimiters` characters in this list.
Each entry in this list must contain a single character. If set to
`None`, then a Penn treebank-style tokenization is used, which contains
smart handling of punctuations.
Returns
-------
out : SArray[dict]
        An SArray with the same length as the `text` input. For each row, the keys
of the dictionary are the words and the values are the corresponding counts.
See Also
--------
    count_ngrams, tf_idf, tokenize
References
----------
- `Bag of words model <http://en.wikipedia.org/wiki/Bag-of-words_model>`_
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
# Create input data
>>> sa = turicreate.SArray(["The quick brown fox jumps.",
"Word word WORD, word!!!word"])
# Run count_words
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'quick': 1, 'brown': 1, 'the': 1, 'fox': 1, 'jumps.': 1},
     {'word': 2, 'word,': 1, 'word!!!word': 1}]
# Run count_words with Penn treebank style tokenization to handle
# punctuations
>>> turicreate.text_analytics.count_words(sa, delimiters=None)
dtype: dict
Rows: 2
[{'brown': 1, 'jumps': 1, 'fox': 1, '.': 1, 'quick': 1, 'the': 1},
{'word': 3, 'word!!!word': 1, ',': 1}]
# Run count_words with dictionary input
>>> sa = turicreate.SArray([{'alice bob': 1, 'Bob alice': 0.5},
{'a dog': 0, 'a dog cat': 5}])
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'bob': 1.5, 'alice': 1.5}, {'a': 5, 'dog': 5, 'cat': 5}]
# Run count_words with list input
>>> sa = turicreate.SArray([['one', 'bar bah'], ['a dog', 'a dog cat']])
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'bar': 1, 'bah': 1, 'one': 1}, {'a': 2, 'dog': 2, 'cat': 1}]
"""
_raise_error_if_not_sarray(text, "text")
## Compute word counts
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.WordCounter(features='docs',
to_lower=to_lower,
delimiters=delimiters,
output_column_prefix=None)
output_sf = fe.fit_transform(sf)
return output_sf['docs']
def print_to_stdout(level, str_out):
""" The default debug function """
if level == NOTICE:
col = Fore.GREEN
elif level == WARNING:
col = Fore.RED
else:
col = Fore.YELLOW
if not is_py3:
str_out = str_out.encode(encoding, 'replace')
print((col + str_out + Fore.RESET))
def getDateFields(fc):
"""
Returns a list of fields that are of type DATE
Input:
fc - feature class or table path
Output:
List of date field names as strings
"""
    if not arcpyFound:
raise Exception("ArcPy is required to use this function")
return [field.name for field in arcpy.ListFields(fc, field_type="Date")]
def copy_previous_results(self):
"""Use the latest valid results_dir as the starting contents of the current results_dir.
Should be called after the cache is checked, since previous_results are not useful if there is
a cached artifact.
"""
# TODO(mateo): This should probably be managed by the task, which manages the rest of the
# incremental support.
if not self.previous_cache_key:
return None
previous_path = self._cache_manager._results_dir_path(self.previous_cache_key, stable=False)
if os.path.isdir(previous_path):
self.is_incremental = True
safe_rmtree(self._current_results_dir)
shutil.copytree(previous_path, self._current_results_dir)
safe_mkdir(self._current_results_dir)
relative_symlink(self._current_results_dir, self.results_dir)
# Set the self._previous last, so that it is only True after the copy completed.
self._previous_results_dir = previous_path
def clean_queues(self):
# pylint: disable=too-many-locals
"""Reduces internal list size to max allowed
* checks and broks : 5 * length of hosts + services
* actions : 5 * length of hosts + services + contacts
:return: None
"""
# If we set the interval at 0, we bail out
if getattr(self.pushed_conf, 'tick_clean_queues', 0) == 0:
logger.debug("No queues cleaning...")
return
max_checks = MULTIPLIER_MAX_CHECKS * (len(self.hosts) + len(self.services))
max_broks = MULTIPLIER_MAX_BROKS * (len(self.hosts) + len(self.services))
max_actions = MULTIPLIER_MAX_ACTIONS * len(self.contacts) * (len(self.hosts) +
len(self.services))
        # Checks are not so simple: a check may be referenced by its
        # host/service, so we do not just delete it from the check list but
        # also detach it from its host/service and from dependent checks.
self.nb_checks_dropped = 0
if max_checks and len(self.checks) > max_checks:
            # Dict values are not ordered; sorting the checks by creation
            # time is slow, but we have no other way to find the oldest.
            to_del_checks = list(self.checks.values())
to_del_checks.sort(key=lambda x: x.creation_time)
to_del_checks = to_del_checks[:-max_checks]
self.nb_checks_dropped = len(to_del_checks)
if to_del_checks:
logger.warning("I have to drop some checks (%d)..., sorry :(",
self.nb_checks_dropped)
for chk in to_del_checks:
c_id = chk.uuid
items = getattr(self, chk.ref_type + 's')
elt = items[chk.ref]
# First remove the link in host/service
elt.remove_in_progress_check(chk)
# Then in dependent checks (I depend on, or check
# depend on me)
for dependent_checks in chk.depend_on_me:
dependent_checks.depend_on.remove(chk.uuid)
for c_temp in chk.depend_on:
c_temp.depend_on_me.remove(chk)
del self.checks[c_id] # Final Bye bye ...
        # Broks and actions are simpler to handle.
        # For broks, manage the global list but also each broker's list.
self.nb_broks_dropped = 0
for broker_link in list(self.my_daemon.brokers.values()):
if max_broks and len(broker_link.broks) > max_broks:
logger.warning("I have to drop some broks (%d > %d) for the broker %s "
"..., sorry :(", len(broker_link.broks), max_broks, broker_link)
kept_broks = sorted(broker_link.broks, key=lambda x: x.creation_time)
# Delete the oldest broks to keep the max_broks most recent...
                # todo: is this a good choice?
broker_link.broks = kept_broks[0:max_broks]
self.nb_actions_dropped = 0
if max_actions and len(self.actions) > max_actions:
logger.warning("I have to del some actions (currently: %d, max: %d)..., sorry :(",
len(self.actions), max_actions)
            to_del_actions = list(self.actions.values())
to_del_actions.sort(key=lambda x: x.creation_time)
to_del_actions = to_del_actions[:-max_actions]
self.nb_actions_dropped = len(to_del_actions)
for act in to_del_actions:
if act.is_a == 'notification':
self.find_item_by_id(act.ref).remove_in_progress_notification(act)
del self.actions[act.uuid]
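
# The trimming idiom used above for checks and actions, in isolation
# (illustrative data): sort by creation time, then drop all but the newest
# max_n entries.
items = [{'creation_time': t} for t in (5, 1, 4, 2, 3)]
max_n = 2
items = sorted(items, key=lambda x: x['creation_time'])
to_del = items[:-max_n]   # the three oldest entries
print([i['creation_time'] for i in to_del])  # [1, 2, 3]
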
def setColor(self, typeID, color):
"""setColor(string, (integer, integer, integer, integer)) -> None
Sets the color of this type.
"""
self._connection._beginMessage(
tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_COLOR, typeID, 1 + 1 + 1 + 1 + 1)
self._connection._string += struct.pack("!BBBBB", tc.TYPE_COLOR, int(
color[0]), int(color[1]), int(color[2]), int(color[3]))
self._connection._sendExact()
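
# What the packed payload looks like (illustrative sketch): one byte for the
# TraCI compound type id -- 0x11 is assumed here for tc.TYPE_COLOR -- followed
# by four unsigned bytes for RGBA.
import struct

payload = struct.pack("!BBBBB", 0x11, 255, 0, 0, 255)  # opaque red
print(payload.hex())  # 11ff0000ff
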
def git_fetch(repo_dir, remote=None, refspec=None, verbose=False, tags=True):
"""Do a git fetch of `refspec` in `repo_dir`.
If 'remote' is None, all remotes will be fetched.
"""
command = ['git', 'fetch']
if not remote:
command.append('--all')
else:
remote = pipes.quote(remote)
        command.append('--update-head-ok')
if tags:
command.append('--tags')
if verbose:
command.append('--verbose')
if remote:
command.append(remote)
if refspec:
command.append(refspec)
return execute_git_command(command, repo_dir=repo_dir)
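
# A sketch of the command list this builds (execute_git_command is assumed to
# run the list as-is). With remote='origin', refspec='main', verbose=True and
# the default tags=True, the list is:
expected = ['git', 'fetch', '--update-head-ok', '--tags', '--verbose',
            'origin', 'main']
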
def p_expr_pre_incdec(p):
'''expr : INC variable
| DEC variable'''
p[0] = ast.PreIncDecOp(p[1], p[2], lineno=p.lineno(1))
def parse_sentence(obj: dict) -> BioCSentence:
"""Deserialize a dict obj to a BioCSentence object"""
sentence = BioCSentence()
sentence.offset = obj['offset']
sentence.infons = obj['infons']
sentence.text = obj['text']
for annotation in obj['annotations']:
sentence.add_annotation(parse_annotation(annotation))
for relation in obj['relations']:
sentence.add_relation(parse_relation(relation))
return sentence
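
# The minimal input shape this parser expects (illustrative; BioCSentence and
# the parse_* helpers come from the bioc package assumed in scope). Empty
# annotation/relation lists keep the example self-contained:
obj = {
    'offset': 0,
    'infons': {},
    'text': 'BRCA1 is a tumour suppressor.',
    'annotations': [],
    'relations': [],
}
sentence = parse_sentence(obj)
print(sentence.text)
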
def set_jobs(self, jobs):
"""Set --jobs."""
if jobs == "sys":
self.jobs = None
else:
try:
jobs = int(jobs)
except ValueError:
jobs = -1 # will raise error below
if jobs < 0:
raise CoconutException("--jobs must be an integer >= 0 or 'sys'")
self.jobs = jobs
def _normalize(number):
"""Normalizes a string of characters representing a phone number.
This performs the following conversions:
- Punctuation is stripped.
- For ALPHA/VANITY numbers:
- Letters are converted to their numeric representation on a telephone
keypad. The keypad used here is the one defined in ITU
Recommendation E.161. This is only done if there are 3 or more
letters in the number, to lessen the risk that such letters are
typos.
- For other numbers:
- Wide-ascii digits are converted to normal ASCII (European) digits.
- Arabic-Indic numerals are converted to European numerals.
- Spurious alpha characters are stripped.
Arguments:
number -- string representing a phone number
Returns the normalized string version of the phone number.
"""
m = fullmatch(_VALID_ALPHA_PHONE_PATTERN, number)
if m:
return _normalize_helper(number, _ALPHA_PHONE_MAPPINGS, True)
else:
return normalize_digits_only(number)
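
# A simplified sketch of the vanity-number branch: the real function uses
# _ALPHA_PHONE_MAPPINGS; this reproduces the ITU E.161 keypad by hand.
KEYPAD = {c: d
          for d, letters in {'2': 'ABC', '3': 'DEF', '4': 'GHI', '5': 'JKL',
                             '6': 'MNO', '7': 'PQRS', '8': 'TUV',
                             '9': 'WXYZ'}.items()
          for c in letters}

def keypad_digits(number):
    # keep digits, map letters to keypad digits, strip everything else
    return ''.join(KEYPAD.get(ch.upper(), ch) for ch in number
                   if ch.upper() in KEYPAD or ch.isdigit())

print(keypad_digits('1-800-FLOWERS'))  # 18003569377
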
def create_gre_tunnel_endpoint(cls, endpoint=None, tunnel_interface=None,
remote_address=None):
"""
Create the GRE tunnel mode or no encryption mode endpoint.
If the GRE tunnel mode endpoint is an SMC managed device,
both an endpoint and a tunnel interface is required. If the
endpoint is externally managed, only an IP address is required.
:param InternalEndpoint,ExternalEndpoint endpoint: the endpoint
element for this tunnel endpoint.
:param TunnelInterface tunnel_interface: the tunnel interface for
this tunnel endpoint. Required for SMC managed devices.
:param str remote_address: IP address, only required if the tunnel
endpoint is a remote gateway.
:rtype: TunnelEndpoint
"""
tunnel_interface = tunnel_interface.href if tunnel_interface else None
endpoint = endpoint.href if endpoint else None
return TunnelEndpoint(
tunnel_interface_ref=tunnel_interface,
endpoint_ref=endpoint,
ip_address=remote_address)
def count_peaks(ts):
"""
    Toggle counter for gas boilers.
    Counts the number of times the gas consumption increases by more than 3 kW.
Parameters
----------
ts: Pandas Series
Gas consumption in minute resolution
Returns
-------
int
"""
on_toggles = ts.diff() > 3000
shifted = np.logical_not(on_toggles.shift(1))
result = on_toggles & shifted
count = result.sum()
return count
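
# An illustrative run (sample data assumed): two separate jumps above 3 kW;
# the shift/logical_not trick ensures a sustained high level counts only once.
import numpy as np
import pandas as pd

ts = pd.Series([0, 5000, 5200, 0, 100, 4500, 4600, 0])
print(count_peaks(ts))  # 2
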
def to_svg(self, instruction_or_id,
i_promise_not_to_change_the_result=False):
"""Return the SVG for an instruction.
:param instruction_or_id: either an
:class:`~knittingpattern.Instruction.Instruction` or an id
returned by :meth:`get_instruction_id`
:param bool i_promise_not_to_change_the_result:
- :obj:`False`: the result is copied, you can alter it.
- :obj:`True`: the result is directly from the cache. If you change
the result, other calls of this function get the changed result.
:return: an SVGDumper
:rtype: knittingpattern.Dumper.SVGDumper
"""
return self._new_svg_dumper(lambda: self.instruction_to_svg_dict(
instruction_or_id, not i_promise_not_to_change_the_result))
def _api_request(self, endpoint, http_method, *args, **kwargs):
"""Private method for api requests"""
logger.debug(' > Sending API request to endpoint: %s' % endpoint)
auth = self._build_http_auth()
headers = self._build_request_headers(kwargs.get('headers'))
logger.debug('\theaders: %s' % headers)
path = self._build_request_path(endpoint)
logger.debug('\tpath: %s' % path)
data = self._build_payload(kwargs.get('payload'))
if not data:
data = kwargs.get('data')
logger.debug('\tdata: %s' % data)
req_kw = dict(
auth=auth,
headers=headers,
timeout=kwargs.get('timeout', self.DEFAULT_TIMEOUT)
)
        # dispatch on the HTTP method
        if http_method == self.HTTP_POST:
            if data:
                r = requests.post(path, data=data, **req_kw)
            else:
                r = requests.post(path, **req_kw)
        elif http_method == self.HTTP_PUT:
            if data:
                r = requests.put(path, data=data, **req_kw)
            else:
                r = requests.put(path, **req_kw)
        elif http_method == self.HTTP_DELETE:
            r = requests.delete(path, **req_kw)
        else:
            r = requests.get(path, **req_kw)
logger.debug('\tresponse code:%s' % r.status_code)
        try:
            logger.debug('\tresponse: %s' % r.json())
        except ValueError:
            # body was not JSON; fall back to the raw content
            logger.debug('\tresponse: %s' % r.content)
return self._parse_response(r)
def _print_download_progress_msg(self, msg, flush=False):
"""Prints a message about download progress either to the console or TF log.
Args:
msg: Message to print.
flush: Indicates whether to flush the output (only used in interactive
mode).
"""
if self._interactive_mode():
# Print progress message to console overwriting previous progress
# message.
self._max_prog_str = max(self._max_prog_str, len(msg))
sys.stdout.write("\r%-{}s".format(self._max_prog_str) % msg)
sys.stdout.flush()
if flush:
print("\n")
else:
# Interactive progress tracking is disabled. Print progress to the
# standard TF log.
logging.info(msg)
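
# The carriage-return trick the interactive branch relies on, in isolation:
# pad each message to the longest seen so shorter updates fully overwrite
# longer ones on the same console line.
import sys
import time

max_len = 0
for msg in ["downloading 10%", "downloading 100%", "done"]:
    max_len = max(max_len, len(msg))
    sys.stdout.write("\r%-{}s".format(max_len) % msg)
    sys.stdout.flush()
    time.sleep(0.2)
print()
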
def copy_resources(self):
"""Copies the relevant resources to a resources subdirectory"""
if not os.path.isdir('resources'):
os.mkdir('resources')
resource_dir = os.path.join(os.getcwd(), 'resources', '')
copied_resources = []
for resource in self.resources:
src = os.path.join(EULER_DATA, 'resources', resource)
if os.path.isfile(src):
shutil.copy(src, resource_dir)
copied_resources.append(resource)
if copied_resources:
copied = ', '.join(copied_resources)
path = os.path.relpath(resource_dir, os.pardir)
msg = "Copied {} to {}.".format(copied, path)
click.secho(msg, fg='green')
def difference(iterable, func=sub):
"""By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`accumulate`'s default behavior:
>>> from itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
"""
a, b = tee(iterable)
try:
item = next(b)
except StopIteration:
return iter([])
return chain([item], map(lambda x: func(x[1], x[0]), zip(a, b)))
def search_by(lookup, tgt_type='compound', minion_id=None):
'''
Search a dictionary of target strings for matching targets
This is the inverse of :py:func:`match.filter_by
<salt.modules.match.filter_by>` and allows matching values instead of
matching keys. A minion can be matched by multiple entries.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' match.search_by '{web: [node1, node2], db: [node2, node]}'
Pillar Example:
.. code-block:: jinja
{% set roles = salt.match.search_by({
'web': ['G@os_family:Debian not nodeX'],
'db': ['L@node2,node3 and G@datacenter:west'],
'caching': ['node3', 'node4'],
}) %}
# Make the filtered data available to Pillar:
roles: {{ roles | yaml() }}
'''
expr_funcs = dict(inspect.getmembers(sys.modules[__name__],
predicate=inspect.isfunction))
matches = []
for key, target_list in lookup.items():
for target in target_list:
params = (target, minion_id) if minion_id else (target, )
if expr_funcs[tgt_type](*params):
matches.append(key)
return matches or None
def get_stp_mst_detail_output_msti_port_oper_bpdu_guard(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
oper_bpdu_guard = ET.SubElement(port, "oper-bpdu-guard")
oper_bpdu_guard.text = kwargs.pop('oper_bpdu_guard')
callback = kwargs.pop('callback', self._callback)
return callback(config)
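
# A sketch making the generated request body visible. _Stub is a hypothetical
# stand-in carrying only the default callback the builder needs; it serializes
# the element tree instead of sending it over NETCONF.
import xml.etree.ElementTree as ET

class _Stub(object):
    _callback = staticmethod(lambda cfg: ET.tostring(cfg).decode())

_Stub.build = get_stp_mst_detail_output_msti_port_oper_bpdu_guard
print(_Stub().build(instance_id='1', oper_bpdu_guard='true'))
# <get_stp_mst_detail><output><msti><instance-id>1</instance-id>
# <port><oper-bpdu-guard>true</oper-bpdu-guard></port></msti></output>
# </get_stp_mst_detail>
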
def invalid_config_error_message(action, key, val):
"""Returns a better error message when invalid configuration option
is provided."""
if action in ('store_true', 'store_false'):
return ("{0} is not a valid value for {1} option, "
"please specify a boolean value like yes/no, "
"true/false or 1/0 instead.").format(val, key)
return ("{0} is not a valid value for {1} option, "
"please specify a numerical value like 1/0 "
"instead.").format(val, key)
def send(self, transactions):
""" Package up transactions into a batch and send them to the
network via the provided batch_sender.
:param transactions: list of transactions to package and broadcast.
:return: None
"""
txn_signatures = [txn.header_signature for txn in transactions]
header = BatchHeader(
signer_public_key=self._identity_signer.get_public_key().as_hex(),
transaction_ids=txn_signatures
).SerializeToString()
signature = self._identity_signer.sign(header)
batch = Batch(
header=header,
transactions=transactions,
header_signature=signature)
self._batch_sender.send(batch)
def wallet_destroy(self, wallet):
"""
Destroys **wallet** and all contained accounts
.. enable_control required
:param wallet: Wallet to destroy
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_destroy(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_destroy', payload)
return resp == {}
def write_table_to_file(self, dtype, custom_name=None, append=False, dir_path=None):
"""
Write out a MagIC table to file, using custom filename
as specified in self.filenames.
Parameters
----------
dtype : str
magic table name
"""
if custom_name:
fname = custom_name
else:
fname = self.filenames[dtype]
        if not dir_path:
            dir_path = self.directory
        if dtype in self.tables:
            write_df = self.remove_names(dtype)
            outfile = self.tables[dtype].write_magic_file(custom_name=fname,
                                                          dir_path=dir_path,
                                                          append=append, df=write_df)
            return outfile
def read_data_sets(data_dir):
"""
Parse or download movielens 1m data if train_dir is empty.
:param data_dir: The directory storing the movielens data
:return: a 2D numpy array with user index and item index in each row
"""
WHOLE_DATA = 'ml-1m.zip'
local_file = base.maybe_download(WHOLE_DATA, data_dir, SOURCE_URL + WHOLE_DATA)
zip_ref = zipfile.ZipFile(local_file, 'r')
extracted_to = os.path.join(data_dir, "ml-1m")
if not os.path.exists(extracted_to):
print("Extracting %s to %s" % (local_file, data_dir))
zip_ref.extractall(data_dir)
zip_ref.close()
    rating_files = os.path.join(extracted_to, "ratings.dat")
    # read with a context manager so the file handle is closed
    with open(rating_files, "r") as f:
        rating_list = [line.strip().split("::") for line in f]
movielens_data = np.array(rating_list).astype(int)
return movielens_data
def _get_cache_name(function):
"""
returns a name for the module's cache db.
"""
module_name = _inspect.getfile(function)
module_name = _os.path.abspath(module_name)
cache_name = module_name
# fix for '<string>' or '<stdin>' in exec or interpreter usage.
cache_name = cache_name.replace('<', '_lt_')
cache_name = cache_name.replace('>', '_gt_')
tmpdir = _os.getenv('TMPDIR') or _os.getenv('TEMP') or _os.getenv('TMP')
if tmpdir:
cache_name = tmpdir + '/filecache_' + cache_name.replace(_os.sep, '@')
cache_name += '.cache'
return cache_name
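
# Reproducing the name mangling for an assumed module path, with TMPDIR=/tmp
# and POSIX os.sep:
import os

module_name = '/home/user/proj/mod.py'
cache_name = '/tmp' + '/filecache_' + module_name.replace(os.sep, '@') + '.cache'
print(cache_name)  # /tmp/filecache_@home@user@proj@mod.py.cache
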
def hook_wrapper_23(stdin, stdout, prompt):
u'''Wrap a Python readline so it behaves like GNU readline.'''
try:
# call the Python hook
res = ensure_str(readline_hook(prompt))
# make sure it returned the right sort of thing
if res and not isinstance(res, str):
raise TypeError, u'readline must return a string.'
except KeyboardInterrupt:
# GNU readline returns 0 on keyboard interrupt
return 0
except EOFError:
# It returns an empty string on EOF
res = u''
except:
print >>sys.stderr, u'Readline internal error'
traceback.print_exc()
res = u'\n'
# we have to make a copy because the caller expects to free the result
n = len(res)
p = Console.PyMem_Malloc(n + 1)
_strncpy(cast(p, c_char_p), res, n + 1)
return p
def parse(self):
"""Get new data, parses it and returns a device."""
# fetch data
sess = requests.session()
request = sess.get('https://{}/{}'.format(self.locale,
self.product_id),
allow_redirects=True,
timeout=2)
sess.close()
# raise exception, e.g. if we are blocked because of too many requests
request.raise_for_status()
soup = BeautifulSoup(request.text, 'html.parser')
# parse name
raw = soup.find('h1', attrs={'class': 'gh-headline'})
self.device.name = raw.string.replace('\n', '')
# parse prices
self.device.prices = []
for tmp in soup.select('div.offer__price .gh_price'):
matches = re.search(_REGEX, tmp.string)
raw = '{}.{}'.format(matches.group(1),
matches.group(2))
self.device.prices += [float(raw)]
# parse unit
price_match = soup.find('span', attrs={'class': 'gh_price'})
matches = re.search(r'€|£|PLN', price_match.string)
self.device.price_currency = matches.group()
return self.device
def BVC(self, params):
"""
BVC label
Branch to the instruction at label if the V flag is not set
"""
label = self.get_one_parameter(self.ONE_PARAMETER, params)
self.check_arguments(label_exists=(label,))
# BVC label
def BVC_func():
if not self.is_V_set():
self.register['PC'] = self.labels[label]
return BVC_func
def get_available_voices(self, language=None, gender=None):
"""
Returns a list of available voices, via 'ListVoices' endpoint
Docs:
http://developer.ivona.com/en/speechcloud/actions.html#ListVoices
:param language: returned voices language
:type language: str
:param gender: returned voices gender
:type gender: str
"""
endpoint = 'ListVoices'
data = dict()
if language:
data.update({'Voice': {'Language': language}})
if gender:
data.update({'Voice': {'Gender': gender}})
response = self._get_response('get', endpoint, data)
return response.json()['Voices']
def Henry_H_at_T(T, H, Tderiv, T0=None, units=None, backend=None):
""" Evaluate Henry's constant H at temperature T
Parameters
----------
T: float
Temperature (with units), assumed to be in Kelvin if ``units == None``
H: float
Henry's constant
    Tderiv: float
dln(H)/d(1/T), assumed to be in Kelvin if ``units == None``.
T0: float
Reference temperature, assumed to be in Kelvin if ``units == None``
default: 298.15 K
units: object (optional)
object with attributes: kelvin (e.g. chempy.units.default_units)
backend : module (optional)
module with "exp", default: numpy, math
"""
be = get_backend(backend)
if units is None:
K = 1
else:
K = units.Kelvin
if T0 is None:
T0 = 298.15*K
return H * be.exp(Tderiv*(1/T - 1/T0))
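
# A worked example with assumed, oxygen-like values: H0 = 1.3e-3 mol/(L*atm)
# at T0 = 298.15 K and d(ln H)/d(1/T) = 1500 K. No units object is passed, so
# plain floats in Kelvin are used:
import math

H0, Tderiv, T0, T = 1.3e-3, 1500.0, 298.15, 310.0
H_T = H0 * math.exp(Tderiv * (1.0 / T - 1.0 / T0))
print(H_T)  # ~1.07e-3: gas solubility drops as temperature rises
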
def htmlABF(ID, group, d, folder, overwrite=False):
    """Given an ID and the dict of files, generate a static HTML page for that ABF."""
    fname = folder + "/swhlab4/%s_index.html" % ID
    if not overwrite and os.path.exists(fname):
        return
    html = TEMPLATES['abf']
    html = html.replace("~ID~", ID)
    html = html.replace("~CONTENT~", htmlABFcontent(ID, group, d))
    print(" <- writing [%s]" % os.path.basename(fname))
    with open(fname, 'w') as f:
        f.write(html)
    return
def target(self, hosts):
"""Temporarily retarget the client for one call. This is useful
when having to deal with a subset of hosts for one call.
"""
if self.__is_retargeted:
raise TypeError('Cannot use target more than once.')
rv = FanoutClient(hosts, connection_pool=self.connection_pool,
max_concurrency=self._max_concurrency)
rv._cb_poll = self._cb_poll
rv.__is_retargeted = True
return rv
|
[
"def",
"target",
"(",
"self",
",",
"hosts",
")",
":",
"if",
"self",
".",
"__is_retargeted",
":",
"raise",
"TypeError",
"(",
"'Cannot use target more than once.'",
")",
"rv",
"=",
"FanoutClient",
"(",
"hosts",
",",
"connection_pool",
"=",
"self",
".",
"connection_pool",
",",
"max_concurrency",
"=",
"self",
".",
"_max_concurrency",
")",
"rv",
".",
"_cb_poll",
"=",
"self",
".",
"_cb_poll",
"rv",
".",
"__is_retargeted",
"=",
"True",
"return",
"rv"
] | 44.181818 | 14.636364 |
def strace_set_buffer_size(self, size):
"""Sets the STRACE buffer size.
Args:
self (JLink): the ``JLink`` instance.
Returns:
``None``
Raises:
JLinkException: on error.
"""
size = ctypes.c_uint32(size)
res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.SET_BUFFER_SIZE, size)
if res < 0:
raise errors.JLinkException('Failed to set the STRACE buffer size.')
return None
|
[
"def",
"strace_set_buffer_size",
"(",
"self",
",",
"size",
")",
":",
"size",
"=",
"ctypes",
".",
"c_uint32",
"(",
"size",
")",
"res",
"=",
"self",
".",
"_dll",
".",
"JLINK_STRACE_Control",
"(",
"enums",
".",
"JLinkStraceCommand",
".",
"SET_BUFFER_SIZE",
",",
"size",
")",
"if",
"res",
"<",
"0",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"'Failed to set the STRACE buffer size.'",
")",
"return",
"None"
] | 26.611111 | 22.722222 |
def spa_tmplt_precondition(length, delta_f, kmin=0):
"""Return the amplitude portion of the TaylorF2 approximant, used to precondition
the strain data. The result is cached, and so should not be modified, only read.
"""
global _prec
if _prec is None or _prec.delta_f != delta_f or len(_prec) < length:
v = numpy.arange(0, (kmin+length*2), 1.0) * delta_f
v = numpy.power(v[1:len(v)], -7.0/6.0)
_prec = FrequencySeries(v, delta_f=delta_f, dtype=float32)
return _prec[kmin:kmin + length]
|
[
"def",
"spa_tmplt_precondition",
"(",
"length",
",",
"delta_f",
",",
"kmin",
"=",
"0",
")",
":",
"global",
"_prec",
"if",
"_prec",
"is",
"None",
"or",
"_prec",
".",
"delta_f",
"!=",
"delta_f",
"or",
"len",
"(",
"_prec",
")",
"<",
"length",
":",
"v",
"=",
"numpy",
".",
"arange",
"(",
"0",
",",
"(",
"kmin",
"+",
"length",
"*",
"2",
")",
",",
"1.0",
")",
"*",
"delta_f",
"v",
"=",
"numpy",
".",
"power",
"(",
"v",
"[",
"1",
":",
"len",
"(",
"v",
")",
"]",
",",
"-",
"7.0",
"/",
"6.0",
")",
"_prec",
"=",
"FrequencySeries",
"(",
"v",
",",
"delta_f",
"=",
"delta_f",
",",
"dtype",
"=",
"float32",
")",
"return",
"_prec",
"[",
"kmin",
":",
"kmin",
"+",
"length",
"]"
] | 52.2 | 16.6 |
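A plain-numpy sketch of the caching pattern above, without the pycbc FrequencySeries wrapper; the f**(-7/6) amplitude and the cache-invalidation test are the parts carried over, and the length check is simplified to include kmin:

import numpy

_prec = None
_prec_delta_f = None

def precondition(length, delta_f, kmin=0):
    global _prec, _prec_delta_f
    # Rebuild the cache only when delta_f changes or the array is too short
    if _prec is None or _prec_delta_f != delta_f or len(_prec) < kmin + length:
        f = numpy.arange(1, kmin + 2 * length) * delta_f
        _prec = numpy.power(f, -7.0 / 6.0)  # TaylorF2 amplitude scaling
        _prec_delta_f = delta_f
    return _prec[kmin:kmin + length]

a = precondition(8, 0.25)
b = precondition(8, 0.25)  # second call reuses the cached array
assert len(a) == len(b) == 8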
def apply_update(self, doc, update_spec):
"""Override DocManagerBase.apply_update to have flat documents."""
# Replace a whole document
if not '$set' in update_spec and not '$unset' in update_spec:
# update_spec contains the new document.
# Update the key in Solr based on the unique_key mentioned as
# parameter.
update_spec['_id'] = doc[self.unique_key]
return update_spec
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
# Find dotted-path to the value, remove that key from doc, then
# put value at key:
keys_to_pop = []
for key in doc:
if key.startswith(to_set):
if key == to_set or key[len(to_set)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
doc[to_set] = value
for to_unset in update_spec.get("$unset", []):
# MongoDB < 2.5.2 reports $unset for fields that don't exist within
# the document being updated.
keys_to_pop = []
for key in doc:
if key.startswith(to_unset):
if key == to_unset or key[len(to_unset)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
return doc
|
[
"def",
"apply_update",
"(",
"self",
",",
"doc",
",",
"update_spec",
")",
":",
"# Replace a whole document",
"if",
"not",
"'$set'",
"in",
"update_spec",
"and",
"not",
"'$unset'",
"in",
"update_spec",
":",
"# update_spec contains the new document.",
"# Update the key in Solr based on the unique_key mentioned as",
"# parameter.",
"update_spec",
"[",
"'_id'",
"]",
"=",
"doc",
"[",
"self",
".",
"unique_key",
"]",
"return",
"update_spec",
"for",
"to_set",
"in",
"update_spec",
".",
"get",
"(",
"\"$set\"",
",",
"[",
"]",
")",
":",
"value",
"=",
"update_spec",
"[",
"'$set'",
"]",
"[",
"to_set",
"]",
"# Find dotted-path to the value, remove that key from doc, then",
"# put value at key:",
"keys_to_pop",
"=",
"[",
"]",
"for",
"key",
"in",
"doc",
":",
"if",
"key",
".",
"startswith",
"(",
"to_set",
")",
":",
"if",
"key",
"==",
"to_set",
"or",
"key",
"[",
"len",
"(",
"to_set",
")",
"]",
"==",
"'.'",
":",
"keys_to_pop",
".",
"append",
"(",
"key",
")",
"for",
"key",
"in",
"keys_to_pop",
":",
"doc",
".",
"pop",
"(",
"key",
")",
"doc",
"[",
"to_set",
"]",
"=",
"value",
"for",
"to_unset",
"in",
"update_spec",
".",
"get",
"(",
"\"$unset\"",
",",
"[",
"]",
")",
":",
"# MongoDB < 2.5.2 reports $unset for fields that don't exist within",
"# the document being updated.",
"keys_to_pop",
"=",
"[",
"]",
"for",
"key",
"in",
"doc",
":",
"if",
"key",
".",
"startswith",
"(",
"to_unset",
")",
":",
"if",
"key",
"==",
"to_unset",
"or",
"key",
"[",
"len",
"(",
"to_unset",
")",
"]",
"==",
"'.'",
":",
"keys_to_pop",
".",
"append",
"(",
"key",
")",
"for",
"key",
"in",
"keys_to_pop",
":",
"doc",
".",
"pop",
"(",
"key",
")",
"return",
"doc"
] | 44.5 | 13.1875 |
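A standalone sketch of the dotted-path logic above on a flat dict, outside the DocManager class; the function name is illustrative, and $set/$unset handling is reduced to the two key-matching loops:

def apply_flat_update(doc, update_spec):
    # Mirror of the $set/$unset handling above for flat (dotted-key) documents
    for to_set, value in update_spec.get("$set", {}).items():
        for key in [k for k in doc
                    if k == to_set or k.startswith(to_set + ".")]:
            doc.pop(key)  # drop the old dotted children before re-setting
        doc[to_set] = value
    for to_unset in update_spec.get("$unset", {}):
        for key in [k for k in doc
                    if k == to_unset or k.startswith(to_unset + ".")]:
            doc.pop(key)
    return doc

doc = {"_id": 1, "a.b": 2, "a.c": 3, "x": 4}
print(apply_flat_update(doc, {"$set": {"a": 9}, "$unset": {"x": 1}}))
# {'_id': 1, 'a': 9}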
def add_attribute(self, name, value):
"""
Adds given attribute to the node.
Usage::
>>> node_a = AbstractNode()
>>> node_a.add_attribute("attributeA", Attribute())
True
>>> node_a.list_attributes()
[u'attributeA']
:param name: Attribute name.
:type name: unicode
:param value: Attribute value.
:type value: Attribute
:return: Method success.
:rtype: bool
"""
if not issubclass(value.__class__, Attribute):
raise foundations.exceptions.NodeAttributeTypeError(
"Node attribute value must be a '{0}' class instance!".format(Attribute.__class__.__name__))
if self.attribute_exists(name):
raise foundations.exceptions.NodeAttributeExistsError("Node attribute '{0}' already exists!".format(name))
self[name] = value
return True
|
[
"def",
"add_attribute",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"if",
"not",
"issubclass",
"(",
"value",
".",
"__class__",
",",
"Attribute",
")",
":",
"raise",
"foundations",
".",
"exceptions",
".",
"NodeAttributeTypeError",
"(",
"\"Node attribute value must be a '{0}' class instance!\"",
".",
"format",
"(",
"Attribute",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"self",
".",
"attribute_exists",
"(",
"name",
")",
":",
"raise",
"foundations",
".",
"exceptions",
".",
"NodeAttributeExistsError",
"(",
"\"Node attribute '{0}' already exists!\"",
".",
"format",
"(",
"name",
")",
")",
"self",
"[",
"name",
"]",
"=",
"value",
"return",
"True"
] | 31.413793 | 20.931034 |
def entrypoint(func):
"""
A decorator for your main() function.
Really a combination of @autorun and @acceptargv, so will run the
function if __name__ == '__main__' with arguments extricated from
argparse.
As with @acceptargv, this must either be the innermost decorator, or
separated only by "well-behaved" decorators that preserve the __doc__
attribute AND the function signature.
As with @autorun, this must be the outermost decorator, as any
decorators further out will not be applied to the function until after
it is run.
"""
frame_local = sys._getframe(1).f_locals
if '__name__' in frame_local and frame_local['__name__'] == '__main__':
argv = sys.argv[1:]
parser = signature_parser(func)
try:
kwargs = parser.parse_args(argv).__dict__
# special cli flags
# --version is handled by ArgParse
# if kwargs.get('version'):
# print module_version(func)
# return
if 'version' in kwargs.keys():
del kwargs['version']
# --debug
FORMAT = '%(asctime)-6s: %(name)s - %(levelname)s - %(message)s'
if kwargs.get('debug'):
logging.basicConfig(
level=logging.DEBUG,
format=FORMAT,
)
del kwargs['debug']
if "__args" in kwargs:
return func(*_correct_args(func, kwargs))
else:
return func(**kwargs)
except UsageError, e:
parser.error(e.message)
return func
|
[
"def",
"entrypoint",
"(",
"func",
")",
":",
"frame_local",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_locals",
"if",
"'__name__'",
"in",
"frame_local",
"and",
"frame_local",
"[",
"'__name__'",
"]",
"==",
"'__main__'",
":",
"argv",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"parser",
"=",
"signature_parser",
"(",
"func",
")",
"try",
":",
"kwargs",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
".",
"__dict__",
"# special cli flags",
"# --version is handled by ArgParse",
"# if kwargs.get('version'):",
"# print module_version(func)",
"# return",
"if",
"'version'",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"del",
"kwargs",
"[",
"'version'",
"]",
"# --debug",
"FORMAT",
"=",
"'%(asctime)-6s: %(name)s - %(levelname)s - %(message)s'",
"if",
"kwargs",
".",
"get",
"(",
"'debug'",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"FORMAT",
",",
")",
"del",
"kwargs",
"[",
"'debug'",
"]",
"if",
"\"__args\"",
"in",
"kwargs",
":",
"return",
"func",
"(",
"*",
"_correct_args",
"(",
"func",
",",
"kwargs",
")",
")",
"else",
":",
"return",
"func",
"(",
"*",
"*",
"kwargs",
")",
"except",
"UsageError",
",",
"e",
":",
"parser",
".",
"error",
"(",
"e",
".",
"message",
")",
"return",
"func"
] | 31.960784 | 19.72549 |
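A minimal sketch of the run-if-__main__ trick the decorator relies on, without the argparse plumbing; sys._getframe(1) inspects the caller's module globals, and everything else here is simplified:

import sys

def autorun(func):
    # Call func() immediately if the module applying the decorator
    # is being executed as a script rather than imported.
    caller = sys._getframe(1).f_locals
    if caller.get('__name__') == '__main__':
        func()
    return func

@autorun
def main():
    print("running as a script")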
def add_colons(s):
"""Add colons after every second digit.
This function is used in functions to prettify serials.
>>> add_colons('teststring')
'te:st:st:ri:ng'
"""
return ':'.join([s[i:i + 2] for i in range(0, len(s), 2)])
|
[
"def",
"add_colons",
"(",
"s",
")",
":",
"return",
"':'",
".",
"join",
"(",
"[",
"s",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"s",
")",
",",
"2",
")",
"]",
")"
] | 26.777778 | 19 |
def correct_scanpy(adatas, **kwargs):
"""Batch correct a list of `scanpy.api.AnnData`.
Parameters
----------
adatas : `list` of `scanpy.api.AnnData`
Data sets to integrate and/or correct.
kwargs : `dict`
See documentation for the `correct()` method for a full list of
parameters to use for batch correction.
Returns
-------
corrected
By default (`return_dimred=False`), returns a list of
`scanpy.api.AnnData` with batch corrected values in the `.X` field.
corrected, integrated
When `return_dimred=True`, returns a two-tuple containing a list of
`np.ndarray` with integrated low-dimensional embeddings and a list
of `scanpy.api.AnnData` with batch corrected values in the `.X`
field.
"""
if 'return_dimred' in kwargs and kwargs['return_dimred']:
datasets_dimred, datasets, genes = correct(
[adata.X for adata in adatas],
[adata.var_names.values for adata in adatas],
**kwargs
)
else:
datasets, genes = correct(
[adata.X for adata in adatas],
[adata.var_names.values for adata in adatas],
**kwargs
)
new_adatas = []
for i, adata in enumerate(adatas):
adata.X = datasets[i]
new_adatas.append(adata)
if 'return_dimred' in kwargs and kwargs['return_dimred']:
return datasets_dimred, new_adatas
else:
return new_adatas
|
[
"def",
"correct_scanpy",
"(",
"adatas",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'return_dimred'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'return_dimred'",
"]",
":",
"datasets_dimred",
",",
"datasets",
",",
"genes",
"=",
"correct",
"(",
"[",
"adata",
".",
"X",
"for",
"adata",
"in",
"adatas",
"]",
",",
"[",
"adata",
".",
"var_names",
".",
"values",
"for",
"adata",
"in",
"adatas",
"]",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"datasets",
",",
"genes",
"=",
"correct",
"(",
"[",
"adata",
".",
"X",
"for",
"adata",
"in",
"adatas",
"]",
",",
"[",
"adata",
".",
"var_names",
".",
"values",
"for",
"adata",
"in",
"adatas",
"]",
",",
"*",
"*",
"kwargs",
")",
"new_adatas",
"=",
"[",
"]",
"for",
"i",
",",
"adata",
"in",
"enumerate",
"(",
"adatas",
")",
":",
"adata",
".",
"X",
"=",
"datasets",
"[",
"i",
"]",
"new_adatas",
".",
"append",
"(",
"adata",
")",
"if",
"'return_dimred'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'return_dimred'",
"]",
":",
"return",
"datasets_dimred",
",",
"new_adatas",
"else",
":",
"return",
"new_adatas"
] | 32.155556 | 20.577778 |
def layers(self):
"""
Similar to parent images, except that it uses the /history API endpoint
:return:
"""
# sample output:
# {
# "Created": 1457116802,
# "Id": "sha256:507cb13a216097710f0d234668bf64a4c92949c573ba15eba13d05aad392fe04",
# "Size": 204692029,
# "Tags": [
# "docker.io/fedora:latest"
# ],
# "Comment": "",
# "CreatedBy": "/bin/sh -c #(nop) ADD file:bcb5e5c... in /"
# }
try:
response = self.d.history(self.image_id)
except docker.errors.NotFound:
raise NotAvailableAnymore()
layers = []
for l in response:
layer_id = l["Id"]
if layer_id == "<missing>":
layers.append(DockerImage(l, self.docker_backend))
else:
layers.append(self.docker_backend.get_image_by_id(layer_id))
return layers
|
[
"def",
"layers",
"(",
"self",
")",
":",
"# sample output:",
"# {",
"# \"Created\": 1457116802,",
"# \"Id\": \"sha256:507cb13a216097710f0d234668bf64a4c92949c573ba15eba13d05aad392fe04\",",
"# \"Size\": 204692029,",
"# \"Tags\": [",
"# \"docker.io/fedora:latest\"",
"# ],",
"# \"Comment\": \"\",",
"# \"CreatedBy\": \"/bin/sh -c #(nop) ADD file:bcb5e5c... in /\"",
"# }",
"try",
":",
"response",
"=",
"self",
".",
"d",
".",
"history",
"(",
"self",
".",
"image_id",
")",
"except",
"docker",
".",
"errors",
".",
"NotFound",
":",
"raise",
"NotAvailableAnymore",
"(",
")",
"layers",
"=",
"[",
"]",
"for",
"l",
"in",
"response",
":",
"layer_id",
"=",
"l",
"[",
"\"Id\"",
"]",
"if",
"layer_id",
"==",
"\"<missing>\"",
":",
"layers",
".",
"append",
"(",
"DockerImage",
"(",
"l",
",",
"self",
".",
"docker_backend",
")",
")",
"else",
":",
"layers",
".",
"append",
"(",
"self",
".",
"docker_backend",
".",
"get_image_by_id",
"(",
"layer_id",
")",
")",
"return",
"layers"
] | 32.896552 | 18.689655 |
def compile_dependencies(self, sourcepath, include_self=True):
"""
Same as the inherited method but the default value for the keyword
argument ``include_self`` is ``True``.
"""
return super(SassProjectEventHandler, self).compile_dependencies(
sourcepath,
include_self=include_self
)
|
[
"def",
"compile_dependencies",
"(",
"self",
",",
"sourcepath",
",",
"include_self",
"=",
"True",
")",
":",
"return",
"super",
"(",
"SassProjectEventHandler",
",",
"self",
")",
".",
"compile_dependencies",
"(",
"sourcepath",
",",
"include_self",
"=",
"include_self",
")"
] | 37.333333 | 15.777778 |
def store_shot(self):
"""Store current cregs to shots_result"""
def to_str(cregs):
return ''.join(str(b) for b in cregs)
key = to_str(self.cregs)
self.shots_result[key] = self.shots_result.get(key, 0) + 1
|
[
"def",
"store_shot",
"(",
"self",
")",
":",
"def",
"to_str",
"(",
"cregs",
")",
":",
"return",
"''",
".",
"join",
"(",
"str",
"(",
"b",
")",
"for",
"b",
"in",
"cregs",
")",
"key",
"=",
"to_str",
"(",
"self",
".",
"cregs",
")",
"self",
".",
"shots_result",
"[",
"key",
"]",
"=",
"self",
".",
"shots_result",
".",
"get",
"(",
"key",
",",
"0",
")",
"+",
"1"
] | 40.5 | 12.666667 |
def job_factory(self):
"""
Create concrete jobs. The concrete jobs form the following dictionary:
jobs = {
'PLUGINNAME-build_items': {
'method': FUNCTION_OBJECT,
'interval': INTERVAL_TIME ,
}
...
}
If a ConcreteJob instance has a "build_discovery_items" method,
it is added to jobs as well.
warn: the looped method is deprecated since 0.4.0.
You should implement "build_items" instead of "looped_method".
In most cases you need only to change the method name.
"""
jobs = dict()
for section, options in self.config.items():
if section == 'global':
continue
# Since validate in utils/configread, does not occur here Error
# In the other sections are global,
# that there is a "module" option is collateral.
plugin_name = options['module']
job_kls = self.plugins[plugin_name]
if hasattr(job_kls, '__init__'):
job_argspec = inspect.getargspec(job_kls.__init__)
if 'stats_queue' in job_argspec.args:
job_obj = job_kls(
options=options,
queue=self.queue,
stats_queue=self.stats_queue,
logger=self.logger
)
else:
job_obj = job_kls(
options=options,
queue=self.queue,
logger=self.logger
)
# Deprecated!!
if hasattr(job_obj, 'looped_method'):
self.logger.warn(
('{0}\'s "looped_method" is deprecated.'
'Pleases change method name to "build_items"'
''.format(plugin_name))
)
name = '-'.join([section, 'looped_method'])
interval = 60
if 'interval' in options:
interval = options['interval']
elif 'interval' in self.config['global']:
interval = self.config['global']['interval']
jobs[name] = {
'method': job_obj.looped_method,
'interval': interval,
}
if hasattr(job_obj, 'build_items'):
name = '-'.join([section, 'build_items'])
interval = 60
if 'interval' in options:
interval = options['interval']
elif 'interval' in self.config['global']:
interval = self.config['global']['interval']
jobs[name] = {
'method': job_obj.build_items,
'interval': interval,
}
self.logger.info(
'load plugin {0} (interval {1})'
''.format(plugin_name, interval)
)
if hasattr(job_obj, 'build_discovery_items'):
name = '-'.join([section, 'build_discovery_items'])
lld_interval = 600
if 'lld_interval' in options:
lld_interval = options['lld_interval']
elif 'lld_interval' in self.config['global']:
lld_interval = self.config['global']['lld_interval']
jobs[name] = {
'method': job_obj.build_discovery_items,
'interval': lld_interval,
}
self.logger.info(
'load plugin {0} (lld_interval {1})'
''.format(plugin_name, lld_interval)
)
return jobs
|
[
"def",
"job_factory",
"(",
"self",
")",
":",
"jobs",
"=",
"dict",
"(",
")",
"for",
"section",
",",
"options",
"in",
"self",
".",
"config",
".",
"items",
"(",
")",
":",
"if",
"section",
"==",
"'global'",
":",
"continue",
"# Since validate in utils/configread, does not occur here Error",
"# In the other sections are global,",
"# that there is a \"module\" option is collateral.",
"plugin_name",
"=",
"options",
"[",
"'module'",
"]",
"job_kls",
"=",
"self",
".",
"plugins",
"[",
"plugin_name",
"]",
"if",
"hasattr",
"(",
"job_kls",
",",
"'__init__'",
")",
":",
"job_argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"job_kls",
".",
"__init__",
")",
"if",
"'stats_queue'",
"in",
"job_argspec",
".",
"args",
":",
"job_obj",
"=",
"job_kls",
"(",
"options",
"=",
"options",
",",
"queue",
"=",
"self",
".",
"queue",
",",
"stats_queue",
"=",
"self",
".",
"stats_queue",
",",
"logger",
"=",
"self",
".",
"logger",
")",
"else",
":",
"job_obj",
"=",
"job_kls",
"(",
"options",
"=",
"options",
",",
"queue",
"=",
"self",
".",
"queue",
",",
"logger",
"=",
"self",
".",
"logger",
")",
"# Deprecated!!",
"if",
"hasattr",
"(",
"job_obj",
",",
"'looped_method'",
")",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"(",
"'{0}\\'s \"looped_method\" is deprecated.'",
"'Pleases change method name to \"build_items\"'",
"''",
".",
"format",
"(",
"plugin_name",
")",
")",
")",
"name",
"=",
"'-'",
".",
"join",
"(",
"[",
"section",
",",
"'looped_method'",
"]",
")",
"interval",
"=",
"60",
"if",
"'interval'",
"in",
"options",
":",
"interval",
"=",
"options",
"[",
"'interval'",
"]",
"elif",
"'interval'",
"in",
"self",
".",
"config",
"[",
"'global'",
"]",
":",
"interval",
"=",
"self",
".",
"config",
"[",
"'global'",
"]",
"[",
"'interval'",
"]",
"jobs",
"[",
"name",
"]",
"=",
"{",
"'method'",
":",
"job_obj",
".",
"looped_method",
",",
"'interval'",
":",
"interval",
",",
"}",
"if",
"hasattr",
"(",
"job_obj",
",",
"'build_items'",
")",
":",
"name",
"=",
"'-'",
".",
"join",
"(",
"[",
"section",
",",
"'build_items'",
"]",
")",
"interval",
"=",
"60",
"if",
"'interval'",
"in",
"options",
":",
"interval",
"=",
"options",
"[",
"'interval'",
"]",
"elif",
"'interval'",
"in",
"self",
".",
"config",
"[",
"'global'",
"]",
":",
"interval",
"=",
"self",
".",
"config",
"[",
"'global'",
"]",
"[",
"'interval'",
"]",
"jobs",
"[",
"name",
"]",
"=",
"{",
"'method'",
":",
"job_obj",
".",
"build_items",
",",
"'interval'",
":",
"interval",
",",
"}",
"self",
".",
"logger",
".",
"info",
"(",
"'load plugin {0} (interval {1})'",
"''",
".",
"format",
"(",
"plugin_name",
",",
"interval",
")",
")",
"if",
"hasattr",
"(",
"job_obj",
",",
"'build_discovery_items'",
")",
":",
"name",
"=",
"'-'",
".",
"join",
"(",
"[",
"section",
",",
"'build_discovery_items'",
"]",
")",
"lld_interval",
"=",
"600",
"if",
"'lld_interval'",
"in",
"options",
":",
"lld_interval",
"=",
"options",
"[",
"'lld_interval'",
"]",
"elif",
"'lld_interval'",
"in",
"self",
".",
"config",
"[",
"'global'",
"]",
":",
"lld_interval",
"=",
"self",
".",
"config",
"[",
"'global'",
"]",
"[",
"'lld_interval'",
"]",
"jobs",
"[",
"name",
"]",
"=",
"{",
"'method'",
":",
"job_obj",
".",
"build_discovery_items",
",",
"'interval'",
":",
"lld_interval",
",",
"}",
"self",
".",
"logger",
".",
"info",
"(",
"'load plugin {0} (lld_interval {1})'",
"''",
".",
"format",
"(",
"plugin_name",
",",
"lld_interval",
")",
")",
"return",
"jobs"
] | 35.209524 | 17.838095 |
def multipart_content(*files):
"""Returns a mutlipart content.
Note:
This script was clearly inspired by write-mime-multipart.
"""
outer = MIMEMultipart()
for fname in files:
mtype = get_type(fname)
maintype, subtype = mtype.split('/', 1)
with open(fname) as f:
msg = MIMEText(f.read(), _subtype=subtype)
msg.add_header('Content-Disposition', 'attachment',
filename=os.path.basename(fname))
outer.attach(msg)
return outer.as_string()
|
[
"def",
"multipart_content",
"(",
"*",
"files",
")",
":",
"outer",
"=",
"MIMEMultipart",
"(",
")",
"for",
"fname",
"in",
"files",
":",
"mtype",
"=",
"get_type",
"(",
"fname",
")",
"maintype",
",",
"subtype",
"=",
"mtype",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"with",
"open",
"(",
"fname",
")",
"as",
"f",
":",
"msg",
"=",
"MIMEText",
"(",
"f",
".",
"read",
"(",
")",
",",
"_subtype",
"=",
"subtype",
")",
"msg",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'attachment'",
",",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
")",
"outer",
".",
"attach",
"(",
"msg",
")",
"return",
"outer",
".",
"as_string",
"(",
")"
] | 35 | 13.133333 |
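A runnable check of the helper above, feeding it a temporary file created on the fly; stdlib only, with the content type fixed to text/plain so the snippet has no dependency on the original get_type() helper, and the file content is throwaway:

import os
import tempfile
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def multipart_content(*files):
    # Same structure as the function above, minus the MIME-type lookup
    outer = MIMEMultipart()
    for fname in files:
        with open(fname) as f:
            msg = MIMEText(f.read(), _subtype='plain')
        msg.add_header('Content-Disposition', 'attachment',
                       filename=os.path.basename(fname))
        outer.attach(msg)
    return outer.as_string()

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('#cloud-config\n')
print(multipart_content(tmp.name)[:60])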
def on_menu(self, event):
'''called on menu event'''
state = self.state
if self.popup_menu is not None:
ret = self.popup_menu.find_selected(event)
if ret is not None:
ret.popup_pos = self.popup_pos
if ret.returnkey == 'fitWindow':
self.fit_to_window()
elif ret.returnkey == 'fullSize':
self.full_size()
else:
state.out_queue.put(ret)
return
if self.menu is not None:
ret = self.menu.find_selected(event)
if ret is not None:
state.out_queue.put(ret)
return
|
[
"def",
"on_menu",
"(",
"self",
",",
"event",
")",
":",
"state",
"=",
"self",
".",
"state",
"if",
"self",
".",
"popup_menu",
"is",
"not",
"None",
":",
"ret",
"=",
"self",
".",
"popup_menu",
".",
"find_selected",
"(",
"event",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"ret",
".",
"popup_pos",
"=",
"self",
".",
"popup_pos",
"if",
"ret",
".",
"returnkey",
"==",
"'fitWindow'",
":",
"self",
".",
"fit_to_window",
"(",
")",
"elif",
"ret",
".",
"returnkey",
"==",
"'fullSize'",
":",
"self",
".",
"full_size",
"(",
")",
"else",
":",
"state",
".",
"out_queue",
".",
"put",
"(",
"ret",
")",
"return",
"if",
"self",
".",
"menu",
"is",
"not",
"None",
":",
"ret",
"=",
"self",
".",
"menu",
".",
"find_selected",
"(",
"event",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"state",
".",
"out_queue",
".",
"put",
"(",
"ret",
")",
"return"
] | 36.263158 | 8.894737 |
def seek(self, relative_position):
"""
Seek the video by `relative_position` seconds
Args:
relative_position (float): The position in seconds to seek to.
"""
self._player_interface.Seek(Int64(1000.0 * 1000 * relative_position))
self.seekEvent(self, relative_position)
|
[
"def",
"seek",
"(",
"self",
",",
"relative_position",
")",
":",
"self",
".",
"_player_interface",
".",
"Seek",
"(",
"Int64",
"(",
"1000.0",
"*",
"1000",
"*",
"relative_position",
")",
")",
"self",
".",
"seekEvent",
"(",
"self",
",",
"relative_position",
")"
] | 35.555556 | 18.222222 |
def posterior_marginal(self, idx_param=0, res=100, smoothing=0, range_min=None, range_max=None):
"""
Returns an estimate of the marginal distribution of a given model parameter, based on
taking the derivative of the interpolated cdf.
:param int idx_param: Index of parameter to be marginalized.
:param int res: Resolution of the axis.
:param float smoothing: Standard deviation of the Gaussian kernel
used to smooth; same units as parameter.
:param float range_min: Minimum range of the output axis.
:param float range_max: Maximum range of the output axis.
.. seealso::
:meth:`SMCUpdater.plot_posterior_marginal`
"""
# We need to sort the particles to get cumsum to make sense.
# interp1d would do it anyways (using argsort, too), so it's not a waste
s = np.argsort(self.particle_locations[:,idx_param])
locs = self.particle_locations[s,idx_param]
# relevant axis discretization
r_min = np.min(locs) if range_min is None else range_min
r_max = np.max(locs) if range_max is None else range_max
ps = np.linspace(r_min, r_max, res)
# interpolate the cdf of the marginal distribution using cumsum
interp = scipy.interpolate.interp1d(
np.append(locs, r_max + np.abs(r_max-r_min)),
np.append(np.cumsum(self.particle_weights[s]), 1),
#kind='cubic',
bounds_error=False,
fill_value=0,
assume_sorted=True
)
# get distribution from derivative of cdf, and smooth it
pr = np.gradient(interp(ps), ps[1]-ps[0])
if smoothing > 0:
gaussian_filter1d(pr, res*smoothing/(np.abs(r_max-r_min)), output=pr)
del interp
return ps, pr
|
[
"def",
"posterior_marginal",
"(",
"self",
",",
"idx_param",
"=",
"0",
",",
"res",
"=",
"100",
",",
"smoothing",
"=",
"0",
",",
"range_min",
"=",
"None",
",",
"range_max",
"=",
"None",
")",
":",
"# We need to sort the particles to get cumsum to make sense.",
"# interp1d would do it anyways (using argsort, too), so it's not a waste",
"s",
"=",
"np",
".",
"argsort",
"(",
"self",
".",
"particle_locations",
"[",
":",
",",
"idx_param",
"]",
")",
"locs",
"=",
"self",
".",
"particle_locations",
"[",
"s",
",",
"idx_param",
"]",
"# relevant axis discretization",
"r_min",
"=",
"np",
".",
"min",
"(",
"locs",
")",
"if",
"range_min",
"is",
"None",
"else",
"range_min",
"r_max",
"=",
"np",
".",
"max",
"(",
"locs",
")",
"if",
"range_max",
"is",
"None",
"else",
"range_max",
"ps",
"=",
"np",
".",
"linspace",
"(",
"r_min",
",",
"r_max",
",",
"res",
")",
"# interpolate the cdf of the marginal distribution using cumsum",
"interp",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"np",
".",
"append",
"(",
"locs",
",",
"r_max",
"+",
"np",
".",
"abs",
"(",
"r_max",
"-",
"r_min",
")",
")",
",",
"np",
".",
"append",
"(",
"np",
".",
"cumsum",
"(",
"self",
".",
"particle_weights",
"[",
"s",
"]",
")",
",",
"1",
")",
",",
"#kind='cubic',",
"bounds_error",
"=",
"False",
",",
"fill_value",
"=",
"0",
",",
"assume_sorted",
"=",
"True",
")",
"# get distribution from derivative of cdf, and smooth it",
"pr",
"=",
"np",
".",
"gradient",
"(",
"interp",
"(",
"ps",
")",
",",
"ps",
"[",
"1",
"]",
"-",
"ps",
"[",
"0",
"]",
")",
"if",
"smoothing",
">",
"0",
":",
"gaussian_filter1d",
"(",
"pr",
",",
"res",
"*",
"smoothing",
"/",
"(",
"np",
".",
"abs",
"(",
"r_max",
"-",
"r_min",
")",
")",
",",
"output",
"=",
"pr",
")",
"del",
"interp",
"return",
"ps",
",",
"pr"
] | 39.888889 | 24.155556 |
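A self-contained version of the cdf-interpolation idea above, run on synthetic weighted samples rather than an SMCUpdater; numpy and scipy only, uniform weights assumed, and the Gaussian smoothing step omitted:

import numpy as np
import scipy.interpolate

rng = np.random.default_rng(0)
locs = rng.normal(0.0, 1.0, size=2000)          # particle locations
weights = np.full(locs.shape, 1.0 / len(locs))  # uniform particle weights

s = np.argsort(locs)                 # cumsum only makes sense on sorted locs
locs, weights = locs[s], weights[s]
r_min, r_max = locs.min(), locs.max()
ps = np.linspace(r_min, r_max, 100)

# cdf from cumulative weights, pinned to 1 past the right edge
cdf = scipy.interpolate.interp1d(
    np.append(locs, r_max + (r_max - r_min)),
    np.append(np.cumsum(weights), 1.0),
    bounds_error=False, fill_value=0.0, assume_sorted=True)
pr = np.gradient(cdf(ps), ps[1] - ps[0])  # pdf = d(cdf)/dx

print(ps[np.argmax(pr)])  # should sit near the true mean of 0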
def getStringForBytes(self, s):
"""
Returns the corresponding string for the supplied utf-8 encoded bytes.
If there is no string object, one is created.
@since: 0.6
"""
h = hash(s)
u = self._unicodes.get(h, None)
if u is not None:
return u
u = self._unicodes[h] = s.decode('utf-8')
return u
|
[
"def",
"getStringForBytes",
"(",
"self",
",",
"s",
")",
":",
"h",
"=",
"hash",
"(",
"s",
")",
"u",
"=",
"self",
".",
"_unicodes",
".",
"get",
"(",
"h",
",",
"None",
")",
"if",
"u",
"is",
"not",
"None",
":",
"return",
"u",
"u",
"=",
"self",
".",
"_unicodes",
"[",
"h",
"]",
"=",
"s",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"u"
] | 23.1875 | 20.6875 |
def extract_subset(self, subset, contract=True):
"""
Return all nodes in a subset.
We assume the oboInOwl encoding of subsets, and subset IDs are IRIs, or IR fragments
"""
return [n for n in self.nodes() if subset in self.subsets(n, contract=contract)]
|
[
"def",
"extract_subset",
"(",
"self",
",",
"subset",
",",
"contract",
"=",
"True",
")",
":",
"return",
"[",
"n",
"for",
"n",
"in",
"self",
".",
"nodes",
"(",
")",
"if",
"subset",
"in",
"self",
".",
"subsets",
"(",
"n",
",",
"contract",
"=",
"contract",
")",
"]"
] | 41 | 21.571429 |
def check_errors(self, uri, response):
'''
Check HTTP response for known errors
'''
if response.status == 401:
raise trolly.Unauthorised(uri, response)
if response.status != 200:
raise trolly.ResourceUnavailable(uri, response)
|
[
"def",
"check_errors",
"(",
"self",
",",
"uri",
",",
"response",
")",
":",
"if",
"response",
".",
"status",
"==",
"401",
":",
"raise",
"trolly",
".",
"Unauthorised",
"(",
"uri",
",",
"response",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"trolly",
".",
"ResourceUnavailable",
"(",
"uri",
",",
"response",
")"
] | 31.333333 | 16.222222 |
def collect_request_parameters(self, request):
"""Collect parameters in an object for convenient access"""
class OAuthParameters(object):
"""Used as a parameter container since plain object()s can't"""
pass
# Collect parameters
query = urlparse(request.url.decode("utf-8")).query
content_type = request.headers.get('Content-Type', '')
if request.form:
body = request.form.to_dict()
elif content_type == 'application/x-www-form-urlencoded':
body = request.data.decode("utf-8")
else:
body = ''
headers = dict(encode_params_utf8(request.headers.items()))
params = dict(collect_parameters(uri_query=query, body=body, headers=headers))
# Extract params and store for convenient and predictable access
oauth_params = OAuthParameters()
oauth_params.client_key = params.get(u'oauth_consumer_key')
oauth_params.resource_owner_key = params.get(u'oauth_token', None)
oauth_params.nonce = params.get(u'oauth_nonce')
oauth_params.timestamp = params.get(u'oauth_timestamp')
oauth_params.verifier = params.get(u'oauth_verifier', None)
oauth_params.callback_uri = params.get(u'oauth_callback', None)
oauth_params.realm = params.get(u'realm', None)
return oauth_params
|
[
"def",
"collect_request_parameters",
"(",
"self",
",",
"request",
")",
":",
"class",
"OAuthParameters",
"(",
"object",
")",
":",
"\"\"\"Used as a parameter container since plain object()s can't\"\"\"",
"pass",
"# Collect parameters",
"query",
"=",
"urlparse",
"(",
"request",
".",
"url",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
".",
"query",
"content_type",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"''",
")",
"if",
"request",
".",
"form",
":",
"body",
"=",
"request",
".",
"form",
".",
"to_dict",
"(",
")",
"elif",
"content_type",
"==",
"'application/x-www-form-urlencoded'",
":",
"body",
"=",
"request",
".",
"data",
".",
"decode",
"(",
"\"utf-8\"",
")",
"else",
":",
"body",
"=",
"''",
"headers",
"=",
"dict",
"(",
"encode_params_utf8",
"(",
"request",
".",
"headers",
".",
"items",
"(",
")",
")",
")",
"params",
"=",
"dict",
"(",
"collect_parameters",
"(",
"uri_query",
"=",
"query",
",",
"body",
"=",
"body",
",",
"headers",
"=",
"headers",
")",
")",
"# Extract params and store for convenient and predictable access",
"oauth_params",
"=",
"OAuthParameters",
"(",
")",
"oauth_params",
".",
"client_key",
"=",
"params",
".",
"get",
"(",
"u'oauth_consumer_key'",
")",
"oauth_params",
".",
"resource_owner_key",
"=",
"params",
".",
"get",
"(",
"u'oauth_token'",
",",
"None",
")",
"oauth_params",
".",
"nonce",
"=",
"params",
".",
"get",
"(",
"u'oauth_nonce'",
")",
"oauth_params",
".",
"timestamp",
"=",
"params",
".",
"get",
"(",
"u'oauth_timestamp'",
")",
"oauth_params",
".",
"verifier",
"=",
"params",
".",
"get",
"(",
"u'oauth_verifier'",
",",
"None",
")",
"oauth_params",
".",
"callback_uri",
"=",
"params",
".",
"get",
"(",
"u'oauth_callback'",
",",
"None",
")",
"oauth_params",
".",
"realm",
"=",
"params",
".",
"get",
"(",
"u'realm'",
",",
"None",
")",
"return",
"oauth_params"
] | 46.413793 | 20.344828 |
def set_stderrthreshold(s):
"""Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
"""
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
'set_stderrthreshold only accepts integer absl logging level '
'from -3 to 1, or case-insensitive string values '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'But found "{}" ({}).'.format(s, type(s)))
|
[
"def",
"set_stderrthreshold",
"(",
"s",
")",
":",
"if",
"s",
"in",
"converter",
".",
"ABSL_LEVELS",
":",
"FLAGS",
".",
"stderrthreshold",
"=",
"converter",
".",
"ABSL_LEVELS",
"[",
"s",
"]",
"elif",
"isinstance",
"(",
"s",
",",
"str",
")",
"and",
"s",
".",
"upper",
"(",
")",
"in",
"converter",
".",
"ABSL_NAMES",
":",
"FLAGS",
".",
"stderrthreshold",
"=",
"s",
"else",
":",
"raise",
"ValueError",
"(",
"'set_stderrthreshold only accepts integer absl logging level '",
"'from -3 to 1, or case-insensitive string values '",
"\"'debug', 'info', 'warning', 'error', and 'fatal'. \"",
"'But found \"{}\" ({}).'",
".",
"format",
"(",
"s",
",",
"type",
"(",
"s",
")",
")",
")"
] | 37.285714 | 20 |
def get_scope_by_name(self, scope_name):
"""GetScopeByName.
[Preview API]
:param str scope_name:
:rtype: :class:`<IdentityScope> <azure.devops.v5_0.identity.models.IdentityScope>`
"""
query_parameters = {}
if scope_name is not None:
query_parameters['scopeName'] = self._serialize.query('scope_name', scope_name, 'str')
response = self._send(http_method='GET',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='5.0-preview.2',
query_parameters=query_parameters)
return self._deserialize('IdentityScope', response)
|
[
"def",
"get_scope_by_name",
"(",
"self",
",",
"scope_name",
")",
":",
"query_parameters",
"=",
"{",
"}",
"if",
"scope_name",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'scopeName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'scope_name'",
",",
"scope_name",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'4e11e2bf-1e79-4eb5-8f34-a6337bd0de38'",
",",
"version",
"=",
"'5.0-preview.2'",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'IdentityScope'",
",",
"response",
")"
] | 48.928571 | 18.571429 |
def to_native(self, obj, name, value): # pylint:disable=unused-argument
"""Transform the MongoDB value into a Marrow Mongo value."""
if self.mapping:
for original, new in self.mapping.items():
value = value.replace(original, new)
return load(value, self.namespace)
|
[
"def",
"to_native",
"(",
"self",
",",
"obj",
",",
"name",
",",
"value",
")",
":",
"# pylint:disable=unused-argument",
"if",
"self",
".",
"mapping",
":",
"for",
"original",
",",
"new",
"in",
"self",
".",
"mapping",
".",
"items",
"(",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"original",
",",
"new",
")",
"return",
"load",
"(",
"value",
",",
"self",
".",
"namespace",
")"
] | 34.625 | 17.375 |
def request_ocsp(self):
"""
Called to request that the server sends stapled OCSP data, if
available. If this is not called on the client side then the server
will not send OCSP data. Should be used in conjunction with
:meth:`Context.set_ocsp_client_callback`.
"""
rc = _lib.SSL_set_tlsext_status_type(
self._ssl, _lib.TLSEXT_STATUSTYPE_ocsp
)
_openssl_assert(rc == 1)
|
[
"def",
"request_ocsp",
"(",
"self",
")",
":",
"rc",
"=",
"_lib",
".",
"SSL_set_tlsext_status_type",
"(",
"self",
".",
"_ssl",
",",
"_lib",
".",
"TLSEXT_STATUSTYPE_ocsp",
")",
"_openssl_assert",
"(",
"rc",
"==",
"1",
")"
] | 40.090909 | 15.545455 |
def get_object(self, request, object_id, *args, **kwargs):
"""
Make sure the object is fetched in the correct language.
"""
obj = super(TranslatableAdmin, self).get_object(request, object_id, *args, **kwargs)
if obj is not None and self._has_translatable_model(): # Allow fallback to regular models.
obj.set_current_language(self._language(request, obj), initialize=True)
return obj
|
[
"def",
"get_object",
"(",
"self",
",",
"request",
",",
"object_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"obj",
"=",
"super",
"(",
"TranslatableAdmin",
",",
"self",
")",
".",
"get_object",
"(",
"request",
",",
"object_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"obj",
"is",
"not",
"None",
"and",
"self",
".",
"_has_translatable_model",
"(",
")",
":",
"# Allow fallback to regular models.",
"obj",
".",
"set_current_language",
"(",
"self",
".",
"_language",
"(",
"request",
",",
"obj",
")",
",",
"initialize",
"=",
"True",
")",
"return",
"obj"
] | 43.6 | 29.8 |
def createEditor(self, parent, column, operator, value):
"""
Creates a new editor for the system.
"""
editor = super(EnumPlugin, self).createEditor(parent,
column,
operator,
value)
editor.setEnum(column.enum())
if operator in ('contains', 'does not contain'):
editor.setCheckable(True)
editor.setCurrentValue(value)
return editor
|
[
"def",
"createEditor",
"(",
"self",
",",
"parent",
",",
"column",
",",
"operator",
",",
"value",
")",
":",
"editor",
"=",
"super",
"(",
"EnumPlugin",
",",
"self",
")",
".",
"createEditor",
"(",
"parent",
",",
"column",
",",
"operator",
",",
"value",
")",
"editor",
".",
"setEnum",
"(",
"column",
".",
"enum",
"(",
")",
")",
"if",
"operator",
"in",
"(",
"'contains'",
",",
"'does not contain'",
")",
":",
"editor",
".",
"setCheckable",
"(",
"True",
")",
"editor",
".",
"setCurrentValue",
"(",
"value",
")",
"return",
"editor"
] | 39 | 14.333333 |
def receive(self):
"""Receive TCP response, looping to get whole thing or timeout."""
try:
buffer = self._socket.recv(BUFFER_SIZE)
except socket.timeout as error:
# Something is wrong, assume it's offline temporarily
_LOGGER.error("Error receiving: %s", error)
# self._socket.close()
return ""
# Read until a newline or timeout
buffering = True
response = ''
while buffering:
if '\n' in buffer.decode("utf8"):
response = buffer.decode("utf8").split('\n')[0]
buffering = False
else:
try:
more = self._socket.recv(BUFFER_SIZE)
except socket.timeout:
more = None
if not more:
buffering = False
response = buffer.decode("utf8")
else:
buffer += more
return response
|
[
"def",
"receive",
"(",
"self",
")",
":",
"try",
":",
"buffer",
"=",
"self",
".",
"_socket",
".",
"recv",
"(",
"BUFFER_SIZE",
")",
"except",
"socket",
".",
"timeout",
"as",
"error",
":",
"# Something is wrong, assume it's offline temporarily",
"_LOGGER",
".",
"error",
"(",
"\"Error receiving: %s\"",
",",
"error",
")",
"# self._socket.close()",
"return",
"\"\"",
"# Read until a newline or timeout",
"buffering",
"=",
"True",
"response",
"=",
"''",
"while",
"buffering",
":",
"if",
"'\\n'",
"in",
"buffer",
".",
"decode",
"(",
"\"utf8\"",
")",
":",
"response",
"=",
"buffer",
".",
"decode",
"(",
"\"utf8\"",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
"buffering",
"=",
"False",
"else",
":",
"try",
":",
"more",
"=",
"self",
".",
"_socket",
".",
"recv",
"(",
"BUFFER_SIZE",
")",
"except",
"socket",
".",
"timeout",
":",
"more",
"=",
"None",
"if",
"not",
"more",
":",
"buffering",
"=",
"False",
"response",
"=",
"buffer",
".",
"decode",
"(",
"\"utf8\"",
")",
"else",
":",
"buffer",
"+=",
"more",
"return",
"response"
] | 34.928571 | 14.071429 |
def mouseDrag(self, x, y, step=1):
""" Move the mouse point to position (x, y) in increments of step
"""
log.debug('mouseDrag %d,%d', x, y)
if x < self.x:
xsteps = [self.x - i for i in range(step, self.x - x + 1, step)]
else:
xsteps = range(self.x, x, step)
if y < self.y:
ysteps = [self.y - i for i in range(step, self.y - y + 1, step)]
else:
ysteps = range(self.y, y, step)
for ypos in ysteps:
time.sleep(.2)
self.mouseMove(self.x, ypos)
for xpos in xsteps:
time.sleep(.2)
self.mouseMove(xpos, self.y)
self.mouseMove(x, y)
return self
|
[
"def",
"mouseDrag",
"(",
"self",
",",
"x",
",",
"y",
",",
"step",
"=",
"1",
")",
":",
"log",
".",
"debug",
"(",
"'mouseDrag %d,%d'",
",",
"x",
",",
"y",
")",
"if",
"x",
"<",
"self",
".",
"x",
":",
"xsteps",
"=",
"[",
"self",
".",
"x",
"-",
"i",
"for",
"i",
"in",
"range",
"(",
"step",
",",
"self",
".",
"x",
"-",
"x",
"+",
"1",
",",
"step",
")",
"]",
"else",
":",
"xsteps",
"=",
"range",
"(",
"self",
".",
"x",
",",
"x",
",",
"step",
")",
"if",
"y",
"<",
"self",
".",
"y",
":",
"ysteps",
"=",
"[",
"self",
".",
"y",
"-",
"i",
"for",
"i",
"in",
"range",
"(",
"step",
",",
"self",
".",
"y",
"-",
"y",
"+",
"1",
",",
"step",
")",
"]",
"else",
":",
"ysteps",
"=",
"range",
"(",
"self",
".",
"y",
",",
"y",
",",
"step",
")",
"for",
"ypos",
"in",
"ysteps",
":",
"time",
".",
"sleep",
"(",
".2",
")",
"self",
".",
"mouseMove",
"(",
"self",
".",
"x",
",",
"ypos",
")",
"for",
"xpos",
"in",
"xsteps",
":",
"time",
".",
"sleep",
"(",
".2",
")",
"self",
".",
"mouseMove",
"(",
"xpos",
",",
"self",
".",
"y",
")",
"self",
".",
"mouseMove",
"(",
"x",
",",
"y",
")",
"return",
"self"
] | 28.04 | 18.52 |
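The step-list construction above, pulled out as a pure function so the up/down asymmetry is easy to inspect; no GUI involved, and the start/target values are arbitrary:

def steps(start, target, step=1):
    # Descending moves count down from start; ascending ones use range directly,
    # mirroring the xsteps/ysteps branches in mouseDrag above.
    if target < start:
        return [start - i for i in range(step, start - target + 1, step)]
    return list(range(start, target, step))

print(steps(10, 4, 2))   # [8, 6, 4]
print(steps(4, 10, 2))   # [4, 6, 8]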
def grading_status_text(self):
'''
A rendering of the grading that is an answer on the question
"Is grading finished?".
Used in duplicate view and submission list on the teacher backend.
'''
if self.assignment.is_graded():
if self.is_grading_finished():
return str('Yes ({0})'.format(self.grading))
else:
return str('No')
else:
return str('Not graded')
|
[
"def",
"grading_status_text",
"(",
"self",
")",
":",
"if",
"self",
".",
"assignment",
".",
"is_graded",
"(",
")",
":",
"if",
"self",
".",
"is_grading_finished",
"(",
")",
":",
"return",
"str",
"(",
"'Yes ({0})'",
".",
"format",
"(",
"self",
".",
"grading",
")",
")",
"else",
":",
"return",
"str",
"(",
"'No'",
")",
"else",
":",
"return",
"str",
"(",
"'Not graded'",
")"
] | 35.692308 | 17.230769 |
def _log_in(self):
'''Connect and login.
Coroutine.
'''
username = self._request.url_info.username or self._request.username or 'anonymous'
password = self._request.url_info.password or self._request.password or '-wpull@'
cached_login = self._login_table.get(self._control_connection)
if cached_login and cached_login == (username, password):
_logger.debug('Reusing existing login.')
return
try:
yield from self._commander.login(username, password)
except FTPServerError as error:
raise AuthenticationError('Login error: {}'.format(error)) \
from error
self._login_table[self._control_connection] = (username, password)
|
[
"def",
"_log_in",
"(",
"self",
")",
":",
"username",
"=",
"self",
".",
"_request",
".",
"url_info",
".",
"username",
"or",
"self",
".",
"_request",
".",
"username",
"or",
"'anonymous'",
"password",
"=",
"self",
".",
"_request",
".",
"url_info",
".",
"password",
"or",
"self",
".",
"_request",
".",
"password",
"or",
"'-wpull@'",
"cached_login",
"=",
"self",
".",
"_login_table",
".",
"get",
"(",
"self",
".",
"_control_connection",
")",
"if",
"cached_login",
"and",
"cached_login",
"==",
"(",
"username",
",",
"password",
")",
":",
"_logger",
".",
"debug",
"(",
"'Reusing existing login.'",
")",
"return",
"try",
":",
"yield",
"from",
"self",
".",
"_commander",
".",
"login",
"(",
"username",
",",
"password",
")",
"except",
"FTPServerError",
"as",
"error",
":",
"raise",
"AuthenticationError",
"(",
"'Login error: {}'",
".",
"format",
"(",
"error",
")",
")",
"from",
"error",
"self",
".",
"_login_table",
"[",
"self",
".",
"_control_connection",
"]",
"=",
"(",
"username",
",",
"password",
")"
] | 35.619048 | 28.857143 |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the CreateKeyPair response payload and decode it
into its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the private key unique identifier or
the public key unique identifier is missing from the encoded
payload.
"""
super(CreateKeyPairResponsePayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(
enums.Tags.PRIVATE_KEY_UNIQUE_IDENTIFIER,
local_buffer
):
self._private_key_unique_identifier = primitives.TextString(
tag=enums.Tags.PRIVATE_KEY_UNIQUE_IDENTIFIER
)
self._private_key_unique_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The CreateKeyPair response payload encoding is missing the "
"private key unique identifier."
)
if self.is_tag_next(
enums.Tags.PUBLIC_KEY_UNIQUE_IDENTIFIER,
local_buffer
):
self._public_key_unique_identifier = primitives.TextString(
tag=enums.Tags.PUBLIC_KEY_UNIQUE_IDENTIFIER
)
self._public_key_unique_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The CreateKeyPair response payload encoding is missing the "
"public key unique identifier."
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(
enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE,
local_buffer
):
self._private_key_template_attribute = \
objects.TemplateAttribute(
tag=enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE
)
self._private_key_template_attribute.read(
local_buffer,
kmip_version=kmip_version
)
if self.is_tag_next(
enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE,
local_buffer
):
self._public_key_template_attribute = \
objects.TemplateAttribute(
tag=enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE
)
self._public_key_template_attribute.read(
local_buffer,
kmip_version=kmip_version
)
self.is_oversized(local_buffer)
|
[
"def",
"read",
"(",
"self",
",",
"input_buffer",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_0",
")",
":",
"super",
"(",
"CreateKeyPairResponsePayload",
",",
"self",
")",
".",
"read",
"(",
"input_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_buffer",
"=",
"utils",
".",
"BytearrayStream",
"(",
"input_buffer",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"PRIVATE_KEY_UNIQUE_IDENTIFIER",
",",
"local_buffer",
")",
":",
"self",
".",
"_private_key_unique_identifier",
"=",
"primitives",
".",
"TextString",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"PRIVATE_KEY_UNIQUE_IDENTIFIER",
")",
"self",
".",
"_private_key_unique_identifier",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"exceptions",
".",
"InvalidKmipEncoding",
"(",
"\"The CreateKeyPair response payload encoding is missing the \"",
"\"private key unique identifier.\"",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"PUBLIC_KEY_UNIQUE_IDENTIFIER",
",",
"local_buffer",
")",
":",
"self",
".",
"_public_key_unique_identifier",
"=",
"primitives",
".",
"TextString",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"PUBLIC_KEY_UNIQUE_IDENTIFIER",
")",
"self",
".",
"_public_key_unique_identifier",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"exceptions",
".",
"InvalidKmipEncoding",
"(",
"\"The CreateKeyPair response payload encoding is missing the \"",
"\"public key unique identifier.\"",
")",
"if",
"kmip_version",
"<",
"enums",
".",
"KMIPVersion",
".",
"KMIP_2_0",
":",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"PRIVATE_KEY_TEMPLATE_ATTRIBUTE",
",",
"local_buffer",
")",
":",
"self",
".",
"_private_key_template_attribute",
"=",
"objects",
".",
"TemplateAttribute",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"PRIVATE_KEY_TEMPLATE_ATTRIBUTE",
")",
"self",
".",
"_private_key_template_attribute",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"PUBLIC_KEY_TEMPLATE_ATTRIBUTE",
",",
"local_buffer",
")",
":",
"self",
".",
"_public_key_template_attribute",
"=",
"objects",
".",
"TemplateAttribute",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"PUBLIC_KEY_TEMPLATE_ATTRIBUTE",
")",
"self",
".",
"_public_key_template_attribute",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"is_oversized",
"(",
"local_buffer",
")"
] | 37.388235 | 20.047059 |
def create_dialog_node(self,
workspace_id,
dialog_node,
description=None,
conditions=None,
parent=None,
previous_sibling=None,
output=None,
context=None,
metadata=None,
next_step=None,
title=None,
node_type=None,
event_name=None,
variable=None,
actions=None,
digress_in=None,
digress_out=None,
digress_out_slots=None,
user_label=None,
**kwargs):
"""
Create dialog node.
Create a new dialog node.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str parent: The ID of the parent dialog node. This property is omitted if
the dialog node has no parent.
:param str previous_sibling: The ID of the previous sibling dialog node. This
property is omitted if the dialog node has no previous sibling.
:param DialogNodeOutput output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
:param dict context: The context for the dialog node.
:param dict metadata: The metadata for the dialog node.
:param DialogNodeNextStep next_step: The next step to execute following this
dialog node.
:param str title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str node_type: How the dialog node is processed.
:param str event_name: How an `event_handler` node is processed.
:param str variable: The location in the dialog context where output is stored.
:param list[DialogNodeAction] actions: An array of objects describing any actions
to be invoked by the dialog node.
:param str digress_in: Whether this top-level dialog node can be digressed into.
:param str digress_out: Whether this dialog node can be returned to after a
digression.
:param str digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str user_label: A label that can be displayed externally to describe the
purpose of the node to users. This string must be no longer than 512 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if dialog_node is None:
raise ValueError('dialog_node must be provided')
if output is not None:
output = self._convert_model(output, DialogNodeOutput)
if next_step is not None:
next_step = self._convert_model(next_step, DialogNodeNextStep)
if actions is not None:
actions = [
self._convert_model(x, DialogNodeAction) for x in actions
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1',
'create_dialog_node')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'dialog_node': dialog_node,
'description': description,
'conditions': conditions,
'parent': parent,
'previous_sibling': previous_sibling,
'output': output,
'context': context,
'metadata': metadata,
'next_step': next_step,
'title': title,
'type': node_type,
'event_name': event_name,
'variable': variable,
'actions': actions,
'digress_in': digress_in,
'digress_out': digress_out,
'digress_out_slots': digress_out_slots,
'user_label': user_label
}
url = '/v1/workspaces/{0}/dialog_nodes'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
|
[
"def",
"create_dialog_node",
"(",
"self",
",",
"workspace_id",
",",
"dialog_node",
",",
"description",
"=",
"None",
",",
"conditions",
"=",
"None",
",",
"parent",
"=",
"None",
",",
"previous_sibling",
"=",
"None",
",",
"output",
"=",
"None",
",",
"context",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"next_step",
"=",
"None",
",",
"title",
"=",
"None",
",",
"node_type",
"=",
"None",
",",
"event_name",
"=",
"None",
",",
"variable",
"=",
"None",
",",
"actions",
"=",
"None",
",",
"digress_in",
"=",
"None",
",",
"digress_out",
"=",
"None",
",",
"digress_out_slots",
"=",
"None",
",",
"user_label",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"workspace_id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'workspace_id must be provided'",
")",
"if",
"dialog_node",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'dialog_node must be provided'",
")",
"if",
"output",
"is",
"not",
"None",
":",
"output",
"=",
"self",
".",
"_convert_model",
"(",
"output",
",",
"DialogNodeOutput",
")",
"if",
"next_step",
"is",
"not",
"None",
":",
"next_step",
"=",
"self",
".",
"_convert_model",
"(",
"next_step",
",",
"DialogNodeNextStep",
")",
"if",
"actions",
"is",
"not",
"None",
":",
"actions",
"=",
"[",
"self",
".",
"_convert_model",
"(",
"x",
",",
"DialogNodeAction",
")",
"for",
"x",
"in",
"actions",
"]",
"headers",
"=",
"{",
"}",
"if",
"'headers'",
"in",
"kwargs",
":",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
")",
")",
"sdk_headers",
"=",
"get_sdk_headers",
"(",
"'conversation'",
",",
"'V1'",
",",
"'create_dialog_node'",
")",
"headers",
".",
"update",
"(",
"sdk_headers",
")",
"params",
"=",
"{",
"'version'",
":",
"self",
".",
"version",
"}",
"data",
"=",
"{",
"'dialog_node'",
":",
"dialog_node",
",",
"'description'",
":",
"description",
",",
"'conditions'",
":",
"conditions",
",",
"'parent'",
":",
"parent",
",",
"'previous_sibling'",
":",
"previous_sibling",
",",
"'output'",
":",
"output",
",",
"'context'",
":",
"context",
",",
"'metadata'",
":",
"metadata",
",",
"'next_step'",
":",
"next_step",
",",
"'title'",
":",
"title",
",",
"'type'",
":",
"node_type",
",",
"'event_name'",
":",
"event_name",
",",
"'variable'",
":",
"variable",
",",
"'actions'",
":",
"actions",
",",
"'digress_in'",
":",
"digress_in",
",",
"'digress_out'",
":",
"digress_out",
",",
"'digress_out_slots'",
":",
"digress_out_slots",
",",
"'user_label'",
":",
"user_label",
"}",
"url",
"=",
"'/v1/workspaces/{0}/dialog_nodes'",
".",
"format",
"(",
"*",
"self",
".",
"_encode_path_vars",
"(",
"workspace_id",
")",
")",
"response",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"'POST'",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"json",
"=",
"data",
",",
"accept_json",
"=",
"True",
")",
"return",
"response"
] | 45.238095 | 19.126984 |
def to_element(self):
"""Return an ElementTree Element based on this resource.
Returns:
~xml.etree.ElementTree.Element: an Element.
"""
if not self.protocol_info:
raise DIDLMetadataError('Could not create Element for this'
'resource:'
'protocolInfo not set (required).')
root = XML.Element('res')
# Required
root.attrib['protocolInfo'] = self.protocol_info
# Optional
if self.import_uri is not None:
root.attrib['importUri'] = self.import_uri
if self.size is not None:
root.attrib['size'] = str(self.size)
if self.duration is not None:
root.attrib['duration'] = self.duration
if self.bitrate is not None:
root.attrib['bitrate'] = str(self.bitrate)
if self.sample_frequency is not None:
root.attrib['sampleFrequency'] = str(self.sample_frequency)
if self.bits_per_sample is not None:
root.attrib['bitsPerSample'] = str(self.bits_per_sample)
if self.nr_audio_channels is not None:
root.attrib['nrAudioChannels'] = str(self.nr_audio_channels)
if self.resolution is not None:
root.attrib['resolution'] = self.resolution
if self.color_depth is not None:
root.attrib['colorDepth'] = str(self.color_depth)
if self.protection is not None:
root.attrib['protection'] = self.protection
root.text = self.uri
return root
|
[
"def",
"to_element",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"protocol_info",
":",
"raise",
"DIDLMetadataError",
"(",
"'Could not create Element for this'",
"'resource:'",
"'protocolInfo not set (required).'",
")",
"root",
"=",
"XML",
".",
"Element",
"(",
"'res'",
")",
"# Required",
"root",
".",
"attrib",
"[",
"'protocolInfo'",
"]",
"=",
"self",
".",
"protocol_info",
"# Optional",
"if",
"self",
".",
"import_uri",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'importUri'",
"]",
"=",
"self",
".",
"import_uri",
"if",
"self",
".",
"size",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'size'",
"]",
"=",
"str",
"(",
"self",
".",
"size",
")",
"if",
"self",
".",
"duration",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'duration'",
"]",
"=",
"self",
".",
"duration",
"if",
"self",
".",
"bitrate",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'bitrate'",
"]",
"=",
"str",
"(",
"self",
".",
"bitrate",
")",
"if",
"self",
".",
"sample_frequency",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'sampleFrequency'",
"]",
"=",
"str",
"(",
"self",
".",
"sample_frequency",
")",
"if",
"self",
".",
"bits_per_sample",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'bitsPerSample'",
"]",
"=",
"str",
"(",
"self",
".",
"bits_per_sample",
")",
"if",
"self",
".",
"nr_audio_channels",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'nrAudioChannels'",
"]",
"=",
"str",
"(",
"self",
".",
"nr_audio_channels",
")",
"if",
"self",
".",
"resolution",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'resolution'",
"]",
"=",
"self",
".",
"resolution",
"if",
"self",
".",
"color_depth",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'colorDepth'",
"]",
"=",
"str",
"(",
"self",
".",
"color_depth",
")",
"if",
"self",
".",
"protection",
"is",
"not",
"None",
":",
"root",
".",
"attrib",
"[",
"'protection'",
"]",
"=",
"self",
".",
"protection",
"root",
".",
"text",
"=",
"self",
".",
"uri",
"return",
"root"
] | 40.763158 | 15.105263 |
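A hedged usage sketch for to_element: the DidlResource class name and its constructor arguments are assumptions for illustration; only protocol_info is required by the method above.

    from xml.etree import ElementTree as XML

    res = DidlResource(uri='x-file-cifs://server/track.mp3',
                       protocol_info='x-file-cifs:*:audio/mpeg:*')
    print(XML.tostring(res.to_element()))  # <res protocolInfo="...">x-file-cifs://server/track.mp3</res>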
def _logging_env_conf_overrides(log_init_warnings=None):
"""Returns a dictionary that is empty or has a "logging" key that refers to
the (up to 3) key-value pairs that pertain to logging and are read from the env.
This is mainly a convenience function for ConfigWrapper so that it can accurately
    report the source of the logging settings without re-reading the environment.
"""
# This is called from a locked section of _read_logging_config, so don't call that function or you'll get deadlock
global _LOGGING_ENV_CONF_OVERRIDES
if _LOGGING_ENV_CONF_OVERRIDES is not None:
return _LOGGING_ENV_CONF_OVERRIDES
with _LOGGING_ENV_CONF_OVERRIDES_LOCK:
if _LOGGING_ENV_CONF_OVERRIDES is not None:
return _LOGGING_ENV_CONF_OVERRIDES
level_from_env = os.environ.get("PEYOTL_LOGGING_LEVEL")
format_from_env = os.environ.get("PEYOTL_LOGGING_FORMAT")
log_file_path_from_env = os.environ.get("PEYOTL_LOG_FILE_PATH")
_LOGGING_ENV_CONF_OVERRIDES = {}
if level_from_env:
env_w_list = []
_get_logging_level(level_from_env, env_w_list)
if len(env_w_list) > 0:
if log_init_warnings is not None:
log_init_warnings.extend(env_w_list)
log_init_warnings.append('PEYOTL_LOGGING_LEVEL is invalid. Relying on setting from conf file.')
else:
_LOGGING_ENV_CONF_OVERRIDES.setdefault("logging", {})['level'] = level_from_env
if format_from_env:
env_w_list = []
_get_logging_formatter(format_from_env, env_w_list)
if len(env_w_list) > 0:
if log_init_warnings is not None:
log_init_warnings.extend(env_w_list)
log_init_warnings.append('PEYOTL_LOGGING_FORMAT was invalid. Relying on setting from conf file.')
else:
_LOGGING_ENV_CONF_OVERRIDES.setdefault("logging", {})['formatter'] = format_from_env
if log_file_path_from_env is not None:
_LOGGING_ENV_CONF_OVERRIDES.setdefault("logging", {})['filepath'] = log_file_path_from_env
return _LOGGING_ENV_CONF_OVERRIDES
|
[
"def",
"_logging_env_conf_overrides",
"(",
"log_init_warnings",
"=",
"None",
")",
":",
"# This is called from a locked section of _read_logging_config, so don't call that function or you'll get deadlock",
"global",
"_LOGGING_ENV_CONF_OVERRIDES",
"if",
"_LOGGING_ENV_CONF_OVERRIDES",
"is",
"not",
"None",
":",
"return",
"_LOGGING_ENV_CONF_OVERRIDES",
"with",
"_LOGGING_ENV_CONF_OVERRIDES_LOCK",
":",
"if",
"_LOGGING_ENV_CONF_OVERRIDES",
"is",
"not",
"None",
":",
"return",
"_LOGGING_ENV_CONF_OVERRIDES",
"level_from_env",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PEYOTL_LOGGING_LEVEL\"",
")",
"format_from_env",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PEYOTL_LOGGING_FORMAT\"",
")",
"log_file_path_from_env",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PEYOTL_LOG_FILE_PATH\"",
")",
"_LOGGING_ENV_CONF_OVERRIDES",
"=",
"{",
"}",
"if",
"level_from_env",
":",
"env_w_list",
"=",
"[",
"]",
"_get_logging_level",
"(",
"level_from_env",
",",
"env_w_list",
")",
"if",
"len",
"(",
"env_w_list",
")",
">",
"0",
":",
"if",
"log_init_warnings",
"is",
"not",
"None",
":",
"log_init_warnings",
".",
"extend",
"(",
"env_w_list",
")",
"log_init_warnings",
".",
"append",
"(",
"'PEYOTL_LOGGING_LEVEL is invalid. Relying on setting from conf file.'",
")",
"else",
":",
"_LOGGING_ENV_CONF_OVERRIDES",
".",
"setdefault",
"(",
"\"logging\"",
",",
"{",
"}",
")",
"[",
"'level'",
"]",
"=",
"level_from_env",
"if",
"format_from_env",
":",
"env_w_list",
"=",
"[",
"]",
"_get_logging_formatter",
"(",
"format_from_env",
",",
"env_w_list",
")",
"if",
"len",
"(",
"env_w_list",
")",
">",
"0",
":",
"if",
"log_init_warnings",
"is",
"not",
"None",
":",
"log_init_warnings",
".",
"extend",
"(",
"env_w_list",
")",
"log_init_warnings",
".",
"append",
"(",
"'PEYOTL_LOGGING_FORMAT was invalid. Relying on setting from conf file.'",
")",
"else",
":",
"_LOGGING_ENV_CONF_OVERRIDES",
".",
"setdefault",
"(",
"\"logging\"",
",",
"{",
"}",
")",
"[",
"'formatter'",
"]",
"=",
"format_from_env",
"if",
"log_file_path_from_env",
"is",
"not",
"None",
":",
"_LOGGING_ENV_CONF_OVERRIDES",
".",
"setdefault",
"(",
"\"logging\"",
",",
"{",
"}",
")",
"[",
"'filepath'",
"]",
"=",
"log_file_path_from_env",
"return",
"_LOGGING_ENV_CONF_OVERRIDES"
] | 56.578947 | 22.263158 |
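Illustrative sketch of how the environment overrides are picked up; because the result is cached at module level, the variables must be exported before the first call.

    import os

    os.environ['PEYOTL_LOGGING_LEVEL'] = 'DEBUG'
    warnings = []
    overrides = _logging_env_conf_overrides(log_init_warnings=warnings)
    # expected shape: {'logging': {'level': 'DEBUG'}}; bad values land in warnings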
def transform_qubits(self: TSelf_Operation,
func: Callable[[Qid], Qid]) -> TSelf_Operation:
"""Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function.
"""
return self.with_qubits(*(func(q) for q in self.qubits))
|
[
"def",
"transform_qubits",
"(",
"self",
":",
"TSelf_Operation",
",",
"func",
":",
"Callable",
"[",
"[",
"Qid",
"]",
",",
"Qid",
"]",
")",
"->",
"TSelf_Operation",
":",
"return",
"self",
".",
"with_qubits",
"(",
"*",
"(",
"func",
"(",
"q",
")",
"for",
"q",
"in",
"self",
".",
"qubits",
")",
")"
] | 37.692308 | 22.615385 |
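A minimal usage sketch; cirq and its LineQubit type are assumptions inferred from the Qid annotations above, not confirmed by the source.

    import cirq

    op = cirq.X(cirq.LineQubit(0))
    shifted = op.transform_qubits(lambda q: cirq.LineQubit(q.x + 1))
    # shifted is the same X gate, now acting on LineQubit(1)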
def get_golden_topics(self, lang):
"""Return the topics mastered ("golden") by a user in a language."""
return [topic['title']
for topic in self.user_data.language_data[lang]['skills']
if topic['learned'] and topic['strength'] == 1.0]
|
[
"def",
"get_golden_topics",
"(",
"self",
",",
"lang",
")",
":",
"return",
"[",
"topic",
"[",
"'title'",
"]",
"for",
"topic",
"in",
"self",
".",
"user_data",
".",
"language_data",
"[",
"lang",
"]",
"[",
"'skills'",
"]",
"if",
"topic",
"[",
"'learned'",
"]",
"and",
"topic",
"[",
"'strength'",
"]",
"==",
"1.0",
"]"
] | 55.6 | 14.8 |
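Hedged call sketch; `client` stands for an instance exposing this method with populated user_data and is not taken from the source.

    golden = client.get_golden_topics('fr')
    # e.g. ['Basics 1', 'Phrases'] -- learned topics whose strength == 1.0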
def find_by_dynamic_locator(self, template_locator, variables, find_all=False, search_object=None):
'''
Find with dynamic locator
@type template_locator: webdriverwrapper.support.locator.Locator
@param template_locator: Template locator w/ formatting bits to insert
@type variables: dict
@param variables: Dictionary of variable substitutions
@type find_all: bool
@param find_all: True to find all elements immediately, False for find single element only
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with.
If null, search will be on self.driver
@rtype: webdriverwrapper.WebElementWrapper or list()
@return: Single WebElemetnWrapper if find_all is False,
list of WebElementWrappers if find_all is True
'''
template_variable_character = '%'
# raise an exception if user passed non-dictionary variables
if not isinstance(variables, dict):
raise TypeError('You must use a dictionary to populate locator variables')
# replace all variables that match the keys in 'variables' dict
locator = ""
for key in variables.keys():
locator = template_locator.replace(template_variable_character + key, variables[key])
return self.find(locator, find_all, search_object)
|
[
"def",
"find_by_dynamic_locator",
"(",
"self",
",",
"template_locator",
",",
"variables",
",",
"find_all",
"=",
"False",
",",
"search_object",
"=",
"None",
")",
":",
"template_variable_character",
"=",
"'%'",
"# raise an exception if user passed non-dictionary variables",
"if",
"not",
"isinstance",
"(",
"variables",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'You must use a dictionary to populate locator variables'",
")",
"# replace all variables that match the keys in 'variables' dict",
"locator",
"=",
"\"\"",
"for",
"key",
"in",
"variables",
".",
"keys",
"(",
")",
":",
"locator",
"=",
"template_locator",
".",
"replace",
"(",
"template_variable_character",
"+",
"key",
",",
"variables",
"[",
"key",
"]",
")",
"return",
"self",
".",
"find",
"(",
"locator",
",",
"find_all",
",",
"search_object",
")"
] | 55.344828 | 32.517241 |
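The substitution logic in isolation, mirroring the loop above ('%' marks a template variable); the locator and values are invented for illustration.

    template = "//div[@id='%section']//a[text()='%label']"
    variables = {'section': 'nav', 'label': 'Home'}
    locator = template
    for key in variables:
        locator = locator.replace('%' + key, variables[key])
    # locator == "//div[@id='nav']//a[text()='Home']"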
def get_xy(self, xy, addr=True):
"""Get the agent with xy-coordinate in the grid. If *addr* is True,
returns only the agent's address.
If no such agent in the grid, returns None.
:raises:
:exc:`ValueError` if xy-coordinate is outside the environment's
grid.
"""
x = xy[0]
y = xy[1]
if x < self.origin[0] or x >= self.origin[0] + self.gs[0]:
raise ValueError("x-coordinate inappropriate ({})".format(x))
if y < self.origin[1] or y >= self.origin[1] + self.gs[1]:
raise ValueError("y-coordinate inappropriate ({})".format(y))
i = x - self.origin[0]
j = y - self.origin[1]
if addr:
return self.grid[i][j].addr
return self.grid[i][j]
|
[
"def",
"get_xy",
"(",
"self",
",",
"xy",
",",
"addr",
"=",
"True",
")",
":",
"x",
"=",
"xy",
"[",
"0",
"]",
"y",
"=",
"xy",
"[",
"1",
"]",
"if",
"x",
"<",
"self",
".",
"origin",
"[",
"0",
"]",
"or",
"x",
">=",
"self",
".",
"origin",
"[",
"0",
"]",
"+",
"self",
".",
"gs",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"x-coordinate inappropriate ({})\"",
".",
"format",
"(",
"x",
")",
")",
"if",
"y",
"<",
"self",
".",
"origin",
"[",
"1",
"]",
"or",
"y",
">=",
"self",
".",
"origin",
"[",
"1",
"]",
"+",
"self",
".",
"gs",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"y-coordinate inappropriate ({})\"",
".",
"format",
"(",
"y",
")",
")",
"i",
"=",
"x",
"-",
"self",
".",
"origin",
"[",
"0",
"]",
"j",
"=",
"y",
"-",
"self",
".",
"origin",
"[",
"1",
"]",
"if",
"addr",
":",
"return",
"self",
".",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"addr",
"return",
"self",
".",
"grid",
"[",
"i",
"]",
"[",
"j",
"]"
] | 35.227273 | 20.045455 |
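Illustrative sketch; `env` stands for an assumed environment instance with origin (0, 0) and grid size gs (10, 10).

    addr = env.get_xy((3, 4))               # address of the agent at cell (3, 4)
    agent = env.get_xy((3, 4), addr=False)  # the agent object itself
    env.get_xy((42, 0))                     # raises ValueError: outside the grid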
def register_as_type(self, locator, object_factory):
"""
Registers a component using its type (a constructor function).
:param locator: a locator to identify component to be created.
:param object_factory: a component type.
"""
        if locator is None:
raise Exception("Locator cannot be null")
        if object_factory is None:
raise Exception("Factory cannot be null")
def factory(locator):
return object_factory()
self._registrations.append(Registration(locator, factory))
|
[
"def",
"register_as_type",
"(",
"self",
",",
"locator",
",",
"object_factory",
")",
":",
"if",
"locator",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Locator cannot be null\"",
")",
"if",
"object_factory",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Factory cannot be null\"",
")",
"def",
"factory",
"(",
"locator",
")",
":",
"return",
"object_factory",
"(",
")",
"self",
".",
"_registrations",
".",
"append",
"(",
"Registration",
"(",
"locator",
",",
"factory",
")",
")"
] | 32.882353 | 19.235294 |
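Hedged registration sketch; `references`, `my_locator` and MyController are illustrative stand-ins for a container, a locator value and a component class.

    references.register_as_type(my_locator, MyController)
    # each later resolution of my_locator calls MyController() for a fresh instance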
def validate_units(self):
"""Ensure that wavelenth unit belongs to the correct class.
There is no check for throughput because it is unitless.
Raises
------
TypeError
Wavelength unit is not `~pysynphot.units.WaveUnits`.
"""
if (not isinstance(self.waveunits, units.WaveUnits)):
raise TypeError("%s is not a valid WaveUnit" % self.waveunits)
|
[
"def",
"validate_units",
"(",
"self",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"self",
".",
"waveunits",
",",
"units",
".",
"WaveUnits",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"%s is not a valid WaveUnit\"",
"%",
"self",
".",
"waveunits",
")"
] | 34.25 | 22.75 |
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help()
text_format = gf.safe_unicode(self.actual_arguments[0])
if text_format == u"list":
text = gf.safe_unicode(self.actual_arguments[1])
elif text_format in TextFileFormat.ALLOWED_VALUES:
text = self.actual_arguments[1]
if not self.check_input_file(text):
return self.ERROR_EXIT_CODE
else:
return self.print_help()
l1_id_regex = self.has_option_with_value(u"--l1-id-regex")
l2_id_regex = self.has_option_with_value(u"--l2-id-regex")
l3_id_regex = self.has_option_with_value(u"--l3-id-regex")
id_regex = self.has_option_with_value(u"--id-regex")
id_format = self.has_option_with_value(u"--id-format")
class_regex = self.has_option_with_value(u"--class-regex")
sort = self.has_option_with_value(u"--sort")
parameters = {
gc.PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX: l1_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX: l2_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX: l3_id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX: id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX: class_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT: sort,
gc.PPN_TASK_OS_FILE_ID_REGEX: id_format
}
if (text_format == TextFileFormat.MUNPARSED) and ((l1_id_regex is None) or (l2_id_regex is None) or (l3_id_regex is None)):
self.print_error(u"You must specify --l1-id-regex and --l2-id-regex and --l3-id-regex for munparsed format")
return self.ERROR_EXIT_CODE
if (text_format == TextFileFormat.UNPARSED) and (id_regex is None) and (class_regex is None):
self.print_error(u"You must specify --id-regex and/or --class-regex for unparsed format")
return self.ERROR_EXIT_CODE
if (text_format in [TextFileFormat.PLAIN, TextFileFormat.SUBTITLES]) and (id_format is not None):
try:
identifier = id_format % 1
except (TypeError, ValueError):
self.print_error(u"The given string '%s' is not a valid id format" % id_format)
return self.ERROR_EXIT_CODE
text_file = self.get_text_file(text_format, text, parameters)
if text_file is None:
self.print_error(u"Unable to build a TextFile from the given parameters")
elif len(text_file) == 0:
self.print_error(u"No text fragments found")
else:
self.print_generic(text_file.__unicode__())
return self.NO_ERROR_EXIT_CODE
return self.ERROR_EXIT_CODE
|
[
"def",
"perform_command",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"actual_arguments",
")",
"<",
"2",
":",
"return",
"self",
".",
"print_help",
"(",
")",
"text_format",
"=",
"gf",
".",
"safe_unicode",
"(",
"self",
".",
"actual_arguments",
"[",
"0",
"]",
")",
"if",
"text_format",
"==",
"u\"list\"",
":",
"text",
"=",
"gf",
".",
"safe_unicode",
"(",
"self",
".",
"actual_arguments",
"[",
"1",
"]",
")",
"elif",
"text_format",
"in",
"TextFileFormat",
".",
"ALLOWED_VALUES",
":",
"text",
"=",
"self",
".",
"actual_arguments",
"[",
"1",
"]",
"if",
"not",
"self",
".",
"check_input_file",
"(",
"text",
")",
":",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"else",
":",
"return",
"self",
".",
"print_help",
"(",
")",
"l1_id_regex",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--l1-id-regex\"",
")",
"l2_id_regex",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--l2-id-regex\"",
")",
"l3_id_regex",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--l3-id-regex\"",
")",
"id_regex",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--id-regex\"",
")",
"id_format",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--id-format\"",
")",
"class_regex",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--class-regex\"",
")",
"sort",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--sort\"",
")",
"parameters",
"=",
"{",
"gc",
".",
"PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX",
":",
"l1_id_regex",
",",
"gc",
".",
"PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX",
":",
"l2_id_regex",
",",
"gc",
".",
"PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX",
":",
"l3_id_regex",
",",
"gc",
".",
"PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX",
":",
"id_regex",
",",
"gc",
".",
"PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX",
":",
"class_regex",
",",
"gc",
".",
"PPN_TASK_IS_TEXT_UNPARSED_ID_SORT",
":",
"sort",
",",
"gc",
".",
"PPN_TASK_OS_FILE_ID_REGEX",
":",
"id_format",
"}",
"if",
"(",
"text_format",
"==",
"TextFileFormat",
".",
"MUNPARSED",
")",
"and",
"(",
"(",
"l1_id_regex",
"is",
"None",
")",
"or",
"(",
"l2_id_regex",
"is",
"None",
")",
"or",
"(",
"l3_id_regex",
"is",
"None",
")",
")",
":",
"self",
".",
"print_error",
"(",
"u\"You must specify --l1-id-regex and --l2-id-regex and --l3-id-regex for munparsed format\"",
")",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"if",
"(",
"text_format",
"==",
"TextFileFormat",
".",
"UNPARSED",
")",
"and",
"(",
"id_regex",
"is",
"None",
")",
"and",
"(",
"class_regex",
"is",
"None",
")",
":",
"self",
".",
"print_error",
"(",
"u\"You must specify --id-regex and/or --class-regex for unparsed format\"",
")",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"if",
"(",
"text_format",
"in",
"[",
"TextFileFormat",
".",
"PLAIN",
",",
"TextFileFormat",
".",
"SUBTITLES",
"]",
")",
"and",
"(",
"id_format",
"is",
"not",
"None",
")",
":",
"try",
":",
"identifier",
"=",
"id_format",
"%",
"1",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"self",
".",
"print_error",
"(",
"u\"The given string '%s' is not a valid id format\"",
"%",
"id_format",
")",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"text_file",
"=",
"self",
".",
"get_text_file",
"(",
"text_format",
",",
"text",
",",
"parameters",
")",
"if",
"text_file",
"is",
"None",
":",
"self",
".",
"print_error",
"(",
"u\"Unable to build a TextFile from the given parameters\"",
")",
"elif",
"len",
"(",
"text_file",
")",
"==",
"0",
":",
"self",
".",
"print_error",
"(",
"u\"No text fragments found\"",
")",
"else",
":",
"self",
".",
"print_generic",
"(",
"text_file",
".",
"__unicode__",
"(",
")",
")",
"return",
"self",
".",
"NO_ERROR_EXIT_CODE",
"return",
"self",
".",
"ERROR_EXIT_CODE"
] | 49.946429 | 22.446429 |
def get_first_record(
cls,
download: bool=True
) -> NistBeaconValue:
"""
Get the first (oldest) record available. Since the first record
IS a known value in the system we can load it from constants.
:param download: 'True' will always reach out to NIST to get the
first record. 'False' returns a local copy.
:return: The first beacon value. 'None' otherwise.
"""
if download:
return NistBeacon.get_record(cls._INIT_RECORD.timestamp)
else:
return NistBeaconValue.from_json(cls._INIT_RECORD.json)
|
[
"def",
"get_first_record",
"(",
"cls",
",",
"download",
":",
"bool",
"=",
"True",
")",
"->",
"NistBeaconValue",
":",
"if",
"download",
":",
"return",
"NistBeacon",
".",
"get_record",
"(",
"cls",
".",
"_INIT_RECORD",
".",
"timestamp",
")",
"else",
":",
"return",
"NistBeaconValue",
".",
"from_json",
"(",
"cls",
".",
"_INIT_RECORD",
".",
"json",
")"
] | 36.529412 | 22.764706 |
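Usage sketch grounded in the signature above: fetch the oldest beacon record either live or from the baked-in local constant.

    first_local = NistBeacon.get_first_record(download=False)  # no network call
    first_live = NistBeacon.get_first_record()                 # queries the NIST API
    print(first_local.timestamp)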
def _get(self, scheme, host, port, path, assert_key=None):
"""
Execute a ES API call. Convert response into JSON and
optionally assert its structure.
"""
url = '%s://%s:%i/%s' % (scheme, host, port, path)
try:
request = urllib2.Request(url)
if self.config['user'] and self.config['password']:
base64string = base64.standard_b64encode(
'%s:%s' % (self.config['user'], self.config['password']))
request.add_header("Authorization", "Basic %s" % base64string)
response = urllib2.urlopen(request)
except Exception as err:
self.log.error("%s: %s" % (url, err))
return False
try:
doc = json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as a" +
" json object")
return False
if assert_key and assert_key not in doc:
self.log.error("Bad response from elasticsearch, expected key "
"'%s' was missing for %s" % (assert_key, url))
return False
return doc
|
[
"def",
"_get",
"(",
"self",
",",
"scheme",
",",
"host",
",",
"port",
",",
"path",
",",
"assert_key",
"=",
"None",
")",
":",
"url",
"=",
"'%s://%s:%i/%s'",
"%",
"(",
"scheme",
",",
"host",
",",
"port",
",",
"path",
")",
"try",
":",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"url",
")",
"if",
"self",
".",
"config",
"[",
"'user'",
"]",
"and",
"self",
".",
"config",
"[",
"'password'",
"]",
":",
"base64string",
"=",
"base64",
".",
"standard_b64encode",
"(",
"'%s:%s'",
"%",
"(",
"self",
".",
"config",
"[",
"'user'",
"]",
",",
"self",
".",
"config",
"[",
"'password'",
"]",
")",
")",
"request",
".",
"add_header",
"(",
"\"Authorization\"",
",",
"\"Basic %s\"",
"%",
"base64string",
")",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"request",
")",
"except",
"Exception",
"as",
"err",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"%s: %s\"",
"%",
"(",
"url",
",",
"err",
")",
")",
"return",
"False",
"try",
":",
"doc",
"=",
"json",
".",
"load",
"(",
"response",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Unable to parse response from elasticsearch as a\"",
"+",
"\" json object\"",
")",
"return",
"False",
"if",
"assert_key",
"and",
"assert_key",
"not",
"in",
"doc",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Bad response from elasticsearch, expected key \"",
"\"'%s' was missing for %s\"",
"%",
"(",
"assert_key",
",",
"url",
")",
")",
"return",
"False",
"return",
"doc"
] | 41.068966 | 18.103448 |
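Illustrative call mirroring the helper above; `collector` is an assumed configured instance, and the cluster-health path and 'status' key are typical Elasticsearch values rather than anything from the source.

    doc = collector._get('http', 'localhost', 9200,
                         '_cluster/health', assert_key='status')
    if doc is not False:
        print(doc['status'])  # e.g. 'green'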
def selectitem(self, window_name, object_name, item_name):
"""
Select combo box / layered pane item
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param item_name: Item name to select
        @type item_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
self._grabfocus(object_handle.AXWindow)
try:
object_handle.Press()
except AttributeError:
# AXPress doesn't work with Instruments
# So did the following work around
x, y, width, height = self._getobjectsize(object_handle)
# Mouse left click on the object
# Note: x + width/2, y + height / 2 doesn't work
self.generatemouseevent(x + 5, y + 5, "b1c")
self.wait(5)
handle = self._get_sub_menu_handle(object_handle, item_name)
x, y, width, height = self._getobjectsize(handle)
# on OSX 10.7 default "b1c" doesn't work
# so using "b1d", verified with Fusion test, this works
self.generatemouseevent(x + 5, y + 5, "b1d")
return 1
# Required for menuitem to appear in accessibility list
self.wait(1)
menu_list = re.split(";", item_name)
try:
menu_handle = self._internal_menu_handler(object_handle, menu_list,
True)
# Required for menuitem to appear in accessibility list
self.wait(1)
if not menu_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % \
menu_list[-1])
menu_handle.Press()
except LdtpServerException:
object_handle.activate()
object_handle.sendKey(AXKeyCodeConstants.ESCAPE)
raise
return 1
|
[
"def",
"selectitem",
"(",
"self",
",",
"window_name",
",",
"object_name",
",",
"item_name",
")",
":",
"object_handle",
"=",
"self",
".",
"_get_object_handle",
"(",
"window_name",
",",
"object_name",
")",
"if",
"not",
"object_handle",
".",
"AXEnabled",
":",
"raise",
"LdtpServerException",
"(",
"u\"Object %s state disabled\"",
"%",
"object_name",
")",
"self",
".",
"_grabfocus",
"(",
"object_handle",
".",
"AXWindow",
")",
"try",
":",
"object_handle",
".",
"Press",
"(",
")",
"except",
"AttributeError",
":",
"# AXPress doesn't work with Instruments",
"# So did the following work around",
"x",
",",
"y",
",",
"width",
",",
"height",
"=",
"self",
".",
"_getobjectsize",
"(",
"object_handle",
")",
"# Mouse left click on the object",
"# Note: x + width/2, y + height / 2 doesn't work",
"self",
".",
"generatemouseevent",
"(",
"x",
"+",
"5",
",",
"y",
"+",
"5",
",",
"\"b1c\"",
")",
"self",
".",
"wait",
"(",
"5",
")",
"handle",
"=",
"self",
".",
"_get_sub_menu_handle",
"(",
"object_handle",
",",
"item_name",
")",
"x",
",",
"y",
",",
"width",
",",
"height",
"=",
"self",
".",
"_getobjectsize",
"(",
"handle",
")",
"# on OSX 10.7 default \"b1c\" doesn't work",
"# so using \"b1d\", verified with Fusion test, this works",
"self",
".",
"generatemouseevent",
"(",
"x",
"+",
"5",
",",
"y",
"+",
"5",
",",
"\"b1d\"",
")",
"return",
"1",
"# Required for menuitem to appear in accessibility list",
"self",
".",
"wait",
"(",
"1",
")",
"menu_list",
"=",
"re",
".",
"split",
"(",
"\";\"",
",",
"item_name",
")",
"try",
":",
"menu_handle",
"=",
"self",
".",
"_internal_menu_handler",
"(",
"object_handle",
",",
"menu_list",
",",
"True",
")",
"# Required for menuitem to appear in accessibility list",
"self",
".",
"wait",
"(",
"1",
")",
"if",
"not",
"menu_handle",
".",
"AXEnabled",
":",
"raise",
"LdtpServerException",
"(",
"u\"Object %s state disabled\"",
"%",
"menu_list",
"[",
"-",
"1",
"]",
")",
"menu_handle",
".",
"Press",
"(",
")",
"except",
"LdtpServerException",
":",
"object_handle",
".",
"activate",
"(",
")",
"object_handle",
".",
"sendKey",
"(",
"AXKeyCodeConstants",
".",
"ESCAPE",
")",
"raise",
"return",
"1"
] | 43.132075 | 16.716981 |
def __fetch_1_27(self, from_date=None):
"""Fetch the pages from the backend url for MediaWiki >=1.27
The method retrieves, from a MediaWiki url, the
wiki pages.
:returns: a generator of pages
"""
logger.info("Looking for pages at url '%s'", self.url)
npages = 0 # number of pages processed
tpages = 0 # number of total pages
pages_done = [] # pages already retrieved in reviews API
namespaces_contents = self.__get_namespaces_contents()
arvcontinue = '' # pagination for getting revisions and their pages
while arvcontinue is not None:
raw_pages = self.client.get_pages_from_allrevisions(namespaces_contents, from_date, arvcontinue)
data_json = json.loads(raw_pages)
arvcontinue = data_json['continue']['arvcontinue'] if 'continue' in data_json else None
pages_json = data_json['query']['allrevisions']
for page in pages_json:
if page['pageid'] in pages_done:
logger.debug("Page %s already processed; skipped", page['pageid'])
continue
tpages += 1
pages_done.append(page['pageid'])
page_reviews = self.__get_page_reviews(page)
if not page_reviews:
logger.warning("Revisions not found in %s [page id: %s], page skipped",
page['title'], page['pageid'])
continue
yield page_reviews
npages += 1
logger.info("Total number of pages: %i, skipped %i", tpages, tpages - npages)
|
[
"def",
"__fetch_1_27",
"(",
"self",
",",
"from_date",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"Looking for pages at url '%s'\"",
",",
"self",
".",
"url",
")",
"npages",
"=",
"0",
"# number of pages processed",
"tpages",
"=",
"0",
"# number of total pages",
"pages_done",
"=",
"[",
"]",
"# pages already retrieved in reviews API",
"namespaces_contents",
"=",
"self",
".",
"__get_namespaces_contents",
"(",
")",
"arvcontinue",
"=",
"''",
"# pagination for getting revisions and their pages",
"while",
"arvcontinue",
"is",
"not",
"None",
":",
"raw_pages",
"=",
"self",
".",
"client",
".",
"get_pages_from_allrevisions",
"(",
"namespaces_contents",
",",
"from_date",
",",
"arvcontinue",
")",
"data_json",
"=",
"json",
".",
"loads",
"(",
"raw_pages",
")",
"arvcontinue",
"=",
"data_json",
"[",
"'continue'",
"]",
"[",
"'arvcontinue'",
"]",
"if",
"'continue'",
"in",
"data_json",
"else",
"None",
"pages_json",
"=",
"data_json",
"[",
"'query'",
"]",
"[",
"'allrevisions'",
"]",
"for",
"page",
"in",
"pages_json",
":",
"if",
"page",
"[",
"'pageid'",
"]",
"in",
"pages_done",
":",
"logger",
".",
"debug",
"(",
"\"Page %s already processed; skipped\"",
",",
"page",
"[",
"'pageid'",
"]",
")",
"continue",
"tpages",
"+=",
"1",
"pages_done",
".",
"append",
"(",
"page",
"[",
"'pageid'",
"]",
")",
"page_reviews",
"=",
"self",
".",
"__get_page_reviews",
"(",
"page",
")",
"if",
"not",
"page_reviews",
":",
"logger",
".",
"warning",
"(",
"\"Revisions not found in %s [page id: %s], page skipped\"",
",",
"page",
"[",
"'title'",
"]",
",",
"page",
"[",
"'pageid'",
"]",
")",
"continue",
"yield",
"page_reviews",
"npages",
"+=",
"1",
"logger",
".",
"info",
"(",
"\"Total number of pages: %i, skipped %i\"",
",",
"tpages",
",",
"tpages",
"-",
"npages",
")"
] | 38.880952 | 24.190476 |
def update(self, activity_sid=values.unset, attributes=values.unset,
friendly_name=values.unset,
reject_pending_reservations=values.unset):
"""
Update the WorkerInstance
:param unicode activity_sid: The activity_sid
:param unicode attributes: The attributes
:param unicode friendly_name: The friendly_name
:param bool reject_pending_reservations: The reject_pending_reservations
:returns: Updated WorkerInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance
"""
return self._proxy.update(
activity_sid=activity_sid,
attributes=attributes,
friendly_name=friendly_name,
reject_pending_reservations=reject_pending_reservations,
)
|
[
"def",
"update",
"(",
"self",
",",
"activity_sid",
"=",
"values",
".",
"unset",
",",
"attributes",
"=",
"values",
".",
"unset",
",",
"friendly_name",
"=",
"values",
".",
"unset",
",",
"reject_pending_reservations",
"=",
"values",
".",
"unset",
")",
":",
"return",
"self",
".",
"_proxy",
".",
"update",
"(",
"activity_sid",
"=",
"activity_sid",
",",
"attributes",
"=",
"attributes",
",",
"friendly_name",
"=",
"friendly_name",
",",
"reject_pending_reservations",
"=",
"reject_pending_reservations",
",",
")"
] | 39.75 | 15.85 |
def tag(self, resource_id):
"""Update the request URI to include the Tag for specific retrieval.
Args:
resource_id (string): The tag name.
"""
self._request_uri = '{}/{}'.format(self._request_uri, self.tcex.safetag(resource_id))
|
[
"def",
"tag",
"(",
"self",
",",
"resource_id",
")",
":",
"self",
".",
"_request_uri",
"=",
"'{}/{}'",
".",
"format",
"(",
"self",
".",
"_request_uri",
",",
"self",
".",
"tcex",
".",
"safetag",
"(",
"resource_id",
")",
")"
] | 38.142857 | 20 |
def build_docs(location="doc-source", target=None, library="icetea_lib"):
"""
Build documentation for Icetea. Start by autogenerating module documentation
and finish by building html.
:param location: Documentation source
:param target: Documentation target path
:param library: Library location for autodoc.
    :return: 3 if something fails, 0 if successful.
"""
cmd_ar = ["sphinx-apidoc", "-o", location, library]
try:
print("Generating api docs.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
"'pip install sphinx'.")
return 3
target = "doc{}html".format(os.sep) if target is None else target
cmd_ar = ["sphinx-build", "-b", "html", location, target]
try:
print("Building html documentation.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
"'pip install sphinx'.")
return 3
print("Documentation built.")
return 0
|
[
"def",
"build_docs",
"(",
"location",
"=",
"\"doc-source\"",
",",
"target",
"=",
"None",
",",
"library",
"=",
"\"icetea_lib\"",
")",
":",
"cmd_ar",
"=",
"[",
"\"sphinx-apidoc\"",
",",
"\"-o\"",
",",
"location",
",",
"library",
"]",
"try",
":",
"print",
"(",
"\"Generating api docs.\"",
")",
"retcode",
"=",
"check_call",
"(",
"cmd_ar",
")",
"except",
"CalledProcessError",
"as",
"error",
":",
"print",
"(",
"\"Documentation build failed. Return code: {}\"",
".",
"format",
"(",
"error",
".",
"returncode",
")",
")",
"return",
"3",
"except",
"OSError",
"as",
"error",
":",
"print",
"(",
"error",
")",
"print",
"(",
"\"Documentation build failed. Are you missing Sphinx? Please install sphinx using \"",
"\"'pip install sphinx'.\"",
")",
"return",
"3",
"target",
"=",
"\"doc{}html\"",
".",
"format",
"(",
"os",
".",
"sep",
")",
"if",
"target",
"is",
"None",
"else",
"target",
"cmd_ar",
"=",
"[",
"\"sphinx-build\"",
",",
"\"-b\"",
",",
"\"html\"",
",",
"location",
",",
"target",
"]",
"try",
":",
"print",
"(",
"\"Building html documentation.\"",
")",
"retcode",
"=",
"check_call",
"(",
"cmd_ar",
")",
"except",
"CalledProcessError",
"as",
"error",
":",
"print",
"(",
"\"Documentation build failed. Return code: {}\"",
".",
"format",
"(",
"error",
".",
"returncode",
")",
")",
"return",
"3",
"except",
"OSError",
"as",
"error",
":",
"print",
"(",
"error",
")",
"print",
"(",
"\"Documentation build failed. Are you missing Sphinx? Please install sphinx using \"",
"\"'pip install sphinx'.\"",
")",
"return",
"3",
"print",
"(",
"\"Documentation built.\"",
")",
"return",
"0"
] | 38.473684 | 19.368421 |
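Hedged usage sketch based on the signature and return codes shown above.

    rc = build_docs(location="doc-source", target="build/html")
    if rc != 0:
        raise SystemExit("documentation build failed (is sphinx installed?)")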
def checkpoint(global_model, local_model=None):
"""Checkpoint the model.
This means we finished a stage of execution.
    Every time we call checkpoint, there is a version number which will increase by one.
Parameters
----------
    global_model: any type that can be pickled
        globally shared model/state when calling this function,
        the caller needs to guarantee that global_model is the same in all nodes
    local_model: any type that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault-tolerance.
This will bring replication cost in checkpoint function.
    while global_model does not need explicit replication.
It is recommended to use global_model if possible.
"""
sglobal = pickle.dumps(global_model)
if local_model is None:
_LIB.RabitCheckPoint(sglobal, len(sglobal), None, 0)
del sglobal
else:
slocal = pickle.dumps(local_model)
_LIB.RabitCheckPoint(sglobal, len(sglobal), slocal, len(slocal))
del slocal
del sglobal
|
[
"def",
"checkpoint",
"(",
"global_model",
",",
"local_model",
"=",
"None",
")",
":",
"sglobal",
"=",
"pickle",
".",
"dumps",
"(",
"global_model",
")",
"if",
"local_model",
"is",
"None",
":",
"_LIB",
".",
"RabitCheckPoint",
"(",
"sglobal",
",",
"len",
"(",
"sglobal",
")",
",",
"None",
",",
"0",
")",
"del",
"sglobal",
"else",
":",
"slocal",
"=",
"pickle",
".",
"dumps",
"(",
"local_model",
")",
"_LIB",
".",
"RabitCheckPoint",
"(",
"sglobal",
",",
"len",
"(",
"sglobal",
")",
",",
"slocal",
",",
"len",
"(",
"slocal",
")",
")",
"del",
"slocal",
"del",
"sglobal"
] | 36.40625 | 21.6875 |
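Illustrative sketch, assuming rabit has already been initialized elsewhere; the model contents are invented.

    model = {'round': 10, 'weights': [0.1, 0.2, 0.3]}
    checkpoint(model)                                  # global state only
    checkpoint(model, local_model={'rank_cache': []})  # with per-rank state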
def run_cutadapt(job, fastqs, univ_options, cutadapt_options):
"""
This module runs cutadapt on the input RNA fastq files and then calls the RNA aligners.
ARGUMENTS
1. fastqs: Dict of list of input RNA-Seq fastqs
fastqs
+- 'tumor_rna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. cutadapt_options: Dict of parameters specific to cutadapt
cutadapt_options
|- 'a': <sequence of 3' adapter to trim from fwd read>
+- 'A': <sequence of 3' adapter to trim from rev read>
RETURN VALUES
1. output_files: Dict of cutadapted fastqs
output_files
|- 'rna_cutadapt_1.fastq': <JSid>
+- 'rna_cutadapt_2.fastq': <JSid>
This module corresponds to node 2 on the tree
"""
    job.fileStore.logToMaster('Running cutadapt on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
fq_extn = '.gz' if fastqs['gzipped'] else ''
input_files = {
'rna_1.fastq' + fq_extn: fastqs['tumor_rna'][0],
'rna_2.fastq' + fq_extn: fastqs['tumor_rna'][1]}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['-a', cutadapt_options['a'], # Fwd read 3' adapter
'-A', cutadapt_options['A'], # Rev read 3' adapter
'-m', '35', # Minimum size of read
'-o', docker_path('rna_cutadapt_1.fastq'), # Output for R1
'-p', docker_path('rna_cutadapt_2.fastq'), # Output for R2
input_files['rna_1.fastq'],
input_files['rna_2.fastq']]
docker_call(tool='cutadapt', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for fastq_file in ['rna_cutadapt_1.fastq', 'rna_cutadapt_2.fastq']:
output_files[fastq_file] = job.fileStore.writeGlobalFile('/'.join([work_dir, fastq_file]))
return output_files
|
[
"def",
"run_cutadapt",
"(",
"job",
",",
"fastqs",
",",
"univ_options",
",",
"cutadapt_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running cutadapt on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"fq_extn",
"=",
"'.gz'",
"if",
"fastqs",
"[",
"'gzipped'",
"]",
"else",
"''",
"input_files",
"=",
"{",
"'rna_1.fastq'",
"+",
"fq_extn",
":",
"fastqs",
"[",
"'tumor_rna'",
"]",
"[",
"0",
"]",
",",
"'rna_2.fastq'",
"+",
"fq_extn",
":",
"fastqs",
"[",
"'tumor_rna'",
"]",
"[",
"1",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'-a'",
",",
"cutadapt_options",
"[",
"'a'",
"]",
",",
"# Fwd read 3' adapter",
"'-A'",
",",
"cutadapt_options",
"[",
"'A'",
"]",
",",
"# Rev read 3' adapter",
"'-m'",
",",
"'35'",
",",
"# Minimum size of read",
"'-o'",
",",
"docker_path",
"(",
"'rna_cutadapt_1.fastq'",
")",
",",
"# Output for R1",
"'-p'",
",",
"docker_path",
"(",
"'rna_cutadapt_2.fastq'",
")",
",",
"# Output for R2",
"input_files",
"[",
"'rna_1.fastq'",
"]",
",",
"input_files",
"[",
"'rna_2.fastq'",
"]",
"]",
"docker_call",
"(",
"tool",
"=",
"'cutadapt'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"for",
"fastq_file",
"in",
"[",
"'rna_cutadapt_1.fastq'",
",",
"'rna_cutadapt_2.fastq'",
"]",
":",
"output_files",
"[",
"fastq_file",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"fastq_file",
"]",
")",
")",
"return",
"output_files"
] | 48.488372 | 21.837209 |
def write(self, text: str):
"""
Prints text to the screen.
Supports colors by using the color constants.
To use colors, add the color before the text you want to print.
:param text: The text to print.
"""
# Default color is NORMAL.
last_color = (self._DARK_CODE, 0)
# We use splitlines with keepends in order to keep the line breaks.
# Then we split by using the console width.
original_lines = text.splitlines(True)
lines = self._split_lines(original_lines) if self._width_limit else original_lines
# Print the new width-formatted lines.
for line in lines:
# Print indents only at line beginnings.
if not self._in_line:
self._writer.write(' ' * self.indents_sum)
# Remove colors if needed.
if not self._colors:
for color_code in self._ANSI_REGEXP.findall(line):
line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
elif not self._ANSI_REGEXP.match(line):
# Check if the line starts with a color. If not, we apply the color from the last line.
line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
# Print the final line.
self._writer.write(line)
# Update the in_line status.
self._in_line = not line.endswith(self.LINE_SEP)
# Update the last color used.
if self._colors:
last_color = self._ANSI_REGEXP.findall(line)[-1]
# Update last position (if there was no line break in the end).
if len(lines) > 0:
last_line = lines[-1]
if not last_line.endswith(self.LINE_SEP):
# Strip the colors to figure out the real number of characters in the line.
if self._colors:
for color_code in self._ANSI_REGEXP.findall(last_line):
last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
self._last_position += len(last_line)
else:
self._last_position = 0
self._is_first_line = False
else:
self._last_position = 0
# Reset colors for the next print.
if self._colors and not text.endswith(self.NORMAL):
self._writer.write(self.NORMAL)
|
[
"def",
"write",
"(",
"self",
",",
"text",
":",
"str",
")",
":",
"# Default color is NORMAL.",
"last_color",
"=",
"(",
"self",
".",
"_DARK_CODE",
",",
"0",
")",
"# We use splitlines with keepends in order to keep the line breaks.",
"# Then we split by using the console width.",
"original_lines",
"=",
"text",
".",
"splitlines",
"(",
"True",
")",
"lines",
"=",
"self",
".",
"_split_lines",
"(",
"original_lines",
")",
"if",
"self",
".",
"_width_limit",
"else",
"original_lines",
"# Print the new width-formatted lines.",
"for",
"line",
"in",
"lines",
":",
"# Print indents only at line beginnings.",
"if",
"not",
"self",
".",
"_in_line",
":",
"self",
".",
"_writer",
".",
"write",
"(",
"' '",
"*",
"self",
".",
"indents_sum",
")",
"# Remove colors if needed.",
"if",
"not",
"self",
".",
"_colors",
":",
"for",
"color_code",
"in",
"self",
".",
"_ANSI_REGEXP",
".",
"findall",
"(",
"line",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"self",
".",
"_ANSI_COLOR_CODE",
"%",
"(",
"color_code",
"[",
"0",
"]",
",",
"int",
"(",
"color_code",
"[",
"1",
"]",
")",
")",
",",
"''",
")",
"elif",
"not",
"self",
".",
"_ANSI_REGEXP",
".",
"match",
"(",
"line",
")",
":",
"# Check if the line starts with a color. If not, we apply the color from the last line.",
"line",
"=",
"self",
".",
"_ANSI_COLOR_CODE",
"%",
"(",
"last_color",
"[",
"0",
"]",
",",
"int",
"(",
"last_color",
"[",
"1",
"]",
")",
")",
"+",
"line",
"# Print the final line.",
"self",
".",
"_writer",
".",
"write",
"(",
"line",
")",
"# Update the in_line status.",
"self",
".",
"_in_line",
"=",
"not",
"line",
".",
"endswith",
"(",
"self",
".",
"LINE_SEP",
")",
"# Update the last color used.",
"if",
"self",
".",
"_colors",
":",
"last_color",
"=",
"self",
".",
"_ANSI_REGEXP",
".",
"findall",
"(",
"line",
")",
"[",
"-",
"1",
"]",
"# Update last position (if there was no line break in the end).",
"if",
"len",
"(",
"lines",
")",
">",
"0",
":",
"last_line",
"=",
"lines",
"[",
"-",
"1",
"]",
"if",
"not",
"last_line",
".",
"endswith",
"(",
"self",
".",
"LINE_SEP",
")",
":",
"# Strip the colors to figure out the real number of characters in the line.",
"if",
"self",
".",
"_colors",
":",
"for",
"color_code",
"in",
"self",
".",
"_ANSI_REGEXP",
".",
"findall",
"(",
"last_line",
")",
":",
"last_line",
"=",
"last_line",
".",
"replace",
"(",
"self",
".",
"_ANSI_COLOR_CODE",
"%",
"(",
"color_code",
"[",
"0",
"]",
",",
"int",
"(",
"color_code",
"[",
"1",
"]",
")",
")",
",",
"''",
")",
"self",
".",
"_last_position",
"+=",
"len",
"(",
"last_line",
")",
"else",
":",
"self",
".",
"_last_position",
"=",
"0",
"self",
".",
"_is_first_line",
"=",
"False",
"else",
":",
"self",
".",
"_last_position",
"=",
"0",
"# Reset colors for the next print.",
"if",
"self",
".",
"_colors",
"and",
"not",
"text",
".",
"endswith",
"(",
"self",
".",
"NORMAL",
")",
":",
"self",
".",
"_writer",
".",
"write",
"(",
"self",
".",
"NORMAL",
")"
] | 45.811321 | 19.09434 |
def changePlanParticipation(self, plan, take_part=True):
"""
Changes participation in a plan
:param plan: Plan to take part in or not
:param take_part: Whether to take part in the plan
:raises: FBchatException if request failed
"""
data = {
"event_reminder_id": plan.uid,
"guest_state": "GOING" if take_part else "DECLINED",
"acontext": ACONTEXT,
}
j = self._post(
self.req_url.PLAN_PARTICIPATION, data, fix_request=True, as_json=True
)
|
[
"def",
"changePlanParticipation",
"(",
"self",
",",
"plan",
",",
"take_part",
"=",
"True",
")",
":",
"data",
"=",
"{",
"\"event_reminder_id\"",
":",
"plan",
".",
"uid",
",",
"\"guest_state\"",
":",
"\"GOING\"",
"if",
"take_part",
"else",
"\"DECLINED\"",
",",
"\"acontext\"",
":",
"ACONTEXT",
",",
"}",
"j",
"=",
"self",
".",
"_post",
"(",
"self",
".",
"req_url",
".",
"PLAN_PARTICIPATION",
",",
"data",
",",
"fix_request",
"=",
"True",
",",
"as_json",
"=",
"True",
")"
] | 34.375 | 16.875 |
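Minimal hedged call; `client` and `plan` stand for a logged-in client instance and a previously fetched Plan.

    client.changePlanParticipation(plan, take_part=False)  # decline
    client.changePlanParticipation(plan)                   # going (the default)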
def tropocollagen(
cls, aa=28, major_radius=5.0, major_pitch=85.0, auto_build=True):
"""Creates a model of a collagen triple helix.
Parameters
----------
aa : int, optional
Number of amino acids per minor helix.
major_radius : float, optional
Radius of super helix.
major_pitch : float, optional
Pitch of super helix.
auto_build : bool, optional
If `True`, the model will be built as part of instantiation.
"""
instance = cls.from_parameters(
n=3, aa=aa, major_radius=major_radius, major_pitch=major_pitch,
phi_c_alpha=0.0, minor_helix_type='collagen', auto_build=False)
instance.major_handedness = ['r'] * 3
# default z-shifts taken from rise_per_residue of collagen helix
rpr_collagen = _helix_parameters['collagen'][1]
instance.z_shifts = [-rpr_collagen * 2, -rpr_collagen, 0.0]
instance.minor_repeats = [None] * 3
if auto_build:
instance.build()
return instance
|
[
"def",
"tropocollagen",
"(",
"cls",
",",
"aa",
"=",
"28",
",",
"major_radius",
"=",
"5.0",
",",
"major_pitch",
"=",
"85.0",
",",
"auto_build",
"=",
"True",
")",
":",
"instance",
"=",
"cls",
".",
"from_parameters",
"(",
"n",
"=",
"3",
",",
"aa",
"=",
"aa",
",",
"major_radius",
"=",
"major_radius",
",",
"major_pitch",
"=",
"major_pitch",
",",
"phi_c_alpha",
"=",
"0.0",
",",
"minor_helix_type",
"=",
"'collagen'",
",",
"auto_build",
"=",
"False",
")",
"instance",
".",
"major_handedness",
"=",
"[",
"'r'",
"]",
"*",
"3",
"# default z-shifts taken from rise_per_residue of collagen helix",
"rpr_collagen",
"=",
"_helix_parameters",
"[",
"'collagen'",
"]",
"[",
"1",
"]",
"instance",
".",
"z_shifts",
"=",
"[",
"-",
"rpr_collagen",
"*",
"2",
",",
"-",
"rpr_collagen",
",",
"0.0",
"]",
"instance",
".",
"minor_repeats",
"=",
"[",
"None",
"]",
"*",
"3",
"if",
"auto_build",
":",
"instance",
".",
"build",
"(",
")",
"return",
"instance"
] | 40.961538 | 16.230769 |
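Hedged sketch; CoiledCoil as the owning class is an assumption inferred from the classmethod style, not confirmed by the source.

    helix = CoiledCoil.tropocollagen(aa=28, major_radius=5.0,
                                     major_pitch=85.0, auto_build=True)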
def contents(self, from_date=DEFAULT_DATETIME,
offset=None, max_contents=MAX_CONTENTS):
"""Get the contents of a repository.
This method returns an iterator that manages the pagination
over contents. Take into account that the seconds of `from_date`
parameter will be ignored because the API only works with
hours and minutes.
:param from_date: fetch the contents updated since this date
:param offset: fetch the contents starting from this offset
        :param max_contents: maximum number of contents to fetch per request
"""
resource = self.RCONTENTS + '/' + self.MSEARCH
# Set confluence query parameter (cql)
date = from_date.strftime("%Y-%m-%d %H:%M")
cql = self.VCQL % {'date': date}
# Set parameters
params = {
self.PCQL: cql,
self.PLIMIT: max_contents,
self.PEXPAND: self.PANCESTORS
}
if offset:
params[self.PSTART] = offset
for response in self._call(resource, params):
yield response
|
[
"def",
"contents",
"(",
"self",
",",
"from_date",
"=",
"DEFAULT_DATETIME",
",",
"offset",
"=",
"None",
",",
"max_contents",
"=",
"MAX_CONTENTS",
")",
":",
"resource",
"=",
"self",
".",
"RCONTENTS",
"+",
"'/'",
"+",
"self",
".",
"MSEARCH",
"# Set confluence query parameter (cql)",
"date",
"=",
"from_date",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M\"",
")",
"cql",
"=",
"self",
".",
"VCQL",
"%",
"{",
"'date'",
":",
"date",
"}",
"# Set parameters",
"params",
"=",
"{",
"self",
".",
"PCQL",
":",
"cql",
",",
"self",
".",
"PLIMIT",
":",
"max_contents",
",",
"self",
".",
"PEXPAND",
":",
"self",
".",
"PANCESTORS",
"}",
"if",
"offset",
":",
"params",
"[",
"self",
".",
"PSTART",
"]",
"=",
"offset",
"for",
"response",
"in",
"self",
".",
"_call",
"(",
"resource",
",",
"params",
")",
":",
"yield",
"response"
] | 34.741935 | 19.677419 |
def getlist(self, section, option):
"""
returns the named option as a list, splitting the original value
by ','
"""
value = self.get(section, option)
if value:
return value.split(',')
else:
return None
|
[
"def",
"getlist",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"section",
",",
"option",
")",
"if",
"value",
":",
"return",
"value",
".",
"split",
"(",
"','",
")",
"else",
":",
"return",
"None"
] | 28 | 13.6 |
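Sketch against an assumed config holding `names = alpha,beta,gamma` under a [main] section.

    config.getlist('main', 'names')  # ['alpha', 'beta', 'gamma']
    config.getlist('main', 'empty')  # None when the stored value is empty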
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
|
[
"def",
"write_config_static",
"(",
"system_dir",
",",
"system_filename_pattern",
",",
"model_dir",
",",
"model_filename_pattern",
",",
"config_file_path",
",",
"system_id",
"=",
"None",
")",
":",
"system_filenames",
"=",
"[",
"f",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"system_dir",
")",
"]",
"system_models_tuples",
"=",
"[",
"]",
"system_filename_pattern",
"=",
"re",
".",
"compile",
"(",
"system_filename_pattern",
")",
"for",
"system_filename",
"in",
"sorted",
"(",
"system_filenames",
")",
":",
"match",
"=",
"system_filename_pattern",
".",
"match",
"(",
"system_filename",
")",
"if",
"match",
":",
"id",
"=",
"match",
".",
"groups",
"(",
"0",
")",
"[",
"0",
"]",
"model_filenames",
"=",
"Rouge155",
".",
"__get_model_filenames_for_id",
"(",
"id",
",",
"model_dir",
",",
"model_filename_pattern",
")",
"system_models_tuples",
".",
"append",
"(",
"(",
"system_filename",
",",
"sorted",
"(",
"model_filenames",
")",
")",
")",
"if",
"not",
"system_models_tuples",
":",
"raise",
"Exception",
"(",
"\"Did not find any files matching the pattern {} \"",
"\"in the system summaries directory {}.\"",
".",
"format",
"(",
"system_filename_pattern",
".",
"pattern",
",",
"system_dir",
")",
")",
"with",
"codecs",
".",
"open",
"(",
"config_file_path",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'<ROUGE-EVAL version=\"1.55\">'",
")",
"for",
"task_id",
",",
"(",
"system_filename",
",",
"model_filenames",
")",
"in",
"enumerate",
"(",
"system_models_tuples",
",",
"start",
"=",
"1",
")",
":",
"eval_string",
"=",
"Rouge155",
".",
"__get_eval_string",
"(",
"task_id",
",",
"system_id",
",",
"system_dir",
",",
"system_filename",
",",
"model_dir",
",",
"model_filenames",
")",
"f",
".",
"write",
"(",
"eval_string",
")",
"f",
".",
"write",
"(",
"\"</ROUGE-EVAL>\"",
")"
] | 48.163636 | 21.072727 |
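Hedged example wiring; the directory names are invented, and the '#ID#' placeholder in the model pattern follows the pyrouge convention assumed by __get_model_filenames_for_id.

    Rouge155.write_config_static(
        system_dir='summaries/system',
        system_filename_pattern=r'text.(\d+).txt',
        model_dir='summaries/model',
        model_filename_pattern='text.#ID#.[A-Z].txt',
        config_file_path='rouge_conf.xml',
    )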
def update_font(self):
"""Update font from Preferences"""
font = self.get_plugin_font()
for client in self.clients:
client.set_font(font)
|
[
"def",
"update_font",
"(",
"self",
")",
":",
"font",
"=",
"self",
".",
"get_plugin_font",
"(",
")",
"for",
"client",
"in",
"self",
".",
"clients",
":",
"client",
".",
"set_font",
"(",
"font",
")"
] | 34.6 | 6 |
def serialize_input(input, signature_script_hex=''):
""" Serializes a transaction input.
"""
if not (isinstance(input, dict) and 'transaction_hash' in input \
and 'output_index' in input):
raise Exception('Required parameters: transaction_hash, output_index')
if is_hex(str(input['transaction_hash'])) and len(str(input['transaction_hash'])) != 64:
raise Exception("Transaction hash '%s' must be 32 bytes" % input['transaction_hash'])
elif not is_hex(str(input['transaction_hash'])) and len(str(input['transaction_hash'])) != 32:
raise Exception("Transaction hash '%s' must be 32 bytes" % hexlify(input['transaction_hash']))
if not 'sequence' in input:
input['sequence'] = UINT_MAX
return ''.join([
flip_endian(input['transaction_hash']),
hexlify(struct.pack('<I', input['output_index'])),
hexlify(variable_length_int(len(signature_script_hex)/2)),
signature_script_hex,
hexlify(struct.pack('<I', input['sequence']))
])
|
[
"def",
"serialize_input",
"(",
"input",
",",
"signature_script_hex",
"=",
"''",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"input",
",",
"dict",
")",
"and",
"'transaction_hash'",
"in",
"input",
"and",
"'output_index'",
"in",
"input",
")",
":",
"raise",
"Exception",
"(",
"'Required parameters: transaction_hash, output_index'",
")",
"if",
"is_hex",
"(",
"str",
"(",
"input",
"[",
"'transaction_hash'",
"]",
")",
")",
"and",
"len",
"(",
"str",
"(",
"input",
"[",
"'transaction_hash'",
"]",
")",
")",
"!=",
"64",
":",
"raise",
"Exception",
"(",
"\"Transaction hash '%s' must be 32 bytes\"",
"%",
"input",
"[",
"'transaction_hash'",
"]",
")",
"elif",
"not",
"is_hex",
"(",
"str",
"(",
"input",
"[",
"'transaction_hash'",
"]",
")",
")",
"and",
"len",
"(",
"str",
"(",
"input",
"[",
"'transaction_hash'",
"]",
")",
")",
"!=",
"32",
":",
"raise",
"Exception",
"(",
"\"Transaction hash '%s' must be 32 bytes\"",
"%",
"hexlify",
"(",
"input",
"[",
"'transaction_hash'",
"]",
")",
")",
"if",
"not",
"'sequence'",
"in",
"input",
":",
"input",
"[",
"'sequence'",
"]",
"=",
"UINT_MAX",
"return",
"''",
".",
"join",
"(",
"[",
"flip_endian",
"(",
"input",
"[",
"'transaction_hash'",
"]",
")",
",",
"hexlify",
"(",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"input",
"[",
"'output_index'",
"]",
")",
")",
",",
"hexlify",
"(",
"variable_length_int",
"(",
"len",
"(",
"signature_script_hex",
")",
"/",
"2",
")",
")",
",",
"signature_script_hex",
",",
"hexlify",
"(",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"input",
"[",
"'sequence'",
"]",
")",
")",
"]",
")"
] | 44.217391 | 26.391304 |
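Illustrative input shape; the hash below is a made-up 64-character hex placeholder, not a real transaction.

    tx_input = {
        'transaction_hash': 'ab' * 32,  # 32 bytes as 64 hex characters
        'output_index': 0,              # 'sequence' defaults to UINT_MAX
    }
    hex_blob = serialize_input(tx_input)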
def find_from(path):
"""Find path of an .ensime config, searching recursively upward from path.
Args:
path (str): Path of a file or directory from where to start searching.
Returns:
str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
"""
realpath = os.path.realpath(path)
config_path = os.path.join(realpath, '.ensime')
if os.path.isfile(config_path):
return config_path
elif realpath == os.path.abspath('/'):
return None
else:
dirname = os.path.dirname(realpath)
return ProjectConfig.find_from(dirname)
|
[
"def",
"find_from",
"(",
"path",
")",
":",
"realpath",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"path",
")",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"realpath",
",",
"'.ensime'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"config_path",
")",
":",
"return",
"config_path",
"elif",
"realpath",
"==",
"os",
".",
"path",
".",
"abspath",
"(",
"'/'",
")",
":",
"return",
"None",
"else",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"realpath",
")",
"return",
"ProjectConfig",
".",
"find_from",
"(",
"dirname",
")"
] | 34.210526 | 19.421053 |
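Usage sketch: walk upward from the working directory to the filesystem root looking for .ensime.

    import os

    config_path = ProjectConfig.find_from(os.getcwd())
    if config_path is None:
        print('no .ensime found between here and /')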
def GetSubkeyByIndex(self, index):
"""Retrieves a subkey by index.
Args:
index (int): index of the subkey.
Returns:
WinRegistryKey: Windows Registry subkey or None if not found.
Raises:
IndexError: if the index is out of bounds.
"""
if index < 0 or index >= self._pyregf_key.number_of_sub_keys:
raise IndexError('Index out of bounds.')
pyregf_key = self._pyregf_key.get_sub_key(index)
if not pyregf_key:
return None
key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name])
return REGFWinRegistryKey(pyregf_key, key_path=key_path)
|
[
"def",
"GetSubkeyByIndex",
"(",
"self",
",",
"index",
")",
":",
"if",
"index",
"<",
"0",
"or",
"index",
">=",
"self",
".",
"_pyregf_key",
".",
"number_of_sub_keys",
":",
"raise",
"IndexError",
"(",
"'Index out of bounds.'",
")",
"pyregf_key",
"=",
"self",
".",
"_pyregf_key",
".",
"get_sub_key",
"(",
"index",
")",
"if",
"not",
"pyregf_key",
":",
"return",
"None",
"key_path",
"=",
"key_paths",
".",
"JoinKeyPath",
"(",
"[",
"self",
".",
"_key_path",
",",
"pyregf_key",
".",
"name",
"]",
")",
"return",
"REGFWinRegistryKey",
"(",
"pyregf_key",
",",
"key_path",
"=",
"key_path",
")"
] | 28.333333 | 22.142857 |
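The method above depends on pyregf and dfwinreg internals, so a direct run is not possible here; this sketch reproduces only the index-based access contract (bounds check included) with a stand-in key class whose names are invented.

class FakeKey(object):
    # Stand-in with the same GetSubkeyByIndex contract as the entry above.
    def __init__(self, name, subkeys=()):
        self.name = name
        self._subkeys = list(subkeys)

    def GetSubkeyByIndex(self, index):
        if index < 0 or index >= len(self._subkeys):
            raise IndexError('Index out of bounds.')
        return self._subkeys[index]


run_key = FakeKey('Run', [FakeKey('OneDrive'), FakeKey('SecurityHealth')])
for i in range(2):
    print(run_key.GetSubkeyByIndex(i).name)   # OneDrive, SecurityHealth

try:
    run_key.GetSubkeyByIndex(5)
except IndexError as error:
    print(error)                              # Index out of bounds.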
def min(a, axis=None):
    """
    Request the minimum of an Array over any number of axes.

    .. note:: Currently limited to operating on a single axis.

    Parameters
    ----------
    a : Array object
        The object whose minimum is to be found.
    axis : None, or int, or iterable of ints
        Axis or axes along which the operation is performed. The default
        (axis=None) is to perform the operation over all the dimensions of the
        input array. The axis may be negative, in which case it counts from
        the last to the first axis. If axis is a tuple of ints, the operation
        is performed over multiple axes.

    Returns
    -------
    out : Array
        The Array representing the requested minimum.
    """
    axes = _normalise_axis(axis, a)
    assert axes is not None and len(axes) == 1
    return _Aggregation(a, axes[0],
                        _MinStreamsHandler, _MinMaskedStreamsHandler,
                        a.dtype, {})
|
[
"def",
"min",
"(",
"a",
",",
"axis",
"=",
"None",
")",
":",
"axes",
"=",
"_normalise_axis",
"(",
"axis",
",",
"a",
")",
"assert",
"axes",
"is",
"not",
"None",
"and",
"len",
"(",
"axes",
")",
"==",
"1",
"return",
"_Aggregation",
"(",
"a",
",",
"axes",
"[",
"0",
"]",
",",
"_MinStreamsHandler",
",",
"_MinMaskedStreamsHandler",
",",
"a",
".",
"dtype",
",",
"{",
"}",
")"
] | 35.111111 | 20.296296 |
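The _Aggregation machinery is library-internal and not reproduced here; this sketch shows only the axis-normalisation step the function relies on, with a stand-in _normalise_axis written to its apparent contract and plain numpy supplying the data.

import numpy as np


def _normalise_axis(axis, a):
    # Apparent contract: None selects every axis, ints may be negative,
    # and an iterable selects several axes at once.
    if axis is None:
        return tuple(range(a.ndim))
    if isinstance(axis, int):
        axis = (axis,)
    return tuple(ax % a.ndim for ax in axis)


data = np.arange(12).reshape(3, 4)
print(_normalise_axis(None, data))  # (0, 1)
print(_normalise_axis(-1, data))    # (1,)

# The deferred min above currently accepts exactly one axis:
axes = _normalise_axis(0, data)
assert axes is not None and len(axes) == 1
print(data.min(axis=axes[0]))       # [0 1 2 3]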
def view(self, start, end, max_items=None, *args, **kwargs):
    """ Implements the CalendarView option to FindItem. The difference between filter() and view() is that filter()
    only returns the master CalendarItem for recurring items, while view() unfolds recurring items and returns all
    CalendarItem occurrences as one would normally expect when presenting a calendar.

    Supports the same semantics as filter, except for the 'start' and 'end' keyword attributes, which are both
    required and behave differently than in filter. Here, they denote the start and end of the timespan of the view.
    All items that overlap the timespan are returned (items that end exactly on 'start' are also returned, for some
    reason).

    EWS does not allow combining CalendarView with search restrictions (filter and exclude).

    'max_items' defines the maximum number of items returned in this view. Optional.
    """
    qs = QuerySet(self).filter(*args, **kwargs)
    qs.calendar_view = CalendarView(start=start, end=end, max_items=max_items)
    return qs
|
[
"def",
"view",
"(",
"self",
",",
"start",
",",
"end",
",",
"max_items",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"qs",
"=",
"QuerySet",
"(",
"self",
")",
".",
"filter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"qs",
".",
"calendar_view",
"=",
"CalendarView",
"(",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"max_items",
"=",
"max_items",
")",
"return",
"qs"
] | 67.75 | 42.5 |
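A self-contained sketch of the filter()/view() distinction described in the docstring, using invented stand-in data instead of a live Exchange account.

from datetime import date, timedelta

# One recurring 'master' item: a weekly stand-up with four occurrences.
master = {'subject': 'Stand-up', 'start': date(2024, 1, 1),
          'recurrence_weeks': 4}


def filter_items(items):
    # filter()-like: one master item per recurring series.
    return list(items)


def view_items(items, start, end):
    # view()-like: unfold each series into occurrences overlapping [start, end].
    occurrences = []
    for item in items:
        for week in range(item['recurrence_weeks']):
            day = item['start'] + timedelta(weeks=week)
            if start <= day <= end:
                occurrences.append({'subject': item['subject'], 'start': day})
    return occurrences


print(len(filter_items([master])))                                     # 1
print(len(view_items([master], date(2024, 1, 1), date(2024, 1, 31))))  # 4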
def verify_invoice_params(self, price, currency):
    """
    Deprecated, will be made private in 2.4
    """
    if re.match("^[A-Z]{3,3}$", currency) is None:
        raise BitPayArgumentError("Currency is invalid.")
    try:
        float(price)
    except (TypeError, ValueError):
        raise BitPayArgumentError("Price must be formatted as a float")
|
[
"def",
"verify_invoice_params",
"(",
"self",
",",
"price",
",",
"currency",
")",
":",
"if",
"re",
".",
"match",
"(",
"\"^[A-Z]{3,3}$\"",
",",
"currency",
")",
"is",
"None",
":",
"raise",
"BitPayArgumentError",
"(",
"\"Currency is invalid.\"",
")",
"try",
":",
"float",
"(",
"price",
")",
"except",
":",
"raise",
"BitPayArgumentError",
"(",
"\"Price must be formatted as a float\"",
")"
] | 31.8 | 14.8 |
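A stand-alone sketch of the two validations above; BitPayArgumentError is replaced by a local exception class so the snippet runs on its own, and {3} is equivalent to the original {3,3}.

import re


class BitPayArgumentError(Exception):
    pass


def verify_invoice_params(price, currency):
    if re.match("^[A-Z]{3}$", currency) is None:
        raise BitPayArgumentError("Currency is invalid.")
    try:
        float(price)
    except (TypeError, ValueError):
        raise BitPayArgumentError("Price must be formatted as a float")


verify_invoice_params("19.95", "USD")            # passes silently
for bad_price, bad_currency in [("19.95", "usd"), ("abc", "EUR")]:
    try:
        verify_invoice_params(bad_price, bad_currency)
    except BitPayArgumentError as error:
        print(error)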
def _handle(self, nick, target, message, **kwargs):
    """ client callback entrance """
    for regex, (func, pattern) in self.routes.items():
        match = regex.match(message)
        if match:
            self.client.loop.create_task(
                func(nick, target, message, match, **kwargs))
|
[
"def",
"_handle",
"(",
"self",
",",
"nick",
",",
"target",
",",
"message",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"regex",
",",
"(",
"func",
",",
"pattern",
")",
"in",
"self",
".",
"routes",
".",
"items",
"(",
")",
":",
"match",
"=",
"regex",
".",
"match",
"(",
"message",
")",
"if",
"match",
":",
"self",
".",
"client",
".",
"loop",
".",
"create_task",
"(",
"func",
"(",
"nick",
",",
"target",
",",
"message",
",",
"match",
",",
"*",
"*",
"kwargs",
")",
")"
] | 45.714286 | 11.142857 |
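A runnable sketch of the regex-routing pattern above, with the client/loop plumbing simplified into a small Router class; the names are invented, and the real dispatcher schedules handlers on its IRC client's loop.

import asyncio
import re


class Router(object):
    def __init__(self, loop):
        self.loop = loop
        self.routes = {}

    def route(self, pattern, func):
        self.routes[re.compile(pattern)] = (func, pattern)

    def _handle(self, nick, target, message, **kwargs):
        # Schedule every handler whose pattern matches the message.
        for regex, (func, pattern) in self.routes.items():
            match = regex.match(message)
            if match:
                self.loop.create_task(
                    func(nick, target, message, match, **kwargs))


async def echo(nick, target, message, match, **kwargs):
    print('%s -> %s: %s' % (nick, target, match.group(1)))


async def main():
    router = Router(asyncio.get_running_loop())
    router._handle('alice', '#chan', 'no route matches this')  # silently ignored
    router.route(r'!echo (.+)', echo)
    router._handle('alice', '#chan', '!echo hello')
    await asyncio.sleep(0)  # give the scheduled task a chance to run


asyncio.run(main())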