repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (sequence) | docstring (stringlengths 1-46.9k) | docstring_tokens (sequence) | language (stringclasses 1 value) | partition (stringclasses 3 values)
---|---|---|---|---|---|---|---|---
goldsborough/ecstasy | ecstasy/parser.py | https://github.com/goldsborough/ecstasy/blob/7faa54708d506696c2607ddb68866e66768072ad/ecstasy/parser.py#L133-L225 | def get_flags(self, args):
"""
Checks and retrieves positional and 'always' (keyword) flags
from the many ways in which they may be passed to the
constructor (or the beautify() method on package-level).
Positional arguments can be passed either:
* Individually, where each flag-combination is one positional argument.
* Packaged inside a list, which is then expanded. There can be
multiple of such lists passed as arguments because it facilitates
interaction with the ecstasy module (one may want to organize
and update styles in certain ways depending on one's program), but
each list will be expanded and all flag-combinations found inside
each list will be interpreted as a single style argument, as if it
had been passed in the way described above (individually).
'Always' arguments can be passed either:
* Individually, with keyword-argument syntax, i.e. <word>=<style>
* In a dictionary, which is expanded exactly like positional
arguments passed in lists (i.e. each key/value pair in the
dictionary is interpreted as if it had been passed individually,
as key=value to the constructor/the external beautify() method).
Note:
self.always is set equal to the keyword arguments passed to the
constructor and then modified directly (when 'always'-arguments
are found), while the positional arguments are put into a list
here and returned (i.e. no interaction with self.positional).
Arguments:
args (list): The positional arguments passed to the constructor.
Returns:
The positional arguments.
Raises:
errors.FlagError: If an invalid (out-of-range)
flag combination was passed.
errors.EcstasyError: If one of the arguments is of invalid type.
"""
positional = []
for argument in args:
# A flag is an instance of a subclass of
# flags.Flags if it was passed alone
if isinstance(argument, flags.Flags):
positional.append(argument)
# or is an integer if it was (bitwise) OR'd
# with another flag (a "flag combination")
elif isinstance(argument, int):
if argument < 0 or argument >= flags.LIMIT:
raise errors.FlagError("Flag value '{0}' is out of range "
"!".format(argument))
positional.append(argument)
# Dictionaries store 'always'-arguments
elif isinstance(argument, dict):
for key, value in argument.items():
# Simple 'always'-argument where one string
# is mapped to one formatting flag-combination
if isinstance(key, str):
self.always[key] = value
# Complex 'always'-argument with a
# tuple containing strings, each with the same
# flag-combination (same value)
elif isinstance(key, tuple):
for i in key:
self.always[i] = value
else:
raise errors.EcstasyError("Key '{0}' in dictionary "
"argument passed is neither "
"a string nor a tuple "
"of strings!".format(key))
elif isinstance(argument, collections.Iterable):
positional += self.get_flags(argument)
else:
raise errors.EcstasyError("Argument '{0}' is neither a flag, a "
"(bitwise) OR'd flag-combination, a "
"dictionary nor an iterable of "
"positional arguments "
"!".format(argument))
return positional | [
"def",
"get_flags",
"(",
"self",
",",
"args",
")",
":",
"positional",
"=",
"[",
"]",
"for",
"argument",
"in",
"args",
":",
"# A flag is an instance of a subclass of",
"# flags.Flags if it was passed alone",
"if",
"isinstance",
"(",
"argument",
",",
"flags",
".",
"Flags",
")",
":",
"positional",
".",
"append",
"(",
"argument",
")",
"# or is an integer if it was (bitwise) OR'd",
"# with another flag (a \"flag combination\")",
"elif",
"isinstance",
"(",
"argument",
",",
"int",
")",
":",
"if",
"argument",
"<",
"0",
"or",
"argument",
">=",
"flags",
".",
"LIMIT",
":",
"raise",
"errors",
".",
"FlagError",
"(",
"\"Flag value '{0}' is out of range \"",
"\"!\"",
".",
"format",
"(",
"argument",
")",
")",
"positional",
".",
"append",
"(",
"argument",
")",
"# Dictionaries store 'always'-arguments",
"elif",
"isinstance",
"(",
"argument",
",",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"argument",
".",
"items",
"(",
")",
":",
"# Simple 'always'-argument where one string",
"# is mapped to one formatting flag-combination",
"if",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"self",
".",
"always",
"[",
"key",
"]",
"=",
"value",
"# Complex 'always'-argument with a",
"# tuple containing strings, each with the same",
"# flag-combination (same value)",
"elif",
"isinstance",
"(",
"key",
",",
"tuple",
")",
":",
"for",
"i",
"in",
"key",
":",
"self",
".",
"always",
"[",
"i",
"]",
"=",
"value",
"else",
":",
"raise",
"errors",
".",
"EcstasyError",
"(",
"\"Key '{0}' in dictionary \"",
"\"argument passed is neither \"",
"\"a string nor a tuple \"",
"\"of strings!\"",
".",
"format",
"(",
"key",
")",
")",
"elif",
"isinstance",
"(",
"argument",
",",
"collections",
".",
"Iterable",
")",
":",
"positional",
"+=",
"self",
".",
"get_flags",
"(",
"argument",
")",
"else",
":",
"raise",
"errors",
".",
"EcstasyError",
"(",
"\"Argument '{0}' is neither a flag, a \"",
"\"(bitwise) OR'd flag-combination, a \"",
"\"dictionary nor an iterable of \"",
"\"positional arguments \"",
"\"!\"",
".",
"format",
"(",
"argument",
")",
")",
"return",
"positional"
] | Checks and retrieves positional and 'always' (keyword) flags
from the many ways in which they may be passed to the
constructor (or the beautify() method on package-level).
Positional arguments can be passed either:
* Individually, where each flag-combination is one positional argument.
* Packaged inside a list, which is then expanded. There can be
multiple of such lists passed as arguments because it facilitates
interaction with the ecstasy module (one may want to organize
and update styles in certain ways depending on one's program), but
each list will be expanded and all flag-combinations found inside
each list will be interpreted as a single style argument, as if it
had been passed in the way described above (individually).
'Always' arguments can be passed either:
* Individually, with keyword-argument syntax, i.e. <word>=<style>
* In a dictionary, which is expanded exactly like positional
arguments passed in lists (i.e. each key/value pair in the
dictionary is interpreted as if it had been passed individually,
as key=value to the constructor/the external beautify() method).
Note:
self.always is set equal to the keyword arguments passed to the
constructor and then modified directly (when 'always'-arguments
are found), while the positional arguments are put into a list
here and returned (i.e. no interaction with self.positional).
Arguments:
args (list): The positional arguments passed to the constructor.
Returns:
The positional arguments.
Raises:
errors.FlagError: If an invalid (out-of-range)
flag combination was passed.
errors.EcstasyError: If one of the arguments is of invalid type. | [
"Checks",
"and",
"retrieves",
"positional",
"and",
"always",
"(",
"keyword",
")",
"flags",
"from",
"the",
"many",
"ways",
"in",
"which",
"they",
"may",
"be",
"passed",
"to",
"the",
"constructor",
"(",
"or",
"the",
"beautify",
"()",
"method",
"on",
"package",
"-",
"level",
")",
"."
] | python | train |
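
The get_flags docstring above describes three calling conventions (bare flags, nested lists, and 'always' dictionaries). As a rough illustration, here is a self-contained sketch of that flattening logic; it does not import the ecstasy package, and flatten_flags and the sample flag values are hypothetical stand-ins:

```python
# Illustrative sketch (not ecstasy itself): how get_flags flattens mixed
# arguments -- dicts feed the 'always' mapping (tuple keys fan out), and
# nested iterables are expanded recursively into one positional list.
def flatten_flags(args, always):
    positional = []
    for argument in args:
        if isinstance(argument, dict):
            for key, value in argument.items():
                keys = key if isinstance(key, tuple) else (key,)
                for k in keys:
                    always[k] = value
        elif isinstance(argument, (list, tuple)):
            positional += flatten_flags(argument, always)
        else:
            positional.append(argument)
    return positional

always = {}
print(flatten_flags([1, [2, 3], {"word": 4, ("a", "b"): 5}], always))
# -> [1, 2, 3]; always == {'word': 4, 'a': 5, 'b': 5}
```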
tcalmant/python-javaobj | javaobj/core.py | https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L572-L607 | def _read_and_exec_opcode(self, ident=0, expect=None):
"""
Reads the next opcode, and executes its handler
:param ident: Log indentation level
:param expect: A list of expected opcodes
:return: A tuple: (opcode, result of the handler)
:raise IOError: Read opcode is not one of the expected ones
:raise RuntimeError: Unknown opcode
"""
position = self.object_stream.tell()
(opid,) = self._readStruct(">B")
log_debug(
"OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})".format(
opid, OpCodeDebug.op_id(opid), position
),
ident,
)
if expect and opid not in expect:
raise IOError(
"Unexpected opcode 0x{0:X} -- {1} (at offset 0x{2:X})".format(
opid, OpCodeDebug.op_id(opid), position
)
)
try:
handler = self.opmap[opid]
except KeyError:
raise RuntimeError(
"Unknown OpCode in the stream: 0x{0:X} (at offset 0x{1:X})".format(
opid, position
)
)
else:
return opid, handler(ident=ident) | [
"def",
"_read_and_exec_opcode",
"(",
"self",
",",
"ident",
"=",
"0",
",",
"expect",
"=",
"None",
")",
":",
"position",
"=",
"self",
".",
"object_stream",
".",
"tell",
"(",
")",
"(",
"opid",
",",
")",
"=",
"self",
".",
"_readStruct",
"(",
"\">B\"",
")",
"log_debug",
"(",
"\"OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})\"",
".",
"format",
"(",
"opid",
",",
"OpCodeDebug",
".",
"op_id",
"(",
"opid",
")",
",",
"position",
")",
",",
"ident",
",",
")",
"if",
"expect",
"and",
"opid",
"not",
"in",
"expect",
":",
"raise",
"IOError",
"(",
"\"Unexpected opcode 0x{0:X} -- {1} (at offset 0x{2:X})\"",
".",
"format",
"(",
"opid",
",",
"OpCodeDebug",
".",
"op_id",
"(",
"opid",
")",
",",
"position",
")",
")",
"try",
":",
"handler",
"=",
"self",
".",
"opmap",
"[",
"opid",
"]",
"except",
"KeyError",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown OpCode in the stream: 0x{0:X} (at offset 0x{1:X})\"",
".",
"format",
"(",
"opid",
",",
"position",
")",
")",
"else",
":",
"return",
"opid",
",",
"handler",
"(",
"ident",
"=",
"ident",
")"
] | Reads the next opcode, and executes its handler
:param ident: Log indentation level
:param expect: A list of expected opcodes
:return: A tuple: (opcode, result of the handler)
:raise IOError: Read opcode is not one of the expected ones
:raise RuntimeError: Unknown opcode | [
"Reads",
"the",
"next",
"opcode",
"and",
"executes",
"its",
"handler"
] | python | train |
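
To make the dispatch pattern in _read_and_exec_opcode concrete, here is a minimal standalone sketch; the two opcodes in OPMAP are illustrative placeholders, not javaobj's real handler table:

```python
# Minimal sketch of the opcode-dispatch pattern: read one byte, validate
# it against the expected set, then delegate to the registered handler.
import io
import struct

OPMAP = {0x70: lambda: "TC_NULL", 0x71: lambda: "TC_REFERENCE"}

def read_and_exec(stream, expect=None):
    position = stream.tell()
    (opid,) = struct.unpack(">B", stream.read(1))
    if expect and opid not in expect:
        raise IOError("Unexpected opcode 0x%X at offset 0x%X" % (opid, position))
    try:
        handler = OPMAP[opid]
    except KeyError:
        raise RuntimeError("Unknown opcode 0x%X at offset 0x%X" % (opid, position))
    return opid, handler()

print(read_and_exec(io.BytesIO(b"\x70")))  # (112, 'TC_NULL')
```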
cds-astro/mocpy | mocpy/tmoc/tmoc.py | https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/tmoc/tmoc.py#L56-L82 | def from_time_ranges(cls, min_times, max_times, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Create a TimeMOC from a range defined by two `astropy.time.Time`
Parameters
----------
min_times : `astropy.time.Time`
astropy times defining the left part of the intervals
max_times : `astropy.time.Time`
astropy times defining the right part of the intervals
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC`
"""
min_times_arr = np.asarray(min_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
max_times_arr = np.asarray(max_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
intervals_arr = np.vstack((min_times_arr, max_times_arr + 1)).T
# degrade the TimeMoc to the order computer from ``delta_t``
order = TimeMOC.time_resolution_to_order(delta_t)
return TimeMOC(IntervalSet(intervals_arr)).degrade_to_order(order) | [
"def",
"from_time_ranges",
"(",
"cls",
",",
"min_times",
",",
"max_times",
",",
"delta_t",
"=",
"DEFAULT_OBSERVATION_TIME",
")",
":",
"min_times_arr",
"=",
"np",
".",
"asarray",
"(",
"min_times",
".",
"jd",
"*",
"TimeMOC",
".",
"DAY_MICRO_SEC",
",",
"dtype",
"=",
"int",
")",
"max_times_arr",
"=",
"np",
".",
"asarray",
"(",
"max_times",
".",
"jd",
"*",
"TimeMOC",
".",
"DAY_MICRO_SEC",
",",
"dtype",
"=",
"int",
")",
"intervals_arr",
"=",
"np",
".",
"vstack",
"(",
"(",
"min_times_arr",
",",
"max_times_arr",
"+",
"1",
")",
")",
".",
"T",
"# degrade the TimeMoc to the order computer from ``delta_t``",
"order",
"=",
"TimeMOC",
".",
"time_resolution_to_order",
"(",
"delta_t",
")",
"return",
"TimeMOC",
"(",
"IntervalSet",
"(",
"intervals_arr",
")",
")",
".",
"degrade_to_order",
"(",
"order",
")"
] | Create a TimeMOC from a range defined by two `astropy.time.Time`
Parameters
----------
min_times : `astropy.time.Time`
astropy times defining the left part of the intervals
max_times : `astropy.time.Time`
astropy times defining the right part of the intervals
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC` | [
"Create",
"a",
"TimeMOC",
"from",
"a",
"range",
"defined",
"by",
"two",
"astropy",
".",
"time",
".",
"Time"
] | python | train |
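
The core of from_time_ranges is the conversion from Julian dates to integer-microsecond intervals. Below is a NumPy-only sketch of that arithmetic; DAY_MICRO_SEC is assumed to be one day in microseconds, and the JD values are illustrative:

```python
# NumPy-only sketch of the interval arithmetic inside from_time_ranges.
import numpy as np

DAY_MICRO_SEC = 86400 * 1000000  # assumed value of TimeMOC.DAY_MICRO_SEC
min_jd = np.array([2458484.5])   # illustrative start time (JD)
max_jd = np.array([2458485.5])   # illustrative end time (JD)
lo = np.asarray(min_jd * DAY_MICRO_SEC, dtype=int)
hi = np.asarray(max_jd * DAY_MICRO_SEC, dtype=int)
print(np.vstack((lo, hi + 1)).T)  # one [start, end + 1) interval row
```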
ibis-project/ibis | ibis/pandas/execution/window.py | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/pandas/execution/window.py#L255-L291 | def execute_series_lead_lag_timedelta(
op, data, offset, default, aggcontext=None, **kwargs
):
"""An implementation of shifting a column relative to another one that is
in units of time rather than rows.
"""
# lagging adds time (delayed), leading subtracts time (moved up)
func = operator.add if isinstance(op, ops.Lag) else operator.sub
group_by = aggcontext.group_by
order_by = aggcontext.order_by
# get the parent object from which `data` originated
parent = aggcontext.parent
# get the DataFrame from the parent object, handling the DataFrameGroupBy
# case
parent_df = getattr(parent, 'obj', parent)
# index our parent df by grouping and ordering keys
indexed_original_df = parent_df.set_index(group_by + order_by)
# perform the time shift
adjusted_parent_df = parent_df.assign(
**{k: func(parent_df[k], offset) for k in order_by}
)
# index the parent *after* adjustment
adjusted_indexed_parent = adjusted_parent_df.set_index(group_by + order_by)
# get the column we care about
result = adjusted_indexed_parent[getattr(data, 'obj', data).name]
# reindex the shifted data by the original frame's index
result = result.reindex(indexed_original_df.index)
# add a default if necessary
return post_lead_lag(result, default) | [
"def",
"execute_series_lead_lag_timedelta",
"(",
"op",
",",
"data",
",",
"offset",
",",
"default",
",",
"aggcontext",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# lagging adds time (delayed), leading subtracts time (moved up)",
"func",
"=",
"operator",
".",
"add",
"if",
"isinstance",
"(",
"op",
",",
"ops",
".",
"Lag",
")",
"else",
"operator",
".",
"sub",
"group_by",
"=",
"aggcontext",
".",
"group_by",
"order_by",
"=",
"aggcontext",
".",
"order_by",
"# get the parent object from which `data` originated",
"parent",
"=",
"aggcontext",
".",
"parent",
"# get the DataFrame from the parent object, handling the DataFrameGroupBy",
"# case",
"parent_df",
"=",
"getattr",
"(",
"parent",
",",
"'obj'",
",",
"parent",
")",
"# index our parent df by grouping and ordering keys",
"indexed_original_df",
"=",
"parent_df",
".",
"set_index",
"(",
"group_by",
"+",
"order_by",
")",
"# perform the time shift",
"adjusted_parent_df",
"=",
"parent_df",
".",
"assign",
"(",
"*",
"*",
"{",
"k",
":",
"func",
"(",
"parent_df",
"[",
"k",
"]",
",",
"offset",
")",
"for",
"k",
"in",
"order_by",
"}",
")",
"# index the parent *after* adjustment",
"adjusted_indexed_parent",
"=",
"adjusted_parent_df",
".",
"set_index",
"(",
"group_by",
"+",
"order_by",
")",
"# get the column we care about",
"result",
"=",
"adjusted_indexed_parent",
"[",
"getattr",
"(",
"data",
",",
"'obj'",
",",
"data",
")",
".",
"name",
"]",
"# reindex the shifted data by the original frame's index",
"result",
"=",
"result",
".",
"reindex",
"(",
"indexed_original_df",
".",
"index",
")",
"# add a default if necessary",
"return",
"post_lead_lag",
"(",
"result",
",",
"default",
")"
] | An implementation of shifting a column relative to another one that is
in units of time rather than rows. | [
"An",
"implementation",
"of",
"shifting",
"a",
"column",
"relative",
"to",
"another",
"one",
"that",
"is",
"in",
"units",
"of",
"time",
"rather",
"than",
"rows",
"."
] | python | train |
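
The shift-by-timedelta trick above (adjust the order-by column, re-index, read back at the original keys) works in plain pandas; a minimal sketch with made-up data:

```python
# Standalone pandas sketch of the time-based lag: shift the order-by
# column by the offset, re-index, then read values at the original keys.
import operator
import pandas as pd

df = pd.DataFrame({
    "t": pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03"]),
    "v": [1.0, 2.0, 3.0],
})
offset = pd.Timedelta(days=1)
indexed = df.set_index(["t"])
adjusted = df.assign(t=operator.add(df["t"], offset)).set_index(["t"])
print(adjusted["v"].reindex(indexed.index))  # lag by one day: NaN, 1.0, 2.0
```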
jason-weirather/pythologist | pythologist/__init__.py | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L409-L451 | def merge_scores(self,df_addition,reference_markers='all',
addition_markers='all',on=['project_name','sample_name','frame_name','cell_index']):
"""
Combine CellDataFrames that differ by score composition
Args:
df_addition (CellDataFrame): The CellDataFrame to merge scores in from
reference_markers (list): which scored call names to keep in this object (default: all)
addition_markers (list): which scored call names to merge in (default: all)
on (list): the features to merge cells on
Returns:
CellDataFrame,CellDataFrame: returns a passing CellDataFrame where merge criteria were met and a fail CellDataFrame where merge criteria were not met.
"""
if isinstance(reference_markers, str):
reference_markers = self.scored_names
elif reference_markers is None: reference_markers = []
if isinstance(addition_markers, str):
addition_markers = df_addition.scored_names
elif addition_markers is None: addition_markers = []
df_addition = df_addition.copy()
df_addition['_key'] = 1
df = self.merge(df_addition[['scored_calls','_key']+on].rename(columns={'scored_calls':'_addition'}),
on = on,
how = 'left'
)
df['_sub1'] = df['scored_calls'].apply(lambda x:
dict((k,x[k]) for k in reference_markers)
)
df['_sub2'] = df['_addition'].apply(lambda x:
dict({}) if x!=x else dict((k,x[k]) for k in addition_markers) # handle NaN where we fail to match; treat as empty
)
# combine the two dictionaries
df['scored_calls'] = df.apply(lambda x:
{**x['_sub1'],**x['_sub2']}
,1)
df = df.drop(columns=['_sub1','_sub2','_addition'])
df = df.drop(columns='_key').copy(),df[df['_key'].isna()].drop(columns='_key').copy()
if self.microns_per_pixel: df[0].microns_per_pixel = self.microns_per_pixel
if self.microns_per_pixel: df[1].microns_per_pixel = self.microns_per_pixel
return df | [
"def",
"merge_scores",
"(",
"self",
",",
"df_addition",
",",
"reference_markers",
"=",
"'all'",
",",
"addition_markers",
"=",
"'all'",
",",
"on",
"=",
"[",
"'project_name'",
",",
"'sample_name'",
",",
"'frame_name'",
",",
"'cell_index'",
"]",
")",
":",
"if",
"isinstance",
"(",
"reference_markers",
",",
"str",
")",
":",
"reference_markers",
"=",
"self",
".",
"scored_names",
"elif",
"reference_markers",
"is",
"None",
":",
"reference_markers",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"addition_markers",
",",
"str",
")",
":",
"addition_markers",
"=",
"df_addition",
".",
"scored_names",
"elif",
"addition_markers",
"is",
"None",
":",
"addition_markers",
"=",
"[",
"]",
"df_addition",
"=",
"df_addition",
".",
"copy",
"(",
")",
"df_addition",
"[",
"'_key'",
"]",
"=",
"1",
"df",
"=",
"self",
".",
"merge",
"(",
"df_addition",
"[",
"[",
"'scored_calls'",
",",
"'_key'",
"]",
"+",
"on",
"]",
".",
"rename",
"(",
"columns",
"=",
"{",
"'scored_calls'",
":",
"'_addition'",
"}",
")",
",",
"on",
"=",
"on",
",",
"how",
"=",
"'left'",
")",
"df",
"[",
"'_sub1'",
"]",
"=",
"df",
"[",
"'scored_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"dict",
"(",
"(",
"k",
",",
"x",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"reference_markers",
")",
")",
"df",
"[",
"'_sub2'",
"]",
"=",
"df",
"[",
"'_addition'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"dict",
"(",
"{",
"}",
")",
"if",
"x",
"!=",
"x",
"else",
"dict",
"(",
"(",
"k",
",",
"x",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"addition_markers",
")",
"# handle NaN where we fail to match properly treat as empty",
")",
"# combine the two dictionaries",
"df",
"[",
"'scored_calls'",
"]",
"=",
"df",
".",
"apply",
"(",
"lambda",
"x",
":",
"{",
"*",
"*",
"x",
"[",
"'_sub1'",
"]",
",",
"*",
"*",
"x",
"[",
"'_sub2'",
"]",
"}",
",",
"1",
")",
"df",
"=",
"df",
".",
"drop",
"(",
"columns",
"=",
"[",
"'_sub1'",
",",
"'_sub2'",
",",
"'_addition'",
"]",
")",
"df",
"=",
"df",
".",
"drop",
"(",
"columns",
"=",
"'_key'",
")",
".",
"copy",
"(",
")",
",",
"df",
"[",
"df",
"[",
"'_key'",
"]",
".",
"isna",
"(",
")",
"]",
".",
"drop",
"(",
"columns",
"=",
"'_key'",
")",
".",
"copy",
"(",
")",
"if",
"self",
".",
"microns_per_pixel",
":",
"df",
"[",
"0",
"]",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"if",
"self",
".",
"microns_per_pixel",
":",
"df",
"[",
"1",
"]",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"return",
"df"
] | Combine CellDataFrames that differ by score composition
Args:
df_addition (CellDataFrame): The CellDataFrame to merge scores in from
reference_markers (list): which scored call names to keep in this object (default: all)
addition_markers (list): which scored call names to merge in (default: all)
on (list): the features to merge cells on
Returns:
CellDataFrame,CellDataFrame: returns a passing CellDataFrame where merge criteria were met and a fail CellDataFrame where merge criteria were not met. | [
"Combine",
"CellDataFrames",
"that",
"differ",
"by",
"score",
"composition"
] | python | train |
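
The heart of merge_scores is a per-cell dictionary merge: subset each side's scored calls, then combine with `{**a, **b}`. A standalone sketch with hypothetical marker names:

```python
# Per-cell dictionary merge as done inside merge_scores (marker names
# are made up; the addition's values win on key collisions).
ref = {"CD3": "+", "CD8": "-"}   # this frame's scored_calls for one cell
add = {"PD1": "+", "CD3": "-"}   # the addition frame's scored_calls
reference_markers = ["CD3", "CD8"]
addition_markers = ["PD1"]
sub1 = {k: ref[k] for k in reference_markers}
sub2 = {k: add[k] for k in addition_markers}
print({**sub1, **sub2})  # {'CD3': '+', 'CD8': '-', 'PD1': '+'}
```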
saltstack/salt | salt/returners/cassandra_cql_return.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/cassandra_cql_return.py#L192-L243 | def returner(ret):
'''
Return data to one of potentially many clustered cassandra nodes
'''
query = '''INSERT INTO {keyspace}.salt_returns (
jid, minion_id, fun, alter_time, full_ret, return, success
) VALUES (?, ?, ?, ?, ?, ?, ?)'''.format(keyspace=_get_keyspace())
statement_arguments = ['{0}'.format(ret['jid']),
'{0}'.format(ret['id']),
'{0}'.format(ret['fun']),
int(time.time() * 1000),
salt.utils.json.dumps(ret).replace("'", "''"),
salt.utils.json.dumps(ret['return']).replace("'", "''"),
ret.get('success', False)]
# cassandra_cql.cql_query may raise a CommandExecutionError
try:
__salt__['cassandra_cql.cql_query_with_prepare'](query,
'returner_return',
tuple(statement_arguments),
asynchronous=True)
except CommandExecutionError:
log.critical('Could not insert into salt_returns with Cassandra returner.')
raise
except Exception as e:
log.critical('Unexpected error while inserting into salt_returns: %s', e)
raise
# Store the last function called by the minion
# The data in salt.minions will be used by get_fun and get_minions
query = '''INSERT INTO {keyspace}.minions (
minion_id, last_fun
) VALUES (?, ?)'''.format(keyspace=_get_keyspace())
statement_arguments = ['{0}'.format(ret['id']), '{0}'.format(ret['fun'])]
# cassandra_cql.cql_query may raise a CommandExecutionError
try:
__salt__['cassandra_cql.cql_query_with_prepare'](query,
'returner_minion',
tuple(statement_arguments),
asynchronous=True)
except CommandExecutionError:
log.critical('Could not store minion ID with Cassandra returner.')
raise
except Exception as e:
log.critical(
'Unexpected error while inserting minion ID into the minions '
'table: %s', e
)
raise | [
"def",
"returner",
"(",
"ret",
")",
":",
"query",
"=",
"'''INSERT INTO {keyspace}.salt_returns (\n jid, minion_id, fun, alter_time, full_ret, return, success\n ) VALUES (?, ?, ?, ?, ?, ?, ?)'''",
".",
"format",
"(",
"keyspace",
"=",
"_get_keyspace",
"(",
")",
")",
"statement_arguments",
"=",
"[",
"'{0}'",
".",
"format",
"(",
"ret",
"[",
"'jid'",
"]",
")",
",",
"'{0}'",
".",
"format",
"(",
"ret",
"[",
"'id'",
"]",
")",
",",
"'{0}'",
".",
"format",
"(",
"ret",
"[",
"'fun'",
"]",
")",
",",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
",",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"ret",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"''\"",
")",
",",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"ret",
"[",
"'return'",
"]",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"''\"",
")",
",",
"ret",
".",
"get",
"(",
"'success'",
",",
"False",
")",
"]",
"# cassandra_cql.cql_query may raise a CommandExecutionError",
"try",
":",
"__salt__",
"[",
"'cassandra_cql.cql_query_with_prepare'",
"]",
"(",
"query",
",",
"'returner_return'",
",",
"tuple",
"(",
"statement_arguments",
")",
",",
"asynchronous",
"=",
"True",
")",
"except",
"CommandExecutionError",
":",
"log",
".",
"critical",
"(",
"'Could not insert into salt_returns with Cassandra returner.'",
")",
"raise",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"critical",
"(",
"'Unexpected error while inserting into salt_returns: %s'",
",",
"e",
")",
"raise",
"# Store the last function called by the minion",
"# The data in salt.minions will be used by get_fun and get_minions",
"query",
"=",
"'''INSERT INTO {keyspace}.minions (\n minion_id, last_fun\n ) VALUES (?, ?)'''",
".",
"format",
"(",
"keyspace",
"=",
"_get_keyspace",
"(",
")",
")",
"statement_arguments",
"=",
"[",
"'{0}'",
".",
"format",
"(",
"ret",
"[",
"'id'",
"]",
")",
",",
"'{0}'",
".",
"format",
"(",
"ret",
"[",
"'fun'",
"]",
")",
"]",
"# cassandra_cql.cql_query may raise a CommandExecutionError",
"try",
":",
"__salt__",
"[",
"'cassandra_cql.cql_query_with_prepare'",
"]",
"(",
"query",
",",
"'returner_minion'",
",",
"tuple",
"(",
"statement_arguments",
")",
",",
"asynchronous",
"=",
"True",
")",
"except",
"CommandExecutionError",
":",
"log",
".",
"critical",
"(",
"'Could not store minion ID with Cassandra returner.'",
")",
"raise",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"critical",
"(",
"'Unexpected error while inserting minion ID into the minions '",
"'table: %s'",
",",
"e",
")",
"raise"
] | Return data to one of potentially many clustered cassandra nodes | [
"Return",
"data",
"to",
"one",
"of",
"potentially",
"many",
"clustered",
"cassandra",
"nodes"
] | python | train |
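
Nothing Cassandra-specific is needed to see how the returner prepares its statement arguments; a sketch with a fabricated ret dictionary and no live connection:

```python
# Statement-argument preparation as in the returner: JSON-encode the
# payloads and double any single quotes for CQL.
import json
import time

ret = {"jid": "20240101", "id": "minion1", "fun": "test.ping",
       "return": True, "success": True}
args = ["{0}".format(ret["jid"]),
        "{0}".format(ret["id"]),
        "{0}".format(ret["fun"]),
        int(time.time() * 1000),
        json.dumps(ret).replace("'", "''"),
        json.dumps(ret["return"]).replace("'", "''"),
        ret.get("success", False)]
print(args)
```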
wandb/client | wandb/apis/internal.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L445-L488 | def run_config(self, project, run=None, entity=None):
"""Get the relevant configs for a run
Args:
project (str): The project to download, (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to.
"""
query = gql('''
query Model($name: String!, $entity: String!, $run: String!) {
model(name: $name, entityName: $entity) {
bucket(name: $run) {
config
commit
patch
files(names: ["wandb-metadata.json"]) {
edges {
node {
url
}
}
}
}
}
}
''')
response = self.gql(query, variable_values={
'name': project, 'run': run, 'entity': entity
})
if response['model'] == None:
raise ValueError("Run {}/{}/{} not found".format(entity, project, run) )
run = response['model']['bucket']
commit = run['commit']
patch = run['patch']
config = json.loads(run['config'] or '{}')
if len(run['files']['edges']) > 0:
url = run['files']['edges'][0]['node']['url']
res = requests.get(url)
res.raise_for_status()
metadata = res.json()
else:
metadata = {}
return (commit, config, patch, metadata) | [
"def",
"run_config",
"(",
"self",
",",
"project",
",",
"run",
"=",
"None",
",",
"entity",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Model($name: String!, $entity: String!, $run: String!) {\n model(name: $name, entityName: $entity) {\n bucket(name: $run) {\n config\n commit\n patch\n files(names: [\"wandb-metadata.json\"]) {\n edges {\n node {\n url\n }\n }\n }\n }\n }\n }\n '''",
")",
"response",
"=",
"self",
".",
"gql",
"(",
"query",
",",
"variable_values",
"=",
"{",
"'name'",
":",
"project",
",",
"'run'",
":",
"run",
",",
"'entity'",
":",
"entity",
"}",
")",
"if",
"response",
"[",
"'model'",
"]",
"==",
"None",
":",
"raise",
"ValueError",
"(",
"\"Run {}/{}/{} not found\"",
".",
"format",
"(",
"entity",
",",
"project",
",",
"run",
")",
")",
"run",
"=",
"response",
"[",
"'model'",
"]",
"[",
"'bucket'",
"]",
"commit",
"=",
"run",
"[",
"'commit'",
"]",
"patch",
"=",
"run",
"[",
"'patch'",
"]",
"config",
"=",
"json",
".",
"loads",
"(",
"run",
"[",
"'config'",
"]",
"or",
"'{}'",
")",
"if",
"len",
"(",
"run",
"[",
"'files'",
"]",
"[",
"'edges'",
"]",
")",
">",
"0",
":",
"url",
"=",
"run",
"[",
"'files'",
"]",
"[",
"'edges'",
"]",
"[",
"0",
"]",
"[",
"'node'",
"]",
"[",
"'url'",
"]",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"res",
".",
"raise_for_status",
"(",
")",
"metadata",
"=",
"res",
".",
"json",
"(",
")",
"else",
":",
"metadata",
"=",
"{",
"}",
"return",
"(",
"commit",
",",
"config",
",",
"patch",
",",
"metadata",
")"
] | Get the relevant configs for a run
Args:
project (str): The project to download, (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to. | [
"Get",
"the",
"relevant",
"configs",
"for",
"a",
"run"
] | python | train |
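
The response handling in run_config is easiest to see against a mocked response; the shape below is copied from the GraphQL query above, with placeholder values:

```python
# Mocked-response sketch of the unpacking in run_config.
import json

response = {"model": {"bucket": {
    "config": json.dumps({"lr": 0.01}),
    "commit": "abc123",
    "patch": None,
    "files": {"edges": []},
}}}
run = response["model"]["bucket"]
config = json.loads(run["config"] or "{}")
metadata = {}  # no wandb-metadata.json edge in this mock
print((run["commit"], config, run["patch"], metadata))
```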
fm4d/PyMarkovTextGenerator | markov.py | https://github.com/fm4d/PyMarkovTextGenerator/blob/4a7e8e2cfe14c9745aba6b9df7d7b402a9029a37/markov.py#L157-L168 | def remove_chain(self, name):
"""
Remove chain from current shelve file
Args:
name: chain name
"""
if name in self.chains:
delattr(self.chains, name)
else:
raise ValueError("Chain with this name not found") | [
"def",
"remove_chain",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"chains",
":",
"delattr",
"(",
"self",
".",
"chains",
",",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Chain with this name not found\"",
")"
] | Remove chain from current shelve file
Args:
name: chain name | [
"Remove",
"chain",
"from",
"current",
"shelve",
"file"
] | python | test |
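
A standalone sketch of the guarded-removal pattern in remove_chain, using a plain dict in place of the shelve. Note the original calls delattr on self.chains, which would fail on a dict or shelve; `del chains[name]` is the usual idiom:

```python
# Guarded removal with a dict standing in for the shelve file.
chains = {"news": object()}
name = "news"
if name in chains:
    del chains[name]
else:
    raise ValueError("Chain with this name not found")
print(chains)  # {}
```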
bigchaindb/bigchaindb | bigchaindb/lib.py | https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/lib.py#L453-L459 | def store_validator_set(self, height, validators):
"""Store validator set at a given `height`.
NOTE: If the validator set already exists at that `height` then an
exception will be raised.
"""
return backend.query.store_validator_set(self.connection, {'height': height,
'validators': validators}) | [
"def",
"store_validator_set",
"(",
"self",
",",
"height",
",",
"validators",
")",
":",
"return",
"backend",
".",
"query",
".",
"store_validator_set",
"(",
"self",
".",
"connection",
",",
"{",
"'height'",
":",
"height",
",",
"'validators'",
":",
"validators",
"}",
")"
] | Store validator set at a given `height`.
NOTE: If the validator set already exists at that `height` then an
exception will be raised. | [
"Store",
"validator",
"set",
"at",
"a",
"given",
"height",
".",
"NOTE",
":",
"If",
"the",
"validator",
"set",
"already",
"exists",
"at",
"that",
"height",
"then",
"an",
"exception",
"will",
"be",
"raised",
"."
] | python | train |
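
For reference, the record handed to backend.query.store_validator_set has this shape; the height and validator entry below are placeholders, not values from a real network:

```python
# Illustrative shape of the stored validator-set record.
record = {
    "height": 3,
    "validators": [
        {"public_key": "ab12...", "voting_power": 10},  # placeholder entry
    ],
}
```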
gem/oq-engine | openquake/hazardlib/gsim/utils_swiss_gmpe.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/utils_swiss_gmpe.py#L55-L78 | def _compute_phi_ss(C, mag, c1_dists, log_phi_ss, mean_phi_ss):
"""
Returns the embedded logic tree for single station sigma
as defined to be used in the Swiss Hazard Model 2014:
the single station sigma branching levels combines with equal
weights: the phi_ss reported as function of magnitude
as proposed by Rodriguez-Marek et al (2013) with the mean
(mean_phi_ss) single station value;
the resulted phi_ss is in natural logarithm units
"""
phi_ss = 0
if mag < C['Mc1']:
phi_ss = c1_dists
elif mag >= C['Mc1'] and mag <= C['Mc2']:
phi_ss = c1_dists + \
(C['C2'] - c1_dists) * \
((mag - C['Mc1']) / (C['Mc2'] - C['Mc1']))
elif mag > C['Mc2']:
phi_ss = C['C2']
return (phi_ss * 0.50 + mean_phi_ss * 0.50) / log_phi_ss | [
"def",
"_compute_phi_ss",
"(",
"C",
",",
"mag",
",",
"c1_dists",
",",
"log_phi_ss",
",",
"mean_phi_ss",
")",
":",
"phi_ss",
"=",
"0",
"if",
"mag",
"<",
"C",
"[",
"'Mc1'",
"]",
":",
"phi_ss",
"=",
"c1_dists",
"elif",
"mag",
">=",
"C",
"[",
"'Mc1'",
"]",
"and",
"mag",
"<=",
"C",
"[",
"'Mc2'",
"]",
":",
"phi_ss",
"=",
"c1_dists",
"+",
"(",
"C",
"[",
"'C2'",
"]",
"-",
"c1_dists",
")",
"*",
"(",
"(",
"mag",
"-",
"C",
"[",
"'Mc1'",
"]",
")",
"/",
"(",
"C",
"[",
"'Mc2'",
"]",
"-",
"C",
"[",
"'Mc1'",
"]",
")",
")",
"elif",
"mag",
">",
"C",
"[",
"'Mc2'",
"]",
":",
"phi_ss",
"=",
"C",
"[",
"'C2'",
"]",
"return",
"(",
"phi_ss",
"*",
"0.50",
"+",
"mean_phi_ss",
"*",
"0.50",
")",
"/",
"log_phi_ss"
] | Returns the embedded logic tree for single station sigma
as defined to be used in the Swiss Hazard Model 2014:
the single station sigma branching levels combines with equal
weights: the phi_ss reported as function of magnitude
as proposed by Rodriguez-Marek et al (2013) with the mean
(mean_phi_ss) single station value;
the resulting phi_ss is in natural logarithm units | [
"Returns",
"the",
"embeded",
"logic",
"tree",
"for",
"single",
"station",
"sigma",
"as",
"defined",
"to",
"be",
"used",
"in",
"the",
"Swiss",
"Hazard",
"Model",
"2014",
":",
"the",
"single",
"station",
"sigma",
"branching",
"levels",
"combines",
"with",
"equal",
"weights",
":",
"the",
"phi_ss",
"reported",
"as",
"function",
"of",
"magnitude",
"as",
"proposed",
"by",
"Rodriguez",
"-",
"Marek",
"et",
"al",
"(",
"2013",
")",
"with",
"the",
"mean",
"(",
"mean_phi_ss",
")",
"single",
"station",
"value",
";",
"the",
"resulted",
"phi_ss",
"is",
"in",
"natural",
"logarithm",
"units"
] | python | train |
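
The piecewise interpolation in _compute_phi_ss can be exercised on its own; the coefficient values below are made up for illustration only:

```python
# Standalone run of the piecewise phi_ss interpolation (fabricated
# coefficients): constant below Mc1, linear between Mc1 and Mc2,
# constant above Mc2, then averaged with the mean single-station value.
C = {"Mc1": 5.0, "Mc2": 7.0, "C2": 0.35}
c1_dists, mean_phi_ss, log_phi_ss = 0.46, 0.40, 1.0
for mag in (4.5, 6.0, 7.5):
    if mag < C["Mc1"]:
        phi_ss = c1_dists
    elif mag <= C["Mc2"]:
        phi_ss = c1_dists + (C["C2"] - c1_dists) * (
            (mag - C["Mc1"]) / (C["Mc2"] - C["Mc1"]))
    else:
        phi_ss = C["C2"]
    print(mag, (phi_ss * 0.50 + mean_phi_ss * 0.50) / log_phi_ss)
```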
google/grr | grr/server/grr_response_server/data_store.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/data_store.py#L612-L645 | def LockRetryWrapper(self,
subject,
retrywrap_timeout=1,
retrywrap_max_timeout=10,
blocking=True,
lease_time=None):
"""Retry a DBSubjectLock until it succeeds.
Args:
subject: The subject which the lock applies to.
retrywrap_timeout: How long to wait before retrying the lock.
retrywrap_max_timeout: The maximum time to wait for a retry until we
raise.
blocking: If False, raise on first lock failure.
lease_time: lock lease time in seconds.
Returns:
The DBSubjectLock object
Raises:
DBSubjectLockError: If the maximum retry count has been reached.
"""
timeout = 0
while timeout < retrywrap_max_timeout:
try:
return self.DBSubjectLock(subject, lease_time=lease_time)
except DBSubjectLockError:
if not blocking:
raise
stats_collector_instance.Get().IncrementCounter("datastore_retries")
time.sleep(retrywrap_timeout)
timeout += retrywrap_timeout
raise DBSubjectLockError("Retry number exceeded.") | [
"def",
"LockRetryWrapper",
"(",
"self",
",",
"subject",
",",
"retrywrap_timeout",
"=",
"1",
",",
"retrywrap_max_timeout",
"=",
"10",
",",
"blocking",
"=",
"True",
",",
"lease_time",
"=",
"None",
")",
":",
"timeout",
"=",
"0",
"while",
"timeout",
"<",
"retrywrap_max_timeout",
":",
"try",
":",
"return",
"self",
".",
"DBSubjectLock",
"(",
"subject",
",",
"lease_time",
"=",
"lease_time",
")",
"except",
"DBSubjectLockError",
":",
"if",
"not",
"blocking",
":",
"raise",
"stats_collector_instance",
".",
"Get",
"(",
")",
".",
"IncrementCounter",
"(",
"\"datastore_retries\"",
")",
"time",
".",
"sleep",
"(",
"retrywrap_timeout",
")",
"timeout",
"+=",
"retrywrap_timeout",
"raise",
"DBSubjectLockError",
"(",
"\"Retry number exceeded.\"",
")"
] | Retry a DBSubjectLock until it succeeds.
Args:
subject: The subject which the lock applies to.
retrywrap_timeout: How long to wait before retrying the lock.
retrywrap_max_timeout: The maximum time to wait for a retry until we
raise.
blocking: If False, raise on first lock failure.
lease_time: lock lease time in seconds.
Returns:
The DBSubjectLock object
Raises:
DBSubjectLockError: If the maximum retry count has been reached. | [
"Retry",
"a",
"DBSubjectLock",
"until",
"it",
"succeeds",
"."
] | python | train |
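
A generic sketch of the same bounded retry loop, with a fake lock that succeeds on the third attempt standing in for DBSubjectLock:

```python
# Bounded retry loop as in LockRetryWrapper (no GRR datastore needed).
import time

class LockError(Exception):
    pass

attempts = {"n": 0}

def try_lock():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise LockError("locked")
    return "lock-handle"

def lock_retry(retry_timeout=0.01, max_timeout=0.1):
    waited = 0
    while waited < max_timeout:
        try:
            return try_lock()
        except LockError:
            time.sleep(retry_timeout)
            waited += retry_timeout
    raise LockError("Retry number exceeded.")

print(lock_retry())  # 'lock-handle' on the third attempt
```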
SHTOOLS/SHTOOLS | pyshtools/shclasses/shgravcoeffs.py | https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shgravcoeffs.py#L248-L317 | def from_zeros(self, lmax, gm, r0, omega=None, errors=False,
normalization='4pi', csphase=1):
"""
Initialize the class with spherical harmonic coefficients set to zero
from degree 1 to lmax, and set the degree 0 term to 1.
Usage
-----
x = SHGravCoeffs.from_zeros(lmax, gm, r0, [omega, errors,
normalization, csphase])
Returns
-------
x : SHGravCoeffs class instance.
Parameters
----------
lmax : int
The maximum spherical harmonic degree l of the coefficients.
gm : float
The gravitational constant times the mass that is associated with
the gravitational potential coefficients.
r0 : float
The reference radius of the spherical harmonic coefficients.
omega : float, optional, default = None
The angular rotation rate of the body.
errors : bool, optional, default = False
If True, initialize the attribute errors with zeros.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
"""
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', "
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients "
"are stable only for degrees less than or equal "
"to 85. lmax for the coefficients will be set to "
"85. Input value was {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
coeffs = _np.zeros((2, lmax + 1, lmax + 1))
coeffs[0, 0, 0] = 1.0
if errors is False:
clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
normalization=normalization.lower(),
csphase=csphase)
else:
clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
errors=_np.zeros((2, lmax + 1, lmax + 1)),
normalization=normalization.lower(),
csphase=csphase)
return clm | [
"def",
"from_zeros",
"(",
"self",
",",
"lmax",
",",
"gm",
",",
"r0",
",",
"omega",
"=",
"None",
",",
"errors",
"=",
"False",
",",
"normalization",
"=",
"'4pi'",
",",
"csphase",
"=",
"1",
")",
":",
"if",
"normalization",
".",
"lower",
"(",
")",
"not",
"in",
"(",
"'4pi'",
",",
"'ortho'",
",",
"'schmidt'",
",",
"'unnorm'",
")",
":",
"raise",
"ValueError",
"(",
"\"The normalization must be '4pi', 'ortho', 'schmidt', \"",
"\"or 'unnorm'. Input value was {:s}.\"",
".",
"format",
"(",
"repr",
"(",
"normalization",
")",
")",
")",
"if",
"csphase",
"!=",
"1",
"and",
"csphase",
"!=",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"csphase must be either 1 or -1. Input value was {:s}.\"",
".",
"format",
"(",
"repr",
"(",
"csphase",
")",
")",
")",
"if",
"normalization",
".",
"lower",
"(",
")",
"==",
"'unnorm'",
"and",
"lmax",
">",
"85",
":",
"_warnings",
".",
"warn",
"(",
"\"Calculations using unnormalized coefficients \"",
"\"are stable only for degrees less than or equal \"",
"\"to 85. lmax for the coefficients will be set to \"",
"\"85. Input value was {:d}.\"",
".",
"format",
"(",
"lmax",
")",
",",
"category",
"=",
"RuntimeWarning",
")",
"lmax",
"=",
"85",
"coeffs",
"=",
"_np",
".",
"zeros",
"(",
"(",
"2",
",",
"lmax",
"+",
"1",
",",
"lmax",
"+",
"1",
")",
")",
"coeffs",
"[",
"0",
",",
"0",
",",
"0",
"]",
"=",
"1.0",
"if",
"errors",
"is",
"False",
":",
"clm",
"=",
"SHGravRealCoeffs",
"(",
"coeffs",
",",
"gm",
"=",
"gm",
",",
"r0",
"=",
"r0",
",",
"omega",
"=",
"omega",
",",
"normalization",
"=",
"normalization",
".",
"lower",
"(",
")",
",",
"csphase",
"=",
"csphase",
")",
"else",
":",
"clm",
"=",
"SHGravRealCoeffs",
"(",
"coeffs",
",",
"gm",
"=",
"gm",
",",
"r0",
"=",
"r0",
",",
"omega",
"=",
"omega",
",",
"errors",
"=",
"_np",
".",
"zeros",
"(",
"(",
"2",
",",
"lmax",
"+",
"1",
",",
"lmax",
"+",
"1",
")",
")",
",",
"normalization",
"=",
"normalization",
".",
"lower",
"(",
")",
",",
"csphase",
"=",
"csphase",
")",
"return",
"clm"
] | Initialize the class with spherical harmonic coefficients set to zero
from degree 1 to lmax, and set the degree 0 term to 1.
Usage
-----
x = SHGravCoeffs.from_zeros(lmax, gm, r0, [omega, errors,
normalization, csphase])
Returns
-------
x : SHGravCoeffs class instance.
Parameters
----------
lmax : int
The maximum spherical harmonic degree l of the coefficients.
gm : float
The gravitational constant times the mass that is associated with
the gravitational potential coefficients.
r0 : float
The reference radius of the spherical harmonic coefficients.
omega : float, optional, default = None
The angular rotation rate of the body.
errors : bool, optional, default = False
If True, initialize the attribute errors with zeros.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it. | [
"Initialize",
"the",
"class",
"with",
"spherical",
"harmonic",
"coefficients",
"set",
"to",
"zero",
"from",
"degree",
"1",
"to",
"lmax",
"and",
"set",
"the",
"degree",
"0",
"term",
"to",
"1",
"."
] | python | train |
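
Stripped of the validation, from_zeros builds a zero coefficient array with only the degree-0 term set; a NumPy sketch:

```python
# The coefficient array from_zeros constructs: all zeros except the
# degree-0 term, shape (2, lmax + 1, lmax + 1).
import numpy as np

lmax = 4
coeffs = np.zeros((2, lmax + 1, lmax + 1))
coeffs[0, 0, 0] = 1.0
print(coeffs.shape, coeffs[0, 0, 0])  # (2, 5, 5) 1.0
```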
gbiggs/rtctree | rtctree/ports.py | https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/ports.py#L192-L205 | def connections(self):
'''A list of connections to or from this port.
This list will be created at the first reference to this property.
This means that the first reference may be delayed by CORBA calls,
but others will return quickly (unless a delayed reparse has been
triggered).
'''
with self._mutex:
if not self._connections:
self._connections = [Connection(cp, self) \
for cp in self._obj.get_connector_profiles()]
return self._connections | [
"def",
"connections",
"(",
"self",
")",
":",
"with",
"self",
".",
"_mutex",
":",
"if",
"not",
"self",
".",
"_connections",
":",
"self",
".",
"_connections",
"=",
"[",
"Connection",
"(",
"cp",
",",
"self",
")",
"for",
"cp",
"in",
"self",
".",
"_obj",
".",
"get_connector_profiles",
"(",
")",
"]",
"return",
"self",
".",
"_connections"
] | A list of connections to or from this port.
This list will be created at the first reference to this property.
This means that the first reference may be delayed by CORBA calls,
but others will return quickly (unless a delayed reparse has been
triggered). | [
"A",
"list",
"of",
"connections",
"to",
"or",
"from",
"this",
"port",
"."
] | python | train |
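
The connections property is a lock-guarded lazy cache. A self-contained sketch of the pattern, with a list literal standing in for the CORBA get_connector_profiles() calls:

```python
# Lock-guarded lazy property: compute once under the mutex, then reuse.
import threading

class Port(object):
    def __init__(self):
        self._mutex = threading.Lock()
        self._connections = None

    @property
    def connections(self):
        with self._mutex:
            if not self._connections:
                self._connections = ["conn-1"]  # stands in for CORBA calls
            return self._connections

print(Port().connections)  # ['conn-1']
```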
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2271-L2305 | def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5):
"""
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
square root of the sum of squares of all elements of the input along C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.l2normalize
spec_layer_params.epsilon = epsilon | [
"def",
"add_l2_normalize",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"epsilon",
"=",
"1e-5",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"l2normalize",
"spec_layer_params",
".",
"epsilon",
"=",
"epsilon"
] | Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
square root of the sum of squares of all elements of the input along C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn | [
"Add",
"L2",
"normalize",
"layer",
".",
"Normalizes",
"the",
"input",
"by",
"the",
"L2",
"norm",
"i",
".",
"e",
".",
"divides",
"by",
"the",
"the",
"square",
"root",
"of",
"the",
"sum",
"of",
"squares",
"of",
"all",
"elements",
"of",
"the",
"input",
"along",
"C",
"H",
"and",
"W",
"dimensions",
"."
] | python | train |
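
What the layer computes, as a NumPy sketch: divide the input by the L2 norm over C, H and W. Where Core ML applies epsilon exactly is not specified here; this sketch adds it under the square root:

```python
# L2-normalize over C, H, W with a small epsilon (placement assumed).
import numpy as np

x = np.arange(24, dtype=float).reshape(2, 3, 4)  # C, H, W
eps = 1e-5
y = x / np.sqrt((x ** 2).sum() + eps)
print(np.sqrt((y ** 2).sum()))  # ~1.0
```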
inveniosoftware-attic/invenio-utils | invenio_utils/orcid.py | https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/orcid.py#L26-L41 | def search_authors(self, query):
query = query.replace(" ", "+")
"""
FIXME: Don't create a process to do this!
"""
p = subprocess.Popen("curl -H 'Accept: application/orcid+json' \
'http://pub.sandbox-1.orcid.org/search/orcid-bio?q=" +
query + "&start=0&rows=10'",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
jsonResults = ""
for line in p.stdout.readlines():
jsonResults = line
self.authorsDict = json.loads(jsonResults) | [
"def",
"search_authors",
"(",
"self",
",",
"query",
")",
":",
"query",
"=",
"query",
".",
"replace",
"(",
"\" \"",
",",
"\"+\"",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"\"curl -H 'Accept: application/orcid+json' \\\n 'http://pub.sandbox-1.orcid.org/search/orcid-bio?q=\"",
"+",
"query",
"+",
"\"&start=0&rows=10'\"",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"jsonResults",
"=",
"\"\"",
"for",
"line",
"in",
"p",
".",
"stdout",
".",
"readlines",
"(",
")",
":",
"jsonResults",
"=",
"line",
"self",
".",
"authorsDict",
"=",
"json",
".",
"loads",
"(",
"jsonResults",
")"
] | FIXME: Don't create a process to do this! | [
"FIXME",
":",
"Don",
"t",
"create",
"a",
"process",
"to",
"do",
"this!"
] | python | train |
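
The FIXME asks for the query without spawning a curl process; a requests-based sketch of the same call (endpoint and header copied from the code above; the sandbox host may no longer respond):

```python
# The same ORCID search via requests instead of a curl subprocess.
import requests

def search_authors(query):
    url = ("http://pub.sandbox-1.orcid.org/search/orcid-bio"
           "?q={0}&start=0&rows=10".format(query.replace(" ", "+")))
    resp = requests.get(url, headers={"Accept": "application/orcid+json"})
    return resp.json()
```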
etobella/python-xmlsig | src/xmlsig/signature_context.py | https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L171-L205 | def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found') | [
"def",
"transform",
"(",
"self",
",",
"transform",
",",
"node",
")",
":",
"method",
"=",
"transform",
".",
"get",
"(",
"'Algorithm'",
")",
"if",
"method",
"not",
"in",
"constants",
".",
"TransformUsageDSigTransform",
":",
"raise",
"Exception",
"(",
"'Method not allowed'",
")",
"# C14N methods are allowed",
"if",
"method",
"in",
"constants",
".",
"TransformUsageC14NMethod",
":",
"return",
"self",
".",
"canonicalization",
"(",
"method",
",",
"etree",
".",
"fromstring",
"(",
"node",
")",
")",
"# Enveloped method removes the Signature Node from the element",
"if",
"method",
"==",
"constants",
".",
"TransformEnveloped",
":",
"tree",
"=",
"transform",
".",
"getroottree",
"(",
")",
"root",
"=",
"etree",
".",
"fromstring",
"(",
"node",
")",
"signature",
"=",
"root",
".",
"find",
"(",
"tree",
".",
"getelementpath",
"(",
"transform",
".",
"getparent",
"(",
")",
".",
"getparent",
"(",
")",
".",
"getparent",
"(",
")",
".",
"getparent",
"(",
")",
")",
")",
"root",
".",
"remove",
"(",
"signature",
")",
"return",
"self",
".",
"canonicalization",
"(",
"constants",
".",
"TransformInclC14N",
",",
"root",
")",
"if",
"method",
"==",
"constants",
".",
"TransformBase64",
":",
"try",
":",
"root",
"=",
"etree",
".",
"fromstring",
"(",
"node",
")",
"return",
"base64",
".",
"b64decode",
"(",
"root",
".",
"text",
")",
"except",
"Exception",
":",
"return",
"base64",
".",
"b64decode",
"(",
"node",
")",
"raise",
"Exception",
"(",
"'Method not found'",
")"
] | Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String | [
"Transforms",
"a",
"node",
"following",
"the",
"transform",
"especification",
":",
"param",
"transform",
":",
"Transform",
"node",
":",
"type",
"transform",
":",
"lxml",
".",
"etree",
".",
"Element",
":",
"param",
"node",
":",
"Element",
"to",
"transform",
":",
"type",
"node",
":",
"str",
":",
"return",
":",
"Transformed",
"node",
"in",
"a",
"String"
] | python | train |
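
The Base64 branch of transform is small enough to exercise on its own; a minimal standalone sketch:

```python
# Base64 transform branch: decode the element's text, falling back to
# decoding the raw input if it is not parseable XML.
import base64
from lxml import etree

def base64_transform(node):
    try:
        root = etree.fromstring(node)
        return base64.b64decode(root.text)
    except Exception:
        return base64.b64decode(node)

print(base64_transform(b"<data>aGVsbG8=</data>"))  # b'hello'
```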
PmagPy/PmagPy | programs/deprecated/basemap_magic.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/basemap_magic.py#L17-L169 | def main():
"""
NAME
basemap_magic.py
NB: this program is no longer maintained - use plot_map_pts.py for greater functionality
DESCRIPTION
makes a map of locations in er_sites.txt
SYNTAX
basemap_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f SFILE, specify er_sites.txt or pmag_results.txt format file
-res [c,l,i,h] specify resolution (crude,low,intermediate,high)
-etp plot the etopo20 topographic mesh
-pad [LAT LON] pad bounding box by LAT/LON (default is [.5 .5] degrees)
-grd SPACE specify grid spacing
-prj [lcc], specify projection (lcc=lambert conformal conic), default is mercator
-n print site names (default is not)
-l print location names (default is not)
-o color ocean blue/land green (default is not)
-R don't plot details of rivers
-B don't plot national/state boundaries, etc.
-sav save plot and quit quietly
-fmt [png,svg,eps,jpg,pdf] specify format for output, default is pdf
DEFAULTS
SFILE: 'er_sites.txt'
resolution: intermediate
saved images are in pdf
"""
dir_path = '.'
sites_file = 'er_sites.txt'
ocean = 0
res = 'i'
proj = 'merc'
prn_name = 0
prn_loc = 0
fancy = 0
rivers, boundaries = 0, 0
padlon, padlat, gridspace, details = .5, .5, .5, 1
fmt = 'pdf'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index('-f')
sites_file = sys.argv[ind+1]
if '-res' in sys.argv:
ind = sys.argv.index('-res')
res = sys.argv[ind+1]
if '-etp' in sys.argv:
fancy = 1
if '-n' in sys.argv:
prn_name = 1
if '-l' in sys.argv:
prn_loc = 1
if '-o' in sys.argv:
ocean = 1
if '-R' in sys.argv:
rivers = 0
if '-B' in sys.argv:
boundaries = 0
if '-prj' in sys.argv:
ind = sys.argv.index('-prj')
proj = sys.argv[ind+1]
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
verbose = pmagplotlib.verbose
if '-sav' in sys.argv:
verbose = 0
if '-pad' in sys.argv:
ind = sys.argv.index('-pad')
padlat = float(sys.argv[ind+1])
padlon = float(sys.argv[ind+2])
if '-grd' in sys.argv:
ind = sys.argv.index('-grd')
gridspace = float(sys.argv[ind+1])
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
sites_file = dir_path+'/'+sites_file
location = ""
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
# read in er_sites file
Sites, file_type = pmag.magic_read(sites_file)
if 'results' in file_type:
latkey = 'average_lat'
lonkey = 'average_lon'
namekey = 'pmag_result_name'
lockey = 'er_location_names'
else:
latkey = 'site_lat'
lonkey = 'site_lon'
namekey = 'er_site_name'
lockey = 'er_location_name'
lats, lons = [], []
slats, slons = [], []
names, locs = [], []
for site in Sites:
if prn_loc == 1 and location == "":
location = site['er_location_name']
lats.append(float(site[latkey]))
l = float(site[lonkey])
if l < 0:
l = l+360. # make positive
lons.append(l)
if prn_name == 1:
names.append(site[namekey])
if prn_loc == 1:
locs.append(site[lockey])
for lat in lats:
slats.append(lat)
for lon in lons:
slons.append(lon)
Opts = {'res': res, 'proj': proj, 'loc_name': locs, 'padlon': padlon, 'padlat': padlat, 'latmin': numpy.min(slats)-padlat, 'latmax': numpy.max(
slats)+padlat, 'lonmin': numpy.min(slons)-padlon, 'lonmax': numpy.max(slons)+padlon, 'sym': 'ro', 'boundinglat': 0., 'pltgrid': 1.}
Opts['lon_0'] = 0.5*(numpy.min(slons)+numpy.max(slons))
Opts['lat_0'] = 0.5*(numpy.min(slats)+numpy.max(slats))
Opts['names'] = names
Opts['gridspace'] = gridspace
Opts['details'] = {'coasts': 1, 'rivers': 1,
'states': 1, 'countries': 1, 'ocean': 0}
if ocean == 1:
Opts['details']['ocean'] = 1
if rivers == 1:
Opts['details']['rivers'] = 0
if boundaries == 1:
Opts['details']['states'] = 0
Opts['details']['countries'] = 0
Opts['details']['fancy'] = fancy
pmagplotlib.plot_map(FIG['map'], lats, lons, Opts)
if verbose:
pmagplotlib.draw_figs(FIG)
files = {}
for key in list(FIG.keys()):
files[key] = 'Site_map'+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['map'] = 'Site Map'
FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
pmagplotlib.save_plots(FIG, files)
elif verbose:
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
pmagplotlib.save_plots(FIG, files)
else:
pmagplotlib.save_plots(FIG, files) | [
"def",
"main",
"(",
")",
":",
"dir_path",
"=",
"'.'",
"sites_file",
"=",
"'er_sites.txt'",
"ocean",
"=",
"0",
"res",
"=",
"'i'",
"proj",
"=",
"'merc'",
"prn_name",
"=",
"0",
"prn_loc",
"=",
"0",
"fancy",
"=",
"0",
"rivers",
",",
"boundaries",
"=",
"0",
",",
"0",
"padlon",
",",
"padlat",
",",
"gridspace",
",",
"details",
"=",
".5",
",",
".5",
",",
".5",
",",
"1",
"fmt",
"=",
"'pdf'",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-f'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-f'",
")",
"sites_file",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-res'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-res'",
")",
"res",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-etp'",
"in",
"sys",
".",
"argv",
":",
"fancy",
"=",
"1",
"if",
"'-n'",
"in",
"sys",
".",
"argv",
":",
"prn_name",
"=",
"1",
"if",
"'-l'",
"in",
"sys",
".",
"argv",
":",
"prn_loc",
"=",
"1",
"if",
"'-o'",
"in",
"sys",
".",
"argv",
":",
"ocean",
"=",
"1",
"if",
"'-R'",
"in",
"sys",
".",
"argv",
":",
"rivers",
"=",
"0",
"if",
"'-B'",
"in",
"sys",
".",
"argv",
":",
"boundaries",
"=",
"0",
"if",
"'-prj'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-prj'",
")",
"proj",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-fmt'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-fmt'",
")",
"fmt",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"verbose",
"=",
"pmagplotlib",
".",
"verbose",
"if",
"'-sav'",
"in",
"sys",
".",
"argv",
":",
"verbose",
"=",
"0",
"if",
"'-pad'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-pad'",
")",
"padlat",
"=",
"float",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
")",
"padlon",
"=",
"float",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"2",
"]",
")",
"if",
"'-grd'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-grd'",
")",
"gridspace",
"=",
"float",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
")",
"if",
"'-WD'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-WD'",
")",
"dir_path",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"sites_file",
"=",
"dir_path",
"+",
"'/'",
"+",
"sites_file",
"location",
"=",
"\"\"",
"FIG",
"=",
"{",
"'map'",
":",
"1",
"}",
"pmagplotlib",
".",
"plot_init",
"(",
"FIG",
"[",
"'map'",
"]",
",",
"6",
",",
"6",
")",
"# read in er_sites file",
"Sites",
",",
"file_type",
"=",
"pmag",
".",
"magic_read",
"(",
"sites_file",
")",
"if",
"'results'",
"in",
"file_type",
":",
"latkey",
"=",
"'average_lat'",
"lonkey",
"=",
"'average_lon'",
"namekey",
"=",
"'pmag_result_name'",
"lockey",
"=",
"'er_location_names'",
"else",
":",
"latkey",
"=",
"'site_lat'",
"lonkey",
"=",
"'site_lon'",
"namekey",
"=",
"'er_site_name'",
"lockey",
"=",
"'er_location_name'",
"lats",
",",
"lons",
"=",
"[",
"]",
",",
"[",
"]",
"slats",
",",
"slons",
"=",
"[",
"]",
",",
"[",
"]",
"names",
",",
"locs",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"site",
"in",
"Sites",
":",
"if",
"prn_loc",
"==",
"1",
"and",
"location",
"==",
"\"\"",
":",
"location",
"=",
"site",
"[",
"'er_location_name'",
"]",
"lats",
".",
"append",
"(",
"float",
"(",
"site",
"[",
"latkey",
"]",
")",
")",
"l",
"=",
"float",
"(",
"site",
"[",
"lonkey",
"]",
")",
"if",
"l",
"<",
"0",
":",
"l",
"=",
"l",
"+",
"360.",
"# make positive",
"lons",
".",
"append",
"(",
"l",
")",
"if",
"prn_name",
"==",
"1",
":",
"names",
".",
"append",
"(",
"site",
"[",
"namekey",
"]",
")",
"if",
"prn_loc",
"==",
"1",
":",
"locs",
".",
"append",
"(",
"site",
"[",
"lockey",
"]",
")",
"for",
"lat",
"in",
"lats",
":",
"slats",
".",
"append",
"(",
"lat",
")",
"for",
"lon",
"in",
"lons",
":",
"slons",
".",
"append",
"(",
"lon",
")",
"Opts",
"=",
"{",
"'res'",
":",
"res",
",",
"'proj'",
":",
"proj",
",",
"'loc_name'",
":",
"locs",
",",
"'padlon'",
":",
"padlon",
",",
"'padlat'",
":",
"padlat",
",",
"'latmin'",
":",
"numpy",
".",
"min",
"(",
"slats",
")",
"-",
"padlat",
",",
"'latmax'",
":",
"numpy",
".",
"max",
"(",
"slats",
")",
"+",
"padlat",
",",
"'lonmin'",
":",
"numpy",
".",
"min",
"(",
"slons",
")",
"-",
"padlon",
",",
"'lonmax'",
":",
"numpy",
".",
"max",
"(",
"slons",
")",
"+",
"padlon",
",",
"'sym'",
":",
"'ro'",
",",
"'boundinglat'",
":",
"0.",
",",
"'pltgrid'",
":",
"1.",
"}",
"Opts",
"[",
"'lon_0'",
"]",
"=",
"0.5",
"*",
"(",
"numpy",
".",
"min",
"(",
"slons",
")",
"+",
"numpy",
".",
"max",
"(",
"slons",
")",
")",
"Opts",
"[",
"'lat_0'",
"]",
"=",
"0.5",
"*",
"(",
"numpy",
".",
"min",
"(",
"slats",
")",
"+",
"numpy",
".",
"max",
"(",
"slats",
")",
")",
"Opts",
"[",
"'names'",
"]",
"=",
"names",
"Opts",
"[",
"'gridspace'",
"]",
"=",
"gridspace",
"Opts",
"[",
"'details'",
"]",
"=",
"{",
"'coasts'",
":",
"1",
",",
"'rivers'",
":",
"1",
",",
"'states'",
":",
"1",
",",
"'countries'",
":",
"1",
",",
"'ocean'",
":",
"0",
"}",
"if",
"ocean",
"==",
"1",
":",
"Opts",
"[",
"'details'",
"]",
"[",
"'ocean'",
"]",
"=",
"1",
"if",
"rivers",
"==",
"1",
":",
"Opts",
"[",
"'details'",
"]",
"[",
"'rivers'",
"]",
"=",
"0",
"if",
"boundaries",
"==",
"1",
":",
"Opts",
"[",
"'details'",
"]",
"[",
"'states'",
"]",
"=",
"0",
"Opts",
"[",
"'details'",
"]",
"[",
"'countries'",
"]",
"=",
"0",
"Opts",
"[",
"'details'",
"]",
"[",
"'fancy'",
"]",
"=",
"fancy",
"pmagplotlib",
".",
"plot_map",
"(",
"FIG",
"[",
"'map'",
"]",
",",
"lats",
",",
"lons",
",",
"Opts",
")",
"if",
"verbose",
":",
"pmagplotlib",
".",
"draw_figs",
"(",
"FIG",
")",
"files",
"=",
"{",
"}",
"for",
"key",
"in",
"list",
"(",
"FIG",
".",
"keys",
"(",
")",
")",
":",
"files",
"[",
"key",
"]",
"=",
"'Site_map'",
"+",
"'.'",
"+",
"fmt",
"if",
"pmagplotlib",
".",
"isServer",
":",
"black",
"=",
"'#000000'",
"purple",
"=",
"'#800080'",
"titles",
"=",
"{",
"}",
"titles",
"[",
"'map'",
"]",
"=",
"'Site Map'",
"FIG",
"=",
"pmagplotlib",
".",
"add_borders",
"(",
"FIG",
",",
"titles",
",",
"black",
",",
"purple",
")",
"pmagplotlib",
".",
"save_plots",
"(",
"FIG",
",",
"files",
")",
"elif",
"verbose",
":",
"ans",
"=",
"input",
"(",
"\" S[a]ve to save plot, Return to quit: \"",
")",
"if",
"ans",
"==",
"\"a\"",
":",
"pmagplotlib",
".",
"save_plots",
"(",
"FIG",
",",
"files",
")",
"else",
":",
"pmagplotlib",
".",
"save_plots",
"(",
"FIG",
",",
"files",
")"
] | NAME
basemap_magic.py
NB: this program is no longer maintained - use plot_map_pts.py for greater functionality
DESCRIPTION
makes a map of locations in er_sites.txt
SYNTAX
basemap_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f SFILE, specify er_sites.txt or pmag_results.txt format file
-res [c,l,i,h] specify resolution (crude,low,intermediate,high)
-etp plot the etopo20 topographic mesh
-pad [LAT LON] pad bounding box by LAT/LON (default is [.5 .5] degrees)
-grd SPACE specify grid spacing
-prj [lcc], specify projection (lcc=lambert conformal conic), default is mercator
-n print site names (default is not)
-l print location names (default is not)
-o color ocean blue/land green (default is not)
-R don't plot details of rivers
-B don't plot national/state boundaries, etc.
-sav save plot and quit quietly
-fmt [png,svg,eps,jpg,pdf] specify format for output, default is pdf
DEFAULTS
SFILE: 'er_sites.txt'
resolution: intermediate
saved images are in pdf | [
"NAME",
"basemap_magic",
".",
"py",
"NB",
":",
"this",
"program",
"no",
"longer",
"maintained",
"-",
"use",
"plot_map_pts",
".",
"py",
"for",
"greater",
"functionality"
] | python | train |
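A hedged aside on the flag-parsing idiom the script above repeats for every option. The helper below is hypothetical (it is not part of PmagPy) and simply factors the sys.argv.index pattern into one place:

import sys

def get_opt(flag, default, cast=str):
    # return the value that follows `flag` on the command line, or `default`
    if flag in sys.argv:
        ind = sys.argv.index(flag)
        return cast(sys.argv[ind + 1])
    return default

res = get_opt('-res', 'i')               # map resolution, as in -res above
gridspace = get_opt('-grd', .5, float)   # grid spacing in degrees, as in -grd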
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L703-L799 | def network_create_func(self, net):
"""Create network in database and dcnm
:param net: network dictionary
"""
net_id = net['id']
net_name = net.get('name')
network_db_elem = self.get_network(net_id)
# Check if the source of network creation is FW and if yes, skip
# this event.
# Check if there's a way to read the DB from service class
# TODO(padkrish)
if self.fw_api.is_network_source_fw(network_db_elem, net_name):
LOG.info("Service network %s, returning", net_name)
return
if not network_db_elem:
self.network[net_id] = {}
self.network[net_id].update(net)
net_name = net.get('name')
tenant_id = net.get('tenant_id')
# Extract segmentation_id from the network name
net_ext_name = self.cfg.dcnm.dcnm_net_ext
nobj = re.search(net_ext_name, net_name)
try:
seg_id = int((net_name[nobj.start(0) + len(net_ext_name) - 1:]
if nobj else None))
except (IndexError, TypeError, ValueError):
seg_id = None
# Check if network is already created.
query_net = self.get_network_by_segid(seg_id) if seg_id else None
if query_net:
# The network is already created no need to process the event.
if query_net.source.lower() == 'dcnm':
# DCNM created the network. Only update network id in database.
prev_id = query_net.network_id
params = dict(columns=dict(network_id=net_id))
self.update_network(prev_id, **params)
# Update the network cache.
prev_info = self.network.pop(prev_id)
prev_info['id'] = net_id
self.network[net_id] = prev_info
# Update the network name. After extracting the segmentation_id
# no need to keep it in the name. Removing it and update
# the network.
updated_net_name = (
net_name[:nobj.start(0) + len(net_ext_name) - 1])
try:
body = {'network': {'name': updated_net_name, }}
dcnm_net = self.neutronclient.update_network(
net_id, body=body).get('network')
LOG.debug('Updated network %(network)s', dcnm_net)
except Exception as exc:
LOG.exception('Failed to update network '
'%(network)s. Reason %(err)s.',
{'network': updated_net_name,
'err': str(exc)})
return
LOG.info('network_create_event: network %(name)s was created '
'by %(source)s. Ignoring processing the event.',
{'name': net_name, 'source': 'dcnm'})
return
if network_db_elem:
LOG.debug("Network %s exists, not processing" % net_name)
return
# Check if project (i.e. tenant) exist.
tenant_name = self.get_project_name(tenant_id)
if not tenant_name:
LOG.error('Failed to create network %(name)s. Project '
'%(tenant_id)s does not exist.',
{'name': net_name, 'tenant_id': tenant_id})
return
pseg_id = self.network[net_id].get('provider:segmentation_id')
seg_id = self._get_segmentation_id(net_id, pseg_id, 'openstack')
self.network[net_id]['segmentation_id'] = seg_id
try:
cfgp, fwd_mod = self.dcnm_client.get_config_profile_for_network(
net.get('name'))
self.network[net_id]['config_profile'] = cfgp
self.network[net_id]['fwd_mod'] = fwd_mod
self.add_network_db(net_id, self.network[net_id],
'openstack',
constants.SUBNET_PENDING)
LOG.debug('network_create_event: network=%s', self.network)
except dexc.DfaClientRequestFailed:
# Fail to get config profile from DCNM.
# Save the network info with failure result and send the request
# to DCNM later.
self.add_network_db(net_id, self.network[net_id], 'openstack',
constants.CREATE_FAIL)
LOG.error('Failed to create network=%s.', self.network) | [
"def",
"network_create_func",
"(",
"self",
",",
"net",
")",
":",
"net_id",
"=",
"net",
"[",
"'id'",
"]",
"net_name",
"=",
"net",
".",
"get",
"(",
"'name'",
")",
"network_db_elem",
"=",
"self",
".",
"get_network",
"(",
"net_id",
")",
"# Check if the source of network creation is FW and if yes, skip",
"# this event.",
"# Check if there's a way to read the DB from service class",
"# TODO(padkrish)",
"if",
"self",
".",
"fw_api",
".",
"is_network_source_fw",
"(",
"network_db_elem",
",",
"net_name",
")",
":",
"LOG",
".",
"info",
"(",
"\"Service network %s, returning\"",
",",
"net_name",
")",
"return",
"if",
"not",
"network_db_elem",
":",
"self",
".",
"network",
"[",
"net_id",
"]",
"=",
"{",
"}",
"self",
".",
"network",
"[",
"net_id",
"]",
".",
"update",
"(",
"net",
")",
"net_name",
"=",
"net",
".",
"get",
"(",
"'name'",
")",
"tenant_id",
"=",
"net",
".",
"get",
"(",
"'tenant_id'",
")",
"# Extract segmentation_id from the network name",
"net_ext_name",
"=",
"self",
".",
"cfg",
".",
"dcnm",
".",
"dcnm_net_ext",
"nobj",
"=",
"re",
".",
"search",
"(",
"net_ext_name",
",",
"net_name",
")",
"try",
":",
"seg_id",
"=",
"int",
"(",
"(",
"net_name",
"[",
"nobj",
".",
"start",
"(",
"0",
")",
"+",
"len",
"(",
"net_ext_name",
")",
"-",
"1",
":",
"]",
"if",
"nobj",
"else",
"None",
")",
")",
"except",
"(",
"IndexError",
",",
"TypeError",
",",
"ValueError",
")",
":",
"seg_id",
"=",
"None",
"# Check if network is already created.",
"query_net",
"=",
"self",
".",
"get_network_by_segid",
"(",
"seg_id",
")",
"if",
"seg_id",
"else",
"None",
"if",
"query_net",
":",
"# The network is already created no need to process the event.",
"if",
"query_net",
".",
"source",
".",
"lower",
"(",
")",
"==",
"'dcnm'",
":",
"# DCNM created the network. Only update network id in database.",
"prev_id",
"=",
"query_net",
".",
"network_id",
"params",
"=",
"dict",
"(",
"columns",
"=",
"dict",
"(",
"network_id",
"=",
"net_id",
")",
")",
"self",
".",
"update_network",
"(",
"prev_id",
",",
"*",
"*",
"params",
")",
"# Update the network cache.",
"prev_info",
"=",
"self",
".",
"network",
".",
"pop",
"(",
"prev_id",
")",
"prev_info",
"[",
"'id'",
"]",
"=",
"net_id",
"self",
".",
"network",
"[",
"net_id",
"]",
"=",
"prev_info",
"# Update the network name. After extracting the segmentation_id",
"# no need to keep it in the name. Removing it and update",
"# the network.",
"updated_net_name",
"=",
"(",
"net_name",
"[",
":",
"nobj",
".",
"start",
"(",
"0",
")",
"+",
"len",
"(",
"net_ext_name",
")",
"-",
"1",
"]",
")",
"try",
":",
"body",
"=",
"{",
"'network'",
":",
"{",
"'name'",
":",
"updated_net_name",
",",
"}",
"}",
"dcnm_net",
"=",
"self",
".",
"neutronclient",
".",
"update_network",
"(",
"net_id",
",",
"body",
"=",
"body",
")",
".",
"get",
"(",
"'network'",
")",
"LOG",
".",
"debug",
"(",
"'Updated network %(network)s'",
",",
"dcnm_net",
")",
"except",
"Exception",
"as",
"exc",
":",
"LOG",
".",
"exception",
"(",
"'Failed to update network '",
"'%(network)s. Reason %(err)s.'",
",",
"{",
"'network'",
":",
"updated_net_name",
",",
"'err'",
":",
"str",
"(",
"exc",
")",
"}",
")",
"return",
"LOG",
".",
"info",
"(",
"'network_create_event: network %(name)s was created '",
"'by %(source)s. Ignoring processing the event.'",
",",
"{",
"'name'",
":",
"net_name",
",",
"'source'",
":",
"'dcnm'",
"}",
")",
"return",
"if",
"network_db_elem",
":",
"LOG",
".",
"debug",
"(",
"\"Network %s exists, not processing\"",
"%",
"net_name",
")",
"return",
"# Check if project (i.e. tenant) exist.",
"tenant_name",
"=",
"self",
".",
"get_project_name",
"(",
"tenant_id",
")",
"if",
"not",
"tenant_name",
":",
"LOG",
".",
"error",
"(",
"'Failed to create network %(name)s. Project '",
"'%(tenant_id)s does not exist.'",
",",
"{",
"'name'",
":",
"net_name",
",",
"'tenant_id'",
":",
"tenant_id",
"}",
")",
"return",
"pseg_id",
"=",
"self",
".",
"network",
"[",
"net_id",
"]",
".",
"get",
"(",
"'provider:segmentation_id'",
")",
"seg_id",
"=",
"self",
".",
"_get_segmentation_id",
"(",
"net_id",
",",
"pseg_id",
",",
"'openstack'",
")",
"self",
".",
"network",
"[",
"net_id",
"]",
"[",
"'segmentation_id'",
"]",
"=",
"seg_id",
"try",
":",
"cfgp",
",",
"fwd_mod",
"=",
"self",
".",
"dcnm_client",
".",
"get_config_profile_for_network",
"(",
"net",
".",
"get",
"(",
"'name'",
")",
")",
"self",
".",
"network",
"[",
"net_id",
"]",
"[",
"'config_profile'",
"]",
"=",
"cfgp",
"self",
".",
"network",
"[",
"net_id",
"]",
"[",
"'fwd_mod'",
"]",
"=",
"fwd_mod",
"self",
".",
"add_network_db",
"(",
"net_id",
",",
"self",
".",
"network",
"[",
"net_id",
"]",
",",
"'openstack'",
",",
"constants",
".",
"SUBNET_PENDING",
")",
"LOG",
".",
"debug",
"(",
"'network_create_event: network=%s'",
",",
"self",
".",
"network",
")",
"except",
"dexc",
".",
"DfaClientRequestFailed",
":",
"# Fail to get config profile from DCNM.",
"# Save the network info with failure result and send the request",
"# to DCNM later.",
"self",
".",
"add_network_db",
"(",
"net_id",
",",
"self",
".",
"network",
"[",
"net_id",
"]",
",",
"'openstack'",
",",
"constants",
".",
"CREATE_FAIL",
")",
"LOG",
".",
"error",
"(",
"'Failed to create network=%s.'",
",",
"self",
".",
"network",
")"
] | Create network in database and dcnm
:param net: network dictionary | [
"Create",
"network",
"in",
"database",
"and",
"dcnm",
":",
"param",
"net",
":",
"network",
"dictionary"
] | python | train |
atlassian-api/atlassian-python-api | atlassian/jira.py | https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L631-L646 | def create_or_update_issue_remote_links(self, issue_key, link_url, title, global_id=None, relationship=None):
"""
Add Remote Link to Issue, update url if global_id is passed
:param issue_key: str
:param link_url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, OPTIONAL: Default by built-in method: 'Web Link'
"""
url = 'rest/api/2/issue/{issue_key}/remotelink'.format(issue_key=issue_key)
data = {'object': {'url': link_url, 'title': title}}
if global_id:
data['globalId'] = global_id
if relationship:
data['relationship'] = relationship
return self.post(url, data=data) | [
"def",
"create_or_update_issue_remote_links",
"(",
"self",
",",
"issue_key",
",",
"link_url",
",",
"title",
",",
"global_id",
"=",
"None",
",",
"relationship",
"=",
"None",
")",
":",
"url",
"=",
"'rest/api/2/issue/{issue_key}/remotelink'",
".",
"format",
"(",
"issue_key",
"=",
"issue_key",
")",
"data",
"=",
"{",
"'object'",
":",
"{",
"'url'",
":",
"link_url",
",",
"'title'",
":",
"title",
"}",
"}",
"if",
"global_id",
":",
"data",
"[",
"'globalId'",
"]",
"=",
"global_id",
"if",
"relationship",
":",
"data",
"[",
"'relationship'",
"]",
"=",
"relationship",
"return",
"self",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
")"
] | Add Remote Link to Issue, update url if global_id is passed
:param issue_key: str
:param link_url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, OPTIONAL: Default by built-in method: 'Web Link' | [
"Add",
"Remote",
"Link",
"to",
"Issue",
"update",
"url",
"if",
"global_id",
"is",
"passed",
":",
"param",
"issue_key",
":",
"str",
":",
"param",
"link_url",
":",
"str",
":",
"param",
"title",
":",
"str",
":",
"param",
"global_id",
":",
"str",
"OPTIONAL",
":",
":",
"param",
"relationship",
":",
"str",
"OPTIONAL",
":",
"Default",
"by",
"built",
"-",
"in",
"method",
":",
"Web",
"Link"
] | python | train |
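A hedged illustration of the suffix-based segmentation-id extraction in network_create_func above. The arithmetic only works out if cfg.dcnm.dcnm_net_ext is a regex-escaped single character; that value is an assumption here, the real one comes from the DFA configuration:

import re

net_ext_name = r'\$'                 # two characters matching the 1-char suffix '$'
net_name = 'demo-net$12345'
nobj = re.search(net_ext_name, net_name)
seg_id = int(net_name[nobj.start(0) + len(net_ext_name) - 1:])       # -> 12345
updated_net_name = net_name[:nobj.start(0) + len(net_ext_name) - 1]  # -> 'demo-net$'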
bopo/mootdx | mootdx/quotes.py | https://github.com/bopo/mootdx/blob/7c4623e9464c75d3c87a06d48fe8734b027374fa/mootdx/quotes.py#L212-L248 | def index(
self,
symbol='000001',
market='sh',
category='9',
start='0',
offset='100'):
'''
Fetch index candlestick (K-line) data.
K-line categories:
- 0  5-minute K-line
- 1  15-minute K-line
- 2  30-minute K-line
- 3  1-hour K-line
- 4  daily K-line
- 5  weekly K-line
- 6  monthly K-line
- 7  1-minute
- 8  1-minute K-line
- 9  daily K-line
- 10 quarterly K-line
- 11 yearly K-line
:param symbol: stock/index code
:param category: data category
:param market: securities market
:param start: start position
:param offset: number of records to fetch per call
:return: pd.DataFrame or None
'''
market = 1 if market == 'sh' else 0
with self.client.connect(*self.bestip):
data = self.client.get_index_bars(
int(category), int(market), str(symbol), int(start), int(offset))
return self.client.to_df(data) | [
"def",
"index",
"(",
"self",
",",
"symbol",
"=",
"'000001'",
",",
"market",
"=",
"'sh'",
",",
"category",
"=",
"'9'",
",",
"start",
"=",
"'0'",
",",
"offset",
"=",
"'100'",
")",
":",
"market",
"=",
"1",
"if",
"market",
"==",
"'sh'",
"else",
"0",
"with",
"self",
".",
"client",
".",
"connect",
"(",
"*",
"self",
".",
"bestip",
")",
":",
"data",
"=",
"self",
".",
"client",
".",
"get_index_bars",
"(",
"int",
"(",
"category",
")",
",",
"int",
"(",
"market",
")",
",",
"str",
"(",
"symbol",
")",
",",
"int",
"(",
"start",
")",
",",
"int",
"(",
"offset",
")",
")",
"return",
"self",
".",
"client",
".",
"to_df",
"(",
"data",
")"
] | Fetch index candlestick (K-line) data.
K-line categories:
- 0  5-minute K-line
- 1  15-minute K-line
- 2  30-minute K-line
- 3  1-hour K-line
- 4  daily K-line
- 5  weekly K-line
- 6  monthly K-line
- 7  1-minute
- 8  1-minute K-line
- 9  daily K-line
- 10 quarterly K-line
- 11 yearly K-line
:param symbol: stock/index code
:param category: data category
:param market: securities market
:param start: start position
:param offset: number of records to fetch per call
:return: pd.DataFrame or None | [
"获取指数k线"
] | python | train |
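A minimal usage sketch for the method above. The Quotes.factory constructor is an assumption about the client entry point; the keyword values mirror the defaults in the signature:

from mootdx.quotes import Quotes

client = Quotes.factory(market='std')    # assumed std-market client factory
df = client.index(symbol='000001', market='sh', category='9',
                  start='0', offset='10')
print(df.tail())                         # last few daily bars of the index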
sprockets/sprockets.http | sprockets/http/app.py | https://github.com/sprockets/sprockets.http/blob/8baa4cdc1fa35a162ee226fd6cc4170a0ca0ecd3/sprockets/http/app.py#L87-L104 | def start(self, io_loop):
"""
Run the ``before_run`` callbacks and queue the ``on_start`` callbacks.
:param tornado.ioloop.IOLoop io_loop: loop to start the app on.
"""
for callback in self.before_run_callbacks:
try:
callback(self.tornado_application, io_loop)
except Exception:
self.logger.error('before_run callback %r cancelled start',
callback, exc_info=1)
self.stop(io_loop)
raise
for callback in self.on_start_callbacks:
io_loop.spawn_callback(callback, self.tornado_application, io_loop) | [
"def",
"start",
"(",
"self",
",",
"io_loop",
")",
":",
"for",
"callback",
"in",
"self",
".",
"before_run_callbacks",
":",
"try",
":",
"callback",
"(",
"self",
".",
"tornado_application",
",",
"io_loop",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'before_run callback %r cancelled start'",
",",
"callback",
",",
"exc_info",
"=",
"1",
")",
"self",
".",
"stop",
"(",
"io_loop",
")",
"raise",
"for",
"callback",
"in",
"self",
".",
"on_start_callbacks",
":",
"io_loop",
".",
"spawn_callback",
"(",
"callback",
",",
"self",
".",
"tornado_application",
",",
"io_loop",
")"
] | Run the ``before_run`` callbacks and queue the ``on_start`` callbacks.
:param tornado.ioloop.IOLoop io_loop: loop to start the app on. | [
"Run",
"the",
"before_run",
"callbacks",
"and",
"queue",
"to",
"on_start",
"callbacks",
"."
] | python | train |
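A hedged sketch of the two callback flavors this runner distinguishes: before_run callbacks run synchronously and abort startup by raising (stop() is called, then the exception re-raised), while on_start callbacks are only scheduled on the loop via spawn_callback. The setting names are placeholders:

def check_config(application, io_loop):
    # before_run: raising here cancels start-up
    if 'database_url' not in application.settings:
        raise RuntimeError('missing database_url')

def warm_cache(application, io_loop):
    # on_start: runs once the IOLoop starts spinning
    application.settings['cache'] = {}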
langloisjp/pysvcmetrics | statsdclient.py | https://github.com/langloisjp/pysvcmetrics/blob/a126fc029ab645d9db46c0f5712c416cdf80e370/statsdclient.py#L88-L98 | def timeit(self, metric, func, *args, **kwargs):
"""
Times given function and log metric in ms for duration of execution.
>>> import time
>>> client = StatsdClient()
>>> client.timeit("latency", time.sleep, 0.5)
"""
(res, seconds) = timeit(func, *args, **kwargs)
self.timing(metric, seconds * 1000.0)
return res | [
"def",
"timeit",
"(",
"self",
",",
"metric",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"(",
"res",
",",
"seconds",
")",
"=",
"timeit",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"timing",
"(",
"metric",
",",
"seconds",
"*",
"1000.0",
")",
"return",
"res"
] | Times given function and log metric in ms for duration of execution.
>>> import time
>>> client = StatsdClient()
>>> client.timeit("latency", time.sleep, 0.5) | [
"Times",
"given",
"function",
"and",
"log",
"metric",
"in",
"ms",
"for",
"duration",
"of",
"execution",
"."
] | python | train |
apache/incubator-heron | heron/tools/tracker/src/python/config.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/config.py#L51-L58 | def validate_extra_link(self, extra_link):
"""validate extra link"""
if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
raise Exception("Invalid extra.links format. " +
"Extra link must include a 'name' and 'formatter' field")
self.validated_formatter(extra_link[EXTRA_LINK_FORMATTER_KEY])
return extra_link | [
"def",
"validate_extra_link",
"(",
"self",
",",
"extra_link",
")",
":",
"if",
"EXTRA_LINK_NAME_KEY",
"not",
"in",
"extra_link",
"or",
"EXTRA_LINK_FORMATTER_KEY",
"not",
"in",
"extra_link",
":",
"raise",
"Exception",
"(",
"\"Invalid extra.links format. \"",
"+",
"\"Extra link must include a 'name' and 'formatter' field\"",
")",
"self",
".",
"validated_formatter",
"(",
"extra_link",
"[",
"EXTRA_LINK_FORMATTER_KEY",
"]",
")",
"return",
"extra_link"
] | validate extra link | [
"validate",
"extra",
"link"
] | python | valid |
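A hedged example of a link dict that passes this validator. Only the 'name' and 'formatter' keys are required by the code above; the formatter URL template is an assumption that is further checked by validated_formatter, and config stands for an instance of the tracker Config class:

extra_link = {
    'name': 'Grafana',                                 # EXTRA_LINK_NAME_KEY
    'formatter': 'http://grafana/${CLUSTER}/${ROLE}',  # EXTRA_LINK_FORMATTER_KEY
}
config.validate_extra_link(extra_link)  # raises unless both keys are present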
andy-z/ged4py | ged4py/detail/date.py | https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/date.py#L214-L234 | def parse(cls, datestr):
"""Parse string <DATE_VALUE> string and make :py:class:`DateValue`
instance out of it.
:param str datestr: String with GEDCOM date, range, period, etc.
"""
# some apps generate DATE recods without any value, which is
# non-standard, return empty DateValue for those
if not datestr:
return cls()
for regex, tmpl in DATES:
m = regex.match(datestr)
if m is not None:
groups = {}
for key, val in m.groupdict().items():
if key != 'phrase':
val = CalendarDate.parse(val)
groups[key] = val
return cls(tmpl, groups)
# if cannot parse string assume it is a phrase
return cls("($phrase)", dict(phrase=datestr)) | [
"def",
"parse",
"(",
"cls",
",",
"datestr",
")",
":",
"# some apps generate DATE recods without any value, which is",
"# non-standard, return empty DateValue for those",
"if",
"not",
"datestr",
":",
"return",
"cls",
"(",
")",
"for",
"regex",
",",
"tmpl",
"in",
"DATES",
":",
"m",
"=",
"regex",
".",
"match",
"(",
"datestr",
")",
"if",
"m",
"is",
"not",
"None",
":",
"groups",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"m",
".",
"groupdict",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"key",
"!=",
"'phrase'",
":",
"val",
"=",
"CalendarDate",
".",
"parse",
"(",
"val",
")",
"groups",
"[",
"key",
"]",
"=",
"val",
"return",
"cls",
"(",
"tmpl",
",",
"groups",
")",
"# if cannot parse string assume it is a phrase",
"return",
"cls",
"(",
"\"($phrase)\"",
",",
"dict",
"(",
"phrase",
"=",
"datestr",
")",
")"
] | Parse a <DATE_VALUE> string and make :py:class:`DateValue`
instance out of it.
:param str datestr: String with GEDCOM date, range, period, etc. | [
"Parse",
"string",
"<DATE_VALUE",
">",
"string",
"and",
"make",
":",
"py",
":",
"class",
":",
"DateValue",
"instance",
"out",
"of",
"it",
"."
] | python | train |
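A hedged usage sketch. The empty-string case is guaranteed by the code; whether a given string matches one of the DATES regexes or falls through to the phrase branch depends on that table, so the classifications below are assumptions:

from ged4py.detail.date import DateValue   # module from this record

empty = DateValue.parse('')                # non-standard empty DATE -> DateValue()
dv = DateValue.parse('31 DEC 1899')        # assumed to match a DATES regex
ph = DateValue.parse('spring of 1944')     # assumed unmatched -> kept as a phrase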
materialsproject/pymatgen | pymatgen/io/vasp/outputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L3491-L3542 | def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
The method returns None if there's a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
# TODO: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
# get all branch dir names
branch_dir_names = [os.path.abspath(d)
for d in glob.glob("{i}/branch_*"
.format(i=dir_name))
if os.path.isdir(d)]
# sort by the directory name (e.g, branch_10)
sort_by = lambda x: int(x.split("_")[-1])
sorted_branch_dir_names = sorted(branch_dir_names, key=sort_by)
# populate branches with Bandstructure instances
branches = []
for dir_name in sorted_branch_dir_names:
xml_file = os.path.join(dir_name, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn("Skipping {}. Unable to find {}"
.format(d=dir_name, f=xml_file))
return get_reconstructed_band_structure(branches, efermi)
else:
xml_file = os.path.join(dir_name, "vasprun.xml")
# Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections) \
.get_band_structure(kpoints_filename=None, efermi=efermi)
else:
return None | [
"def",
"get_band_structure_from_vasp_multiple_branches",
"(",
"dir_name",
",",
"efermi",
"=",
"None",
",",
"projections",
"=",
"False",
")",
":",
"# TODO: Add better error handling!!!",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"\"branch_0\"",
")",
")",
":",
"# get all branch dir names",
"branch_dir_names",
"=",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"d",
")",
"for",
"d",
"in",
"glob",
".",
"glob",
"(",
"\"{i}/branch_*\"",
".",
"format",
"(",
"i",
"=",
"dir_name",
")",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"d",
")",
"]",
"# sort by the directory name (e.g, branch_10)",
"sort_by",
"=",
"lambda",
"x",
":",
"int",
"(",
"x",
".",
"split",
"(",
"\"_\"",
")",
"[",
"-",
"1",
"]",
")",
"sorted_branch_dir_names",
"=",
"sorted",
"(",
"branch_dir_names",
",",
"key",
"=",
"sort_by",
")",
"# populate branches with Bandstructure instances",
"branches",
"=",
"[",
"]",
"for",
"dir_name",
"in",
"sorted_branch_dir_names",
":",
"xml_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"\"vasprun.xml\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"xml_file",
")",
":",
"run",
"=",
"Vasprun",
"(",
"xml_file",
",",
"parse_projected_eigen",
"=",
"projections",
")",
"branches",
".",
"append",
"(",
"run",
".",
"get_band_structure",
"(",
"efermi",
"=",
"efermi",
")",
")",
"else",
":",
"# It might be better to throw an exception",
"warnings",
".",
"warn",
"(",
"\"Skipping {}. Unable to find {}\"",
".",
"format",
"(",
"d",
"=",
"dir_name",
",",
"f",
"=",
"xml_file",
")",
")",
"return",
"get_reconstructed_band_structure",
"(",
"branches",
",",
"efermi",
")",
"else",
":",
"xml_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"\"vasprun.xml\"",
")",
"# Better handling of Errors",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"xml_file",
")",
":",
"return",
"Vasprun",
"(",
"xml_file",
",",
"parse_projected_eigen",
"=",
"projections",
")",
".",
"get_band_structure",
"(",
"kpoints_filename",
"=",
"None",
",",
"efermi",
"=",
"efermi",
")",
"else",
":",
"return",
"None"
] | This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
The method returns None if there's a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object | [
"This",
"method",
"is",
"used",
"to",
"get",
"band",
"structure",
"info",
"from",
"a",
"VASP",
"directory",
".",
"It",
"takes",
"into",
"account",
"that",
"the",
"run",
"can",
"be",
"divided",
"in",
"several",
"branches",
"named",
"branch_x",
".",
"If",
"the",
"run",
"has",
"not",
"been",
"divided",
"in",
"branches",
"the",
"method",
"will",
"turn",
"to",
"parsing",
"vasprun",
".",
"xml",
"directly",
"."
] | python | train |
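A hedged usage sketch for the helper above, assuming a run split into ./bands/branch_0, ./bands/branch_1, ... each holding a vasprun.xml; get_band_gap() is a standard BandStructure method in pymatgen:

from pymatgen.io.vasp.outputs import get_band_structure_from_vasp_multiple_branches

bs = get_band_structure_from_vasp_multiple_branches('./bands', efermi=5.0,
                                                    projections=False)
if bs is not None:                       # None means no vasprun.xml was found
    print(bs.get_band_gap())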
djaodjin/djaodjin-deployutils | deployutils/apps/django/mixins.py | https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/mixins.py#L59-L69 | def get_accessibles(request, roles=None):
"""
Returns the list of *dictionaries* for which the accounts are
accessible by ``request.user`` filtered by ``roles`` if present.
"""
results = []
for role_name, organizations in six.iteritems(request.session.get(
'roles', {})):
if roles is None or role_name in roles:
results += organizations
return results | [
"def",
"get_accessibles",
"(",
"request",
",",
"roles",
"=",
"None",
")",
":",
"results",
"=",
"[",
"]",
"for",
"role_name",
",",
"organizations",
"in",
"six",
".",
"iteritems",
"(",
"request",
".",
"session",
".",
"get",
"(",
"'roles'",
",",
"{",
"}",
")",
")",
":",
"if",
"roles",
"is",
"None",
"or",
"role_name",
"in",
"roles",
":",
"results",
"+=",
"organizations",
"return",
"results"
] | Returns the list of *dictionaries* for which the accounts are
accessible by ``request.user`` filtered by ``roles`` if present. | [
"Returns",
"the",
"list",
"of",
"*",
"dictionnaries",
"*",
"for",
"which",
"the",
"accounts",
"are",
"accessibles",
"by",
"request",
".",
"user",
"filtered",
"by",
"roles",
"if",
"present",
"."
] | python | train |
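A hedged sketch of the session layout the helper walks. The role names and the 'slug' key are placeholders; the code only requires a dict mapping role names to lists:

class StubRequest:                    # stand-in for Django's HttpRequest
    session = {'roles': {
        'manager': [{'slug': 'acme'}],
        'contributor': [{'slug': 'widgets-inc'}],
    }}

request = StubRequest()
get_accessibles(request, roles=['manager'])   # -> [{'slug': 'acme'}]
get_accessibles(request)                      # -> both organizations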
pyviz/holoviews | holoviews/core/options.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/options.py#L1336-L1343 | def set_display_hook(cls, group, objtype, hook):
"""
Specify a display hook that will be applied to objects of type
objtype. The group specifies the set to which the display hook
belongs, allowing the Store to compute the precedence within
each group.
"""
cls._display_hooks[group][objtype] = hook | [
"def",
"set_display_hook",
"(",
"cls",
",",
"group",
",",
"objtype",
",",
"hook",
")",
":",
"cls",
".",
"_display_hooks",
"[",
"group",
"]",
"[",
"objtype",
"]",
"=",
"hook"
] | Specify a display hook that will be applied to objects of type
objtype. The group specifies the set to which the display hook
belongs, allowing the Store to compute the precedence within
each group. | [
"Specify",
"a",
"display",
"hook",
"that",
"will",
"be",
"applied",
"to",
"objects",
"of",
"type",
"objtype",
".",
"The",
"group",
"specifies",
"the",
"set",
"to",
"which",
"the",
"display",
"hook",
"belongs",
"allowing",
"the",
"Store",
"to",
"compute",
"the",
"precedence",
"within",
"each",
"group",
"."
] | python | train |
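A hedged registration sketch. The group name, the stand-in element class and the hook body are all placeholders; only the classmethod signature comes from the record:

from holoviews.core.options import Store   # module from this record

class Curve:                                # stand-in object type
    pass

def text_hook(obj):                         # hypothetical display hook
    return repr(obj)

Store.set_display_hook('plaintext', Curve, text_hook)
# Store._display_hooks['plaintext'][Curve] is now text_hook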
pecan/pecan | pecan/core.py | https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/core.py#L348-L404 | def get_args(self, state, all_params, remainder, argspec, im_self):
'''
Determines the arguments for a controller based upon parameters
passed and the argument specification for the controller.
'''
args = []
varargs = []
kwargs = dict()
valid_args = argspec.args[:]
if ismethod(state.controller) or im_self:
valid_args.pop(0) # pop off `self`
pecan_state = state.request.pecan
remainder = [x for x in remainder if x]
if im_self is not None:
args.append(im_self)
# grab the routing args from nested REST controllers
if 'routing_args' in pecan_state:
remainder = pecan_state['routing_args'] + list(remainder)
del pecan_state['routing_args']
# handle positional arguments
if valid_args and remainder:
args.extend(remainder[:len(valid_args)])
remainder = remainder[len(valid_args):]
valid_args = valid_args[len(args):]
# handle wildcard arguments
if [i for i in remainder if i]:
if not argspec[1]:
abort(404)
varargs.extend(remainder)
# get the default positional arguments
if argspec[3]:
defaults = dict(izip(argspec[0][-len(argspec[3]):], argspec[3]))
else:
defaults = dict()
# handle positional GET/POST params
for name in valid_args:
if name in all_params:
args.append(all_params.pop(name))
elif name in defaults:
args.append(defaults[name])
else:
break
# handle wildcard GET/POST params
if argspec[2]:
for name, value in six.iteritems(all_params):
if name not in argspec[0]:
kwargs[name] = value
return args, varargs, kwargs | [
"def",
"get_args",
"(",
"self",
",",
"state",
",",
"all_params",
",",
"remainder",
",",
"argspec",
",",
"im_self",
")",
":",
"args",
"=",
"[",
"]",
"varargs",
"=",
"[",
"]",
"kwargs",
"=",
"dict",
"(",
")",
"valid_args",
"=",
"argspec",
".",
"args",
"[",
":",
"]",
"if",
"ismethod",
"(",
"state",
".",
"controller",
")",
"or",
"im_self",
":",
"valid_args",
".",
"pop",
"(",
"0",
")",
"# pop off `self`",
"pecan_state",
"=",
"state",
".",
"request",
".",
"pecan",
"remainder",
"=",
"[",
"x",
"for",
"x",
"in",
"remainder",
"if",
"x",
"]",
"if",
"im_self",
"is",
"not",
"None",
":",
"args",
".",
"append",
"(",
"im_self",
")",
"# grab the routing args from nested REST controllers",
"if",
"'routing_args'",
"in",
"pecan_state",
":",
"remainder",
"=",
"pecan_state",
"[",
"'routing_args'",
"]",
"+",
"list",
"(",
"remainder",
")",
"del",
"pecan_state",
"[",
"'routing_args'",
"]",
"# handle positional arguments",
"if",
"valid_args",
"and",
"remainder",
":",
"args",
".",
"extend",
"(",
"remainder",
"[",
":",
"len",
"(",
"valid_args",
")",
"]",
")",
"remainder",
"=",
"remainder",
"[",
"len",
"(",
"valid_args",
")",
":",
"]",
"valid_args",
"=",
"valid_args",
"[",
"len",
"(",
"args",
")",
":",
"]",
"# handle wildcard arguments",
"if",
"[",
"i",
"for",
"i",
"in",
"remainder",
"if",
"i",
"]",
":",
"if",
"not",
"argspec",
"[",
"1",
"]",
":",
"abort",
"(",
"404",
")",
"varargs",
".",
"extend",
"(",
"remainder",
")",
"# get the default positional arguments",
"if",
"argspec",
"[",
"3",
"]",
":",
"defaults",
"=",
"dict",
"(",
"izip",
"(",
"argspec",
"[",
"0",
"]",
"[",
"-",
"len",
"(",
"argspec",
"[",
"3",
"]",
")",
":",
"]",
",",
"argspec",
"[",
"3",
"]",
")",
")",
"else",
":",
"defaults",
"=",
"dict",
"(",
")",
"# handle positional GET/POST params",
"for",
"name",
"in",
"valid_args",
":",
"if",
"name",
"in",
"all_params",
":",
"args",
".",
"append",
"(",
"all_params",
".",
"pop",
"(",
"name",
")",
")",
"elif",
"name",
"in",
"defaults",
":",
"args",
".",
"append",
"(",
"defaults",
"[",
"name",
"]",
")",
"else",
":",
"break",
"# handle wildcard GET/POST params",
"if",
"argspec",
"[",
"2",
"]",
":",
"for",
"name",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"all_params",
")",
":",
"if",
"name",
"not",
"in",
"argspec",
"[",
"0",
"]",
":",
"kwargs",
"[",
"name",
"]",
"=",
"value",
"return",
"args",
",",
"varargs",
",",
"kwargs"
] | Determines the arguments for a controller based upon parameters
passed and the argument specification for the controller. | [
"Determines",
"the",
"arguments",
"for",
"a",
"controller",
"based",
"upon",
"parameters",
"passed",
"the",
"argument",
"specification",
"for",
"the",
"controller",
"."
] | python | train |
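A hedged walk-through of the mapping get_args performs, using a hypothetical controller; the values below were traced through the code above:

# controller: def detail(self, item_id, fmt='json', *rest, **extra): ...
# URL remainder ['42'] plus params {'fmt': 'xml', 'debug': '1'} produce:
#   args    -> ['42', 'xml']    # '42' from the path, fmt pulled from the params
#   varargs -> []               # nothing left over for *rest
#   kwargs  -> {'debug': '1'}   # names outside the argspec flow into **extra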
inveniosoftware/invenio-oauth2server | invenio_oauth2server/forms.py | https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/forms.py#L26-L58 | def scopes_multi_checkbox(field, **kwargs):
"""Render multi checkbox widget."""
kwargs.setdefault('type', 'checkbox')
field_id = kwargs.pop('id', field.id)
html = [u'<div class="row">']
for value, label, checked in field.iter_choices():
choice_id = u'%s-%s' % (field_id, value)
options = dict(
kwargs,
name=field.name,
value=value,
id=choice_id,
class_=' ',
)
if checked:
options['checked'] = 'checked'
html.append(u'<div class="col-md-3">')
html.append(u'<label for="{0}" class="checkbox-inline">'.format(
choice_id
))
html.append(u'<input {0} /> '.format(widgets.html_params(**options)))
html.append(u'{0} <br/><small class="text-muted">{1}</small>'.format(
value, label.help_text
))
html.append(u'</label></div>')
html.append(u'</div>')
return HTMLString(u''.join(html)) | [
"def",
"scopes_multi_checkbox",
"(",
"field",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'type'",
",",
"'checkbox'",
")",
"field_id",
"=",
"kwargs",
".",
"pop",
"(",
"'id'",
",",
"field",
".",
"id",
")",
"html",
"=",
"[",
"u'<div class=\"row\">'",
"]",
"for",
"value",
",",
"label",
",",
"checked",
"in",
"field",
".",
"iter_choices",
"(",
")",
":",
"choice_id",
"=",
"u'%s-%s'",
"%",
"(",
"field_id",
",",
"value",
")",
"options",
"=",
"dict",
"(",
"kwargs",
",",
"name",
"=",
"field",
".",
"name",
",",
"value",
"=",
"value",
",",
"id",
"=",
"choice_id",
",",
"class_",
"=",
"' '",
",",
")",
"if",
"checked",
":",
"options",
"[",
"'checked'",
"]",
"=",
"'checked'",
"html",
".",
"append",
"(",
"u'<div class=\"col-md-3\">'",
")",
"html",
".",
"append",
"(",
"u'<label for=\"{0}\" class=\"checkbox-inline\">'",
".",
"format",
"(",
"choice_id",
")",
")",
"html",
".",
"append",
"(",
"u'<input {0} /> '",
".",
"format",
"(",
"widgets",
".",
"html_params",
"(",
"*",
"*",
"options",
")",
")",
")",
"html",
".",
"append",
"(",
"u'{0} <br/><small class=\"text-muted\">{1}</small>'",
".",
"format",
"(",
"value",
",",
"label",
".",
"help_text",
")",
")",
"html",
".",
"append",
"(",
"u'</label></div>'",
")",
"html",
".",
"append",
"(",
"u'</div>'",
")",
"return",
"HTMLString",
"(",
"u''",
".",
"join",
"(",
"html",
")",
")"
] | Render multi checkbox widget. | [
"Render",
"multi",
"checkbox",
"widget",
"."
] | python | train |
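A hedged note on wiring the widget: field.iter_choices() must yield labels that expose a .help_text attribute (Invenio passes Scope-like objects), so plain string labels would break the template. A minimal stand-in:

from collections import namedtuple

Scope = namedtuple('Scope', ['help_text'])        # stand-in for Invenio's Scope
choices = [('user:email', Scope('Read e-mail address'))]
# a SelectMultipleField built from these choices can then render with
# widget=scopes_multi_checkbox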
zomux/deepy | deepy/layers/block.py | https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/layers/block.py#L64-L71 | def load_params(self, path, exclude_free_params=False):
"""
Load parameters into the block.
"""
from deepy.core import graph
from deepy.core.comp_graph import ComputationalGraph
model = graph.compile(blocks=[self])
model.load_params(path, exclude_free_params=exclude_free_params) | [
"def",
"load_params",
"(",
"self",
",",
"path",
",",
"exclude_free_params",
"=",
"False",
")",
":",
"from",
"deepy",
".",
"core",
"import",
"graph",
"from",
"deepy",
".",
"core",
".",
"comp_graph",
"import",
"ComputationalGraph",
"model",
"=",
"graph",
".",
"compile",
"(",
"blocks",
"=",
"[",
"self",
"]",
")",
"model",
".",
"load_params",
"(",
"path",
",",
"exclude_free_params",
"=",
"exclude_free_params",
")"
] | Load parameters into the block. | [
"Load",
"parameters",
"to",
"the",
"block",
"."
] | python | test |
manns/pyspread | pyspread/src/gui/_grid.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L504-L526 | def OnLinkBitmap(self, event):
"""Link bitmap event handler"""
# Get file name
wildcard = "*"
message = _("Select bitmap for current cell")
style = wx.OPEN | wx.CHANGE_DIR
filepath, __ = \
self.grid.interfaces.get_filepath_findex_from_user(wildcard,
message, style)
try:
bmp = wx.Bitmap(filepath)
except TypeError:
return
if bmp.Size == (-1, -1):
# Bitmap could not be read
return
code = "wx.Bitmap(r'{filepath}')".format(filepath=filepath)
key = self.grid.actions.cursor
self.grid.actions.set_code(key, code) | [
"def",
"OnLinkBitmap",
"(",
"self",
",",
"event",
")",
":",
"# Get file name",
"wildcard",
"=",
"\"*\"",
"message",
"=",
"_",
"(",
"\"Select bitmap for current cell\"",
")",
"style",
"=",
"wx",
".",
"OPEN",
"|",
"wx",
".",
"CHANGE_DIR",
"filepath",
",",
"__",
"=",
"self",
".",
"grid",
".",
"interfaces",
".",
"get_filepath_findex_from_user",
"(",
"wildcard",
",",
"message",
",",
"style",
")",
"try",
":",
"bmp",
"=",
"wx",
".",
"Bitmap",
"(",
"filepath",
")",
"except",
"TypeError",
":",
"return",
"if",
"bmp",
".",
"Size",
"==",
"(",
"-",
"1",
",",
"-",
"1",
")",
":",
"# Bitmap could not be read",
"return",
"code",
"=",
"\"wx.Bitmap(r'{filepath}')\"",
".",
"format",
"(",
"filepath",
"=",
"filepath",
")",
"key",
"=",
"self",
".",
"grid",
".",
"actions",
".",
"cursor",
"self",
".",
"grid",
".",
"actions",
".",
"set_code",
"(",
"key",
",",
"code",
")"
] | Link bitmap event handler | [
"Link",
"bitmap",
"event",
"handler"
] | python | train |
romanz/trezor-agent | libagent/util.py | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L87-L102 | def crc24(blob):
"""See https://tools.ietf.org/html/rfc4880#section-6.1 for details."""
CRC24_INIT = 0x0B704CE
CRC24_POLY = 0x1864CFB
crc = CRC24_INIT
for octet in bytearray(blob):
crc ^= (octet << 16)
for _ in range(8):
crc <<= 1
if crc & 0x1000000:
crc ^= CRC24_POLY
assert 0 <= crc < 0x1000000
crc_bytes = struct.pack('>L', crc)
assert crc_bytes[:1] == b'\x00'
return crc_bytes[1:] | [
"def",
"crc24",
"(",
"blob",
")",
":",
"CRC24_INIT",
"=",
"0x0B704CE",
"CRC24_POLY",
"=",
"0x1864CFB",
"crc",
"=",
"CRC24_INIT",
"for",
"octet",
"in",
"bytearray",
"(",
"blob",
")",
":",
"crc",
"^=",
"(",
"octet",
"<<",
"16",
")",
"for",
"_",
"in",
"range",
"(",
"8",
")",
":",
"crc",
"<<=",
"1",
"if",
"crc",
"&",
"0x1000000",
":",
"crc",
"^=",
"CRC24_POLY",
"assert",
"0",
"<=",
"crc",
"<",
"0x1000000",
"crc_bytes",
"=",
"struct",
".",
"pack",
"(",
"'>L'",
",",
"crc",
")",
"assert",
"crc_bytes",
"[",
":",
"1",
"]",
"==",
"b'\\x00'",
"return",
"crc_bytes",
"[",
"1",
":",
"]"
] | See https://tools.ietf.org/html/rfc4880#section-6.1 for details. | [
"See",
"https",
":",
"//",
"tools",
".",
"ietf",
".",
"org",
"/",
"html",
"/",
"rfc4880#section",
"-",
"6",
".",
"1",
"for",
"details",
"."
] | python | train |
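Two properties follow directly from the code above: with no input octets the register never leaves CRC24_INIT, and the packed result is always the low three bytes:

from libagent.util import crc24           # module from this record

assert crc24(b'') == b'\xb7\x04\xce'       # 0x0B704CE, the init value
assert len(crc24(b'openpgp')) == 3         # a 24-bit checksum is 3 bytes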
nornir-automation/nornir | nornir/plugins/tasks/networking/napalm_cli.py | https://github.com/nornir-automation/nornir/blob/3425c47fd870db896cb80f619bae23bd98d50c74/nornir/plugins/tasks/networking/napalm_cli.py#L6-L19 | def napalm_cli(task: Task, commands: List[str]) -> Result:
"""
Run commands on remote devices using napalm
Arguments:
commands: commands to execute
Returns:
Result object with the following attributes set:
* result (``dict``): result of the commands execution
"""
device = task.host.get_connection("napalm", task.nornir.config)
result = device.cli(commands)
return Result(host=task.host, result=result) | [
"def",
"napalm_cli",
"(",
"task",
":",
"Task",
",",
"commands",
":",
"List",
"[",
"str",
"]",
")",
"->",
"Result",
":",
"device",
"=",
"task",
".",
"host",
".",
"get_connection",
"(",
"\"napalm\"",
",",
"task",
".",
"nornir",
".",
"config",
")",
"result",
"=",
"device",
".",
"cli",
"(",
"commands",
")",
"return",
"Result",
"(",
"host",
"=",
"task",
".",
"host",
",",
"result",
"=",
"result",
")"
] | Run commands on remote devices using napalm
Arguments:
commands: commands to execute
Returns:
Result object with the following attributes set:
* result (``dict``): result of the commands execution | [
"Run",
"commands",
"on",
"remote",
"devices",
"using",
"napalm"
] | python | train |
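A hedged usage sketch; the inventory file and host name are placeholders:

from nornir import InitNornir
from nornir.plugins.tasks.networking import napalm_cli

nr = InitNornir(config_file='config.yaml')
result = nr.run(task=napalm_cli, commands=['show version'])
print(result['spine00'][0].result)       # per-host dict keyed by command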
qacafe/cdrouter.py | cdrouter/packages.py | https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/packages.py#L329-L341 | def bulk_edit(self, _fields, ids=None, filter=None, type=None, all=False): # pylint: disable=redefined-builtin
"""Bulk edit a set of packages.
:param _fields: :class:`packages.Package <packages.Package>` object
:param ids: (optional) Int list of package IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`.
"""
schema = PackageSchema(exclude=('id', 'created', 'updated', 'test_count', 'agent_id', 'result_id'))
_fields = self.service.encode(schema, _fields, skip_none=True)
return self.service.bulk_edit(self.base, self.RESOURCE,
_fields, ids=ids, filter=filter, type=type, all=all) | [
"def",
"bulk_edit",
"(",
"self",
",",
"_fields",
",",
"ids",
"=",
"None",
",",
"filter",
"=",
"None",
",",
"type",
"=",
"None",
",",
"all",
"=",
"False",
")",
":",
"# pylint: disable=redefined-builtin",
"schema",
"=",
"PackageSchema",
"(",
"exclude",
"=",
"(",
"'id'",
",",
"'created'",
",",
"'updated'",
",",
"'test_count'",
",",
"'agent_id'",
",",
"'result_id'",
")",
")",
"_fields",
"=",
"self",
".",
"service",
".",
"encode",
"(",
"schema",
",",
"_fields",
",",
"skip_none",
"=",
"True",
")",
"return",
"self",
".",
"service",
".",
"bulk_edit",
"(",
"self",
".",
"base",
",",
"self",
".",
"RESOURCE",
",",
"_fields",
",",
"ids",
"=",
"ids",
",",
"filter",
"=",
"filter",
",",
"type",
"=",
"type",
",",
"all",
"=",
"all",
")"
] | Bulk edit a set of packages.
:param _fields: :class:`packages.Package <packages.Package>` object
:param ids: (optional) Int list of package IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`. | [
"Bulk",
"edit",
"a",
"set",
"of",
"packages",
"."
] | python | train |
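A hedged bulk-edit sketch. The CDRouter client constructor and the config_id field are assumptions about the wider library; only bulk_edit's signature comes from the record:

from cdrouter import CDRouter
from cdrouter.packages import Package

c = CDRouter('http://localhost', token='<TOKEN>')   # token is a placeholder
c.packages.bulk_edit(Package(config_id=42), ids=[101, 102, 103])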
pbrisk/timewave | timewave/producers.py | https://github.com/pbrisk/timewave/blob/cf641391d1607a424042724c8b990d43ee270ef6/timewave/producers.py#L55-L61 | def initialize_path(self, path_num=None):
""" inits producer for next path, i.e. sets current state to initial state"""
for p in self.producers:
p.initialize_path(path_num)
# self.state = copy(self.initial_state)
# self.state.path = path_num
self.random.seed(hash(self.seed) + hash(path_num)) | [
"def",
"initialize_path",
"(",
"self",
",",
"path_num",
"=",
"None",
")",
":",
"for",
"p",
"in",
"self",
".",
"producers",
":",
"p",
".",
"initialize_path",
"(",
"path_num",
")",
"# self.state = copy(self.initial_state)",
"# self.state.path = path_num",
"self",
".",
"random",
".",
"seed",
"(",
"hash",
"(",
"self",
".",
"seed",
")",
"+",
"hash",
"(",
"path_num",
")",
")"
] | inits producer for next path, i.e. sets current state to initial state | [
"inits",
"producer",
"for",
"next",
"path",
"i",
".",
"e",
".",
"sets",
"current",
"state",
"to",
"initial",
"state"
] | python | train |
StackStorm/pybind | pybind/slxos/v17s_1_02/brocade_mpls_rpc/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/brocade_mpls_rpc/__init__.py#L1713-L1734 | def _set_show_mpls_lsp_name_debug(self, v, load=False):
"""
Setter method for show_mpls_lsp_name_debug, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_lsp_name_debug is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_lsp_name_debug() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_mpls_lsp_name_debug.show_mpls_lsp_name_debug, is_leaf=True, yang_name="show-mpls-lsp-name-debug", rest_name="show-mpls-lsp-name-debug", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsLspCmdPoint'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_mpls_lsp_name_debug must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_mpls_lsp_name_debug.show_mpls_lsp_name_debug, is_leaf=True, yang_name="show-mpls-lsp-name-debug", rest_name="show-mpls-lsp-name-debug", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsLspCmdPoint'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__show_mpls_lsp_name_debug = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_show_mpls_lsp_name_debug",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"show_mpls_lsp_name_debug",
".",
"show_mpls_lsp_name_debug",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"show-mpls-lsp-name-debug\"",
",",
"rest_name",
"=",
"\"show-mpls-lsp-name-debug\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"False",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'hidden'",
":",
"u'rpccmd'",
",",
"u'actionpoint'",
":",
"u'showMplsLspCmdPoint'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-mpls'",
",",
"defining_module",
"=",
"'brocade-mpls'",
",",
"yang_type",
"=",
"'rpc'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"show_mpls_lsp_name_debug must be of a type compatible with rpc\"\"\"",
",",
"'defined-type'",
":",
"\"rpc\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=show_mpls_lsp_name_debug.show_mpls_lsp_name_debug, is_leaf=True, yang_name=\"show-mpls-lsp-name-debug\", rest_name=\"show-mpls-lsp-name-debug\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsLspCmdPoint'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__show_mpls_lsp_name_debug",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for show_mpls_lsp_name_debug, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_lsp_name_debug is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_lsp_name_debug() directly. | [
"Setter",
"method",
"for",
"show_mpls_lsp_name_debug",
"mapped",
"from",
"YANG",
"variable",
"/",
"brocade_mpls_rpc",
"/",
"show_mpls_lsp_name_debug",
"(",
"rpc",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_show_mpls_lsp_name_debug",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_show_mpls_lsp_name_debug",
"()",
"directly",
"."
] | python | train |
GetmeUK/MongoFrames | snippets/comparable.py | https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/snippets/comparable.py#L310-L323 | def logged_delete(self, user):
"""Delete the document and log the event in the change log"""
self.delete()
# Log the change
entry = ChangeLogEntry({
'type': 'DELETED',
'documents': [self],
'user': user
})
entry.insert()
return entry | [
"def",
"logged_delete",
"(",
"self",
",",
"user",
")",
":",
"self",
".",
"delete",
"(",
")",
"# Log the change",
"entry",
"=",
"ChangeLogEntry",
"(",
"{",
"'type'",
":",
"'DELETED'",
",",
"'documents'",
":",
"[",
"self",
"]",
",",
"'user'",
":",
"user",
"}",
")",
"entry",
".",
"insert",
"(",
")",
"return",
"entry"
] | Delete the document and log the event in the change log | [
"Delete",
"the",
"document",
"and",
"log",
"the",
"event",
"in",
"the",
"change",
"log"
] | python | train |
CiscoDevNet/webexteamssdk | webexteamssdk/api/rooms.py | https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/api/rooms.py#L76-L133 | def list(self, teamId=None, type=None, sortBy=None, max=None,
**request_parameters):
"""List rooms.
By default, lists rooms to which the authenticated user belongs.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all rooms returned by the
query. The generator will automatically request additional 'pages' of
responses from Webex as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
teamId(basestring): Limit the rooms to those associated with a
team, by ID.
type(basestring): 'direct' returns all 1-to-1 rooms. `group`
returns all group rooms. If not specified or values not
matched, will return all room types.
sortBy(basestring): Sort results by room ID (`id`), most recent
activity (`lastactivity`), or most recently created
(`created`).
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the rooms returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(teamId, basestring)
check_type(type, basestring)
check_type(sortBy, basestring)
check_type(max, int)
params = dict_from_items_with_values(
request_parameters,
teamId=teamId,
type=type,
sortBy=sortBy,
max=max,
)
# API request - get items
items = self._session.get_items(API_ENDPOINT, params=params)
# Yield room objects created from the returned items JSON objects
for item in items:
yield self._object_factory(OBJECT_TYPE, item) | [
"def",
"list",
"(",
"self",
",",
"teamId",
"=",
"None",
",",
"type",
"=",
"None",
",",
"sortBy",
"=",
"None",
",",
"max",
"=",
"None",
",",
"*",
"*",
"request_parameters",
")",
":",
"check_type",
"(",
"teamId",
",",
"basestring",
")",
"check_type",
"(",
"type",
",",
"basestring",
")",
"check_type",
"(",
"sortBy",
",",
"basestring",
")",
"check_type",
"(",
"max",
",",
"int",
")",
"params",
"=",
"dict_from_items_with_values",
"(",
"request_parameters",
",",
"teamId",
"=",
"teamId",
",",
"type",
"=",
"type",
",",
"sortBy",
"=",
"sortBy",
",",
"max",
"=",
"max",
",",
")",
"# API request - get items",
"items",
"=",
"self",
".",
"_session",
".",
"get_items",
"(",
"API_ENDPOINT",
",",
"params",
"=",
"params",
")",
"# Yield room objects created from the returned items JSON objects",
"for",
"item",
"in",
"items",
":",
"yield",
"self",
".",
"_object_factory",
"(",
"OBJECT_TYPE",
",",
"item",
")"
] | List rooms.
By default, lists rooms to which the authenticated user belongs.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all rooms returned by the
query. The generator will automatically request additional 'pages' of
responses from Webex as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
teamId(basestring): Limit the rooms to those associated with a
team, by ID.
type(basestring): 'direct' returns all 1-to-1 rooms. `group`
returns all group rooms. If not specified or values not
matched, will return all room types.
sortBy(basestring): Sort results by room ID (`id`), most recent
activity (`lastactivity`), or most recently created
(`created`).
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the rooms returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error. | [
"List",
"rooms",
"."
] | python | test |
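A hedged usage sketch; the access token is a placeholder:

from webexteamssdk import WebexTeamsAPI

api = WebexTeamsAPI(access_token='<TOKEN>')
for room in api.rooms.list(type='group', sortBy='lastactivity', max=100):
    print(room.title)                    # pagination handled by the generator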
biosustain/optlang | optlang/scipy_interface.py | https://github.com/biosustain/optlang/blob/13673ac26f6b3ba37a2ef392489722c52e3c5ff1/optlang/scipy_interface.py#L111-L122 | def add_variable(self, name):
"""Add a variable to the problem"""
if name in self._variables:
raise ValueError(
"A variable named " + name + " already exists."
)
self._variables[name] = len(self._variables)
self.bounds[name] = (0, None)
new_col = np.zeros(shape=[len(self._constraints), 1])
self._add_col_to_A(new_col)
self._reset_solution() | [
"def",
"add_variable",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"_variables",
":",
"raise",
"ValueError",
"(",
"\"A variable named \"",
"+",
"name",
"+",
"\" already exists.\"",
")",
"self",
".",
"_variables",
"[",
"name",
"]",
"=",
"len",
"(",
"self",
".",
"_variables",
")",
"self",
".",
"bounds",
"[",
"name",
"]",
"=",
"(",
"0",
",",
"None",
")",
"new_col",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"[",
"len",
"(",
"self",
".",
"_constraints",
")",
",",
"1",
"]",
")",
"self",
".",
"_add_col_to_A",
"(",
"new_col",
")",
"self",
".",
"_reset_solution",
"(",
")"
] | Add a variable to the problem | [
"Add",
"a",
"variable",
"to",
"the",
"problem"
] | python | train |
Toblerity/rtree | rtree/index.py | https://github.com/Toblerity/rtree/blob/5d33357c8e88f1a8344415dc15a7d2440211b281/rtree/index.py#L1363-L1366 | def deleteByteArray(self, context, page, returnError):
"""please override"""
returnError.contents.value = self.IllegalStateError
raise NotImplementedError("You must override this method.") | [
"def",
"deleteByteArray",
"(",
"self",
",",
"context",
",",
"page",
",",
"returnError",
")",
":",
"returnError",
".",
"contents",
".",
"value",
"=",
"self",
".",
"IllegalStateError",
"raise",
"NotImplementedError",
"(",
"\"You must override this method.\"",
")"
] | please override | [
"please",
"override"
] | python | test |
JukeboxPipeline/jukebox-core | src/jukeboxcore/addons/guerilla/guerillamgmt.py | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L2342-L2353 | def user_view_prj(self, ):
"""View the project that is currently selected
:returns: None
:rtype: None
:raises: None
"""
i = self.user_prj_tablev.currentIndex()
item = i.internalPointer()
if item:
prj = item.internal_data()
self.view_prj(prj) | [
"def",
"user_view_prj",
"(",
"self",
",",
")",
":",
"i",
"=",
"self",
".",
"user_prj_tablev",
".",
"currentIndex",
"(",
")",
"item",
"=",
"i",
".",
"internalPointer",
"(",
")",
"if",
"item",
":",
"prj",
"=",
"item",
".",
"internal_data",
"(",
")",
"self",
".",
"view_prj",
"(",
"prj",
")"
] | View the project that is currently selected
:returns: None
:rtype: None
:raises: None | [
"View",
"the",
"project",
"that",
"is",
"currently",
"selected"
] | python | train |
python-security/pyt | pyt/cfg/expr_visitor_helper.py | https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/cfg/expr_visitor_helper.py#L43-L48 | def return_connection_handler(nodes, exit_node):
"""Connect all return statements to the Exit node."""
for function_body_node in nodes:
if isinstance(function_body_node, ConnectToExitNode):
if exit_node not in function_body_node.outgoing:
function_body_node.connect(exit_node) | [
"def",
"return_connection_handler",
"(",
"nodes",
",",
"exit_node",
")",
":",
"for",
"function_body_node",
"in",
"nodes",
":",
"if",
"isinstance",
"(",
"function_body_node",
",",
"ConnectToExitNode",
")",
":",
"if",
"exit_node",
"not",
"in",
"function_body_node",
".",
"outgoing",
":",
"function_body_node",
".",
"connect",
"(",
"exit_node",
")"
] | Connect all return statements to the Exit node. | [
"Connect",
"all",
"return",
"statements",
"to",
"the",
"Exit",
"node",
"."
] | python | train |
klmitch/requiem | requiem/processor.py | https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/processor.py#L104-L127 | def proc_response(self, resp, startidx=None):
"""
Post-process a response through all processors in the stack,
in reverse order. For convenience, returns the response
passed to the method.
The startidx argument is an internal interface only used by
the proc_request() and proc_exception() methods to process a
response through a subset of response processors.
"""
# If we're empty, bail out early
if not self:
return resp
# Select appropriate starting index
if startidx is None:
startidx = len(self) - 1
for idx in range(startidx, -1, -1):
_safe_call(self[idx], 'proc_response', resp)
# Return the response we were passed
return resp | [
"def",
"proc_response",
"(",
"self",
",",
"resp",
",",
"startidx",
"=",
"None",
")",
":",
"# If we're empty, bail out early",
"if",
"not",
"self",
":",
"return",
"resp",
"# Select appropriate starting index",
"if",
"startidx",
"is",
"None",
":",
"startidx",
"=",
"len",
"(",
"self",
")",
"for",
"idx",
"in",
"range",
"(",
"startidx",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"_safe_call",
"(",
"self",
"[",
"idx",
"]",
",",
"'proc_response'",
",",
"resp",
")",
"# Return the response we were passed",
"return",
"resp"
] | Post-process a response through all processors in the stack,
in reverse order. For convenience, returns the response
passed to the method.
The startidx argument is an internal interface only used by
the proc_request() and proc_exception() methods to process a
response through a subset of response processors. | [
"Post",
"-",
"process",
"a",
"response",
"through",
"all",
"processors",
"in",
"the",
"stack",
"in",
"reverse",
"order",
".",
"For",
"convenience",
"returns",
"the",
"response",
"passed",
"to",
"the",
"method",
"."
] | python | train |
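The record does not name the stack class, so here is a self-contained stand-in demonstrating the reverse-order dispatch the docstring describes (all names are hypothetical):

```python
# Hypothetical stand-in: a list of processors dispatched in reverse order.
class MiniProcessorStack(list):
    def proc_response(self, resp):
        for proc in reversed(self):
            proc.proc_response(resp)
        return resp

class Logger:
    def proc_response(self, resp):
        print('Logger saw', resp)

class Decompressor:
    def proc_response(self, resp):
        print('Decompressor saw', resp)

MiniProcessorStack([Logger(), Decompressor()]).proc_response('resp')
# prints "Decompressor saw resp" then "Logger saw resp":
# the processor added last gets the first look at the response.
```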
pycontribs/pyrax | pyrax/clouddatabases.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L395-L406 | def get_database(self, name):
"""
Finds the database in this instance with the specified name, and
returns a CloudDatabaseDatabase object. If no match is found, a
NoSuchDatabase exception is raised.
"""
try:
return [db for db in self.list_databases()
if db.name == name][0]
except IndexError:
raise exc.NoSuchDatabase("No database by the name '%s' exists." %
name) | [
"def",
"get_database",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"return",
"[",
"db",
"for",
"db",
"in",
"self",
".",
"list_databases",
"(",
")",
"if",
"db",
".",
"name",
"==",
"name",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"exc",
".",
"NoSuchDatabase",
"(",
"\"No database by the name '%s' exists.\"",
"%",
"name",
")"
] | Finds the database in this instance with the specified name, and
returns a CloudDatabaseDatabase object. If no match is found, a
NoSuchDatabase exception is raised. | [
"Finds",
"the",
"database",
"in",
"this",
"instance",
"with",
"the",
"specified",
"name",
"and",
"returns",
"a",
"CloudDatabaseDatabase",
"object",
".",
"If",
"no",
"match",
"is",
"found",
"a",
"NoSuchDatabase",
"exception",
"is",
"raised",
"."
] | python | train |
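A common lookup-or-create pattern built on the raising behavior above; `create_database` is assumed to be a companion method on the same instance class:

```python
from pyrax import exceptions as exc  # import path assumed

# instance: a CloudDatabaseInstance obtained elsewhere
try:
    db = instance.get_database("sales")
except exc.NoSuchDatabase:
    db = instance.create_database("sales")   # assumed companion method
```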
Kozea/pygal | pygal/graph/box.py | https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/box.py#L67-L81 | def _compute(self):
"""
Compute parameters necessary for later steps
within the rendering process
"""
for serie in self.series:
serie.points, serie.outliers = \
self._box_points(serie.values, self.box_mode)
self._x_pos = [(i + .5) / self._order for i in range(self._order)]
if self._min:
self._box.ymin = min(self._min, self.zero)
if self._max:
self._box.ymax = max(self._max, self.zero) | [
"def",
"_compute",
"(",
"self",
")",
":",
"for",
"serie",
"in",
"self",
".",
"series",
":",
"serie",
".",
"points",
",",
"serie",
".",
"outliers",
"=",
"self",
".",
"_box_points",
"(",
"serie",
".",
"values",
",",
"self",
".",
"box_mode",
")",
"self",
".",
"_x_pos",
"=",
"[",
"(",
"i",
"+",
".5",
")",
"/",
"self",
".",
"_order",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_order",
")",
"]",
"if",
"self",
".",
"_min",
":",
"self",
".",
"_box",
".",
"ymin",
"=",
"min",
"(",
"self",
".",
"_min",
",",
"self",
".",
"zero",
")",
"if",
"self",
".",
"_max",
":",
"self",
".",
"_box",
".",
"ymax",
"=",
"max",
"(",
"self",
".",
"_max",
",",
"self",
".",
"zero",
")"
] | Compute parameters necessary for later steps
within the rendering process | [
"Compute",
"parameters",
"necessary",
"for",
"later",
"steps",
"within",
"the",
"rendering",
"process"
] | python | train |
GMadorell/abris | abris_transform/transformations/transformer.py | https://github.com/GMadorell/abris/blob/0d8ab7ec506835a45fae6935d129f5d7e6937bb2/abris_transform/transformations/transformer.py#L48-L66 | def __add_target_data(self, transformed_data, original_data):
"""
Picks up the target data from the original_data and appends it as a
column to the transformed_data.
Both arguments are expected to be np.array's.
"""
model = self.__config.get_data_model()
target_feature = model.find_target_feature()
name = target_feature.get_name()
if target_feature.is_categorical():
target_row = original_data[name]
target = self.__label_encoder_adapter.transform(target_row)
else:
target = original_data[name].values.astype(type_name_to_data_type("float"))
target = target[..., None]
return np.hstack((transformed_data, target)) | [
"def",
"__add_target_data",
"(",
"self",
",",
"transformed_data",
",",
"original_data",
")",
":",
"model",
"=",
"self",
".",
"__config",
".",
"get_data_model",
"(",
")",
"target_feature",
"=",
"model",
".",
"find_target_feature",
"(",
")",
"name",
"=",
"target_feature",
".",
"get_name",
"(",
")",
"if",
"target_feature",
".",
"is_categorical",
"(",
")",
":",
"target_row",
"=",
"original_data",
"[",
"name",
"]",
"target",
"=",
"self",
".",
"__label_encoder_adapter",
".",
"transform",
"(",
"target_row",
")",
"else",
":",
"target",
"=",
"original_data",
"[",
"name",
"]",
".",
"values",
".",
"astype",
"(",
"type_name_to_data_type",
"(",
"\"float\"",
")",
")",
"target",
"=",
"target",
"[",
"...",
",",
"None",
"]",
"return",
"np",
".",
"hstack",
"(",
"(",
"transformed_data",
",",
"target",
")",
")"
] | Picks up the target data from the original_data and appends it as a
column to the transformed_data.
Both arguments are expected to be np.array's. | [
"Picks",
"up",
"the",
"target",
"data",
"from",
"the",
"original_data",
"and",
"appends",
"it",
"as",
"a",
"column",
"to",
"the",
"transformed_data",
".",
"Both",
"arguments",
"are",
"expected",
"to",
"be",
"np",
".",
"array",
"s",
"."
] | python | train |
mikedh/trimesh | trimesh/path/packing.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/packing.py#L225-L324 | def multipack(polygons,
sheet_size=None,
iterations=50,
density_escape=.95,
spacing=0.094,
quantity=None):
"""
Pack polygons into a rectangle by taking each Polygon's OBB
and then packing that as a rectangle.
Parameters
------------
polygons : (n,) shapely.geometry.Polygon
Source geometry
sheet_size : (2,) float
Size of rectangular sheet
iterations : int
Number of times to run the loop
density_escape : float
When to exit early (0.0 - 1.0)
spacing : float
How big a gap to leave between polygons
quantity : (n,) int, or None
Quantity of each Polygon
Returns
-------------
overall_inserted : (m,) int
Indexes of inserted polygons
packed : (m, 3, 3) float
Homogeneous transforms from original frame to packed frame
"""
from .polygons import polygons_obb
if quantity is None:
quantity = np.ones(len(polygons), dtype=np.int64)
else:
quantity = np.asanyarray(quantity, dtype=np.int64)
if len(quantity) != len(polygons):
raise ValueError('quantity must match polygons')
# find the oriented bounding box of the polygons
obb, rectangles = polygons_obb(polygons)
# pad all sides of the rectangle
rectangles += 2.0 * spacing
# move the OBB transform so the polygon is centered
# in the padded rectangle
for i, r in enumerate(rectangles):
obb[i][0:2, 2] += r * .5
# for polygons occurring multiple times
indexes = np.hstack([np.ones(q, dtype=np.int64) * i
for i, q in enumerate(quantity)])
# stack using advanced indexing
obb = obb[indexes]
rectangles = rectangles[indexes]
# store timing
tic = time.time()
overall_density = 0.0
# if no sheet size specified, make a large one
if sheet_size is None:
max_dim = np.max(rectangles, axis=0)
sum_dim = np.sum(rectangles, axis=0)
sheet_size = [sum_dim[0], max_dim[1] * 2]
log.debug('packing %d polygons', len(polygons))
# run packing for a number of iterations, shuffling insertion order
for i in range(iterations):
(density,
offset,
inserted,
sheet) = pack_rectangles(rectangles,
sheet_size=sheet_size,
shuffle=(i != 0))
if density > overall_density:
overall_density = density
overall_offset = offset
overall_inserted = inserted
if density > density_escape:
break
toc = time.time()
log.debug('packing finished %i iterations in %f seconds',
i + 1,
toc - tic)
log.debug('%i/%i parts were packed successfully',
np.sum(overall_inserted),
quantity.sum())
log.debug('final rectangular density is %f.', overall_density)
# transformations to packed positions
packed = obb[overall_inserted]
# apply the offset and inter- polygon spacing
packed.reshape(-1, 9)[:, [2, 5]] += overall_offset + spacing
return indexes[overall_inserted], packed | [
"def",
"multipack",
"(",
"polygons",
",",
"sheet_size",
"=",
"None",
",",
"iterations",
"=",
"50",
",",
"density_escape",
"=",
".95",
",",
"spacing",
"=",
"0.094",
",",
"quantity",
"=",
"None",
")",
":",
"from",
".",
"polygons",
"import",
"polygons_obb",
"if",
"quantity",
"is",
"None",
":",
"quantity",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"polygons",
")",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"else",
":",
"quantity",
"=",
"np",
".",
"asanyarray",
"(",
"quantity",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"if",
"len",
"(",
"quantity",
")",
"!=",
"len",
"(",
"polygons",
")",
":",
"raise",
"ValueError",
"(",
"'quantity must match polygons'",
")",
"# find the oriented bounding box of the polygons",
"obb",
",",
"rectangles",
"=",
"polygons_obb",
"(",
"polygons",
")",
"# pad all sides of the rectangle",
"rectangles",
"+=",
"2.0",
"*",
"spacing",
"# move the OBB transform so the polygon is centered",
"# in the padded rectangle",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"rectangles",
")",
":",
"obb",
"[",
"i",
"]",
"[",
"0",
":",
"2",
",",
"2",
"]",
"+=",
"r",
"*",
".5",
"# for polygons occurring multiple times",
"indexes",
"=",
"np",
".",
"hstack",
"(",
"[",
"np",
".",
"ones",
"(",
"q",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"*",
"i",
"for",
"i",
",",
"q",
"in",
"enumerate",
"(",
"quantity",
")",
"]",
")",
"# stack using advanced indexing",
"obb",
"=",
"obb",
"[",
"indexes",
"]",
"rectangles",
"=",
"rectangles",
"[",
"indexes",
"]",
"# store timing",
"tic",
"=",
"time",
".",
"time",
"(",
")",
"overall_density",
"=",
"0.0",
"# if no sheet size specified, make a large one",
"if",
"sheet_size",
"is",
"None",
":",
"max_dim",
"=",
"np",
".",
"max",
"(",
"rectangles",
",",
"axis",
"=",
"0",
")",
"sum_dim",
"=",
"np",
".",
"sum",
"(",
"rectangles",
",",
"axis",
"=",
"0",
")",
"sheet_size",
"=",
"[",
"sum_dim",
"[",
"0",
"]",
",",
"max_dim",
"[",
"1",
"]",
"*",
"2",
"]",
"log",
".",
"debug",
"(",
"'packing %d polygons'",
",",
"len",
"(",
"polygons",
")",
")",
"# run packing for a number of iterations, shuffling insertion order",
"for",
"i",
"in",
"range",
"(",
"iterations",
")",
":",
"(",
"density",
",",
"offset",
",",
"inserted",
",",
"sheet",
")",
"=",
"pack_rectangles",
"(",
"rectangles",
",",
"sheet_size",
"=",
"sheet_size",
",",
"shuffle",
"=",
"(",
"i",
"!=",
"0",
")",
")",
"if",
"density",
">",
"overall_density",
":",
"overall_density",
"=",
"density",
"overall_offset",
"=",
"offset",
"overall_inserted",
"=",
"inserted",
"if",
"density",
">",
"density_escape",
":",
"break",
"toc",
"=",
"time",
".",
"time",
"(",
")",
"log",
".",
"debug",
"(",
"'packing finished %i iterations in %f seconds'",
",",
"i",
"+",
"1",
",",
"toc",
"-",
"tic",
")",
"log",
".",
"debug",
"(",
"'%i/%i parts were packed successfully'",
",",
"np",
".",
"sum",
"(",
"overall_inserted",
")",
",",
"quantity",
".",
"sum",
"(",
")",
")",
"log",
".",
"debug",
"(",
"'final rectangular density is %f.'",
",",
"overall_density",
")",
"# transformations to packed positions",
"packed",
"=",
"obb",
"[",
"overall_inserted",
"]",
"# apply the offset and inter- polygon spacing",
"packed",
".",
"reshape",
"(",
"-",
"1",
",",
"9",
")",
"[",
":",
",",
"[",
"2",
",",
"5",
"]",
"]",
"+=",
"overall_offset",
"+",
"spacing",
"return",
"indexes",
"[",
"overall_inserted",
"]",
",",
"packed"
] | Pack polygons into a rectangle by taking each Polygon's OBB
and then packing that as a rectangle.
Parameters
------------
polygons : (n,) shapely.geometry.Polygon
Source geometry
sheet_size : (2,) float
Size of rectangular sheet
iterations : int
Number of times to run the loop
density_escape : float
When to exit early (0.0 - 1.0)
spacing : float
How big a gap to leave between polygons
quantity : (n,) int, or None
Quantity of each Polygon
Returns
-------------
overall_inserted : (m,) int
Indexes of inserted polygons
packed : (m, 3, 3) float
Homogeneous transforms from original frame to packed frame
"Pack",
"polygons",
"into",
"a",
"rectangle",
"by",
"taking",
"each",
"Polygon",
"s",
"OBB",
"and",
"then",
"packing",
"that",
"as",
"a",
"rectangle",
"."
] | python | train |
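A usage sketch with shapely rectangles; the import path matches the record, but treat the values as illustrative:

```python
import numpy as np
from shapely.geometry import Polygon
from trimesh.path.packing import multipack

polys = [Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),   # unit square
         Polygon([(0, 0), (2, 0), (2, 1), (0, 1)])]   # 2 x 1 rectangle
indexes, transforms = multipack(polys, quantity=[2, 1], spacing=0.05)

print(indexes)           # maps each placement to its source polygon, e.g. [0 0 1]
print(transforms.shape)  # one (3, 3) homogeneous matrix per placement: (3, 3, 3)
```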
treethought/flask-assistant | flask_assistant/hass.py | https://github.com/treethought/flask-assistant/blob/9331b9796644dfa987bcd97a13e78e9ab62923d3/flask_assistant/hass.py#L52-L54 | def is_state(self, entity_id, state):
"""Checks if the entity has the given state"""
return remote.is_state(self.api, entity_id, state) | [
"def",
"is_state",
"(",
"self",
",",
"entity_id",
",",
"state",
")",
":",
"return",
"remote",
".",
"is_state",
"(",
"self",
".",
"api",
",",
"entity_id",
",",
"state",
")"
] | Checks if the entity has the given state | [
"Checks",
"if",
"the",
"entity",
"has",
"the",
"given",
"state"
] | python | train |
Julius2342/pyvlx | pyvlx/parameter.py | https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/parameter.py#L37-L45 | def is_valid_int(value):
"""Test if value can be rendered out of int."""
if 0 <= value <= Parameter.MAX: # This includes ON and OFF
return True
if value == Parameter.UNKNOWN_VALUE:
return True
if value == Parameter.CURRENT_POSITION:
return True
return False | [
"def",
"is_valid_int",
"(",
"value",
")",
":",
"if",
"0",
"<=",
"value",
"<=",
"Parameter",
".",
"MAX",
":",
"# This includes ON and OFF",
"return",
"True",
"if",
"value",
"==",
"Parameter",
".",
"UNKNOWN_VALUE",
":",
"return",
"True",
"if",
"value",
"==",
"Parameter",
".",
"CURRENT_POSITION",
":",
"return",
"True",
"return",
"False"
] | Test if value can be rendered from an int. | [
"Test",
"if",
"value",
"can",
"be",
"rendered",
"out",
"of",
"int",
"."
] | python | train |
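A quick check of the accepted ranges, assuming `is_valid_int` is exposed as a staticmethod and the sentinels are the positive KLF-200 constants:

```python
from pyvlx.parameter import Parameter

assert Parameter.is_valid_int(0)                           # inside 0..MAX
assert Parameter.is_valid_int(Parameter.MAX)
assert Parameter.is_valid_int(Parameter.UNKNOWN_VALUE)     # sentinel accepted
assert Parameter.is_valid_int(Parameter.CURRENT_POSITION)  # sentinel accepted
assert not Parameter.is_valid_int(-1)                      # below the range
```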
RRZE-HPC/kerncraft | kerncraft/kernel.py | https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L355-L375 | def index_order(self, sources=True, destinations=True):
"""
Return the order of indices as they appear in array references.
Use *sources* and *destinations* to filter output
"""
if sources:
arefs = chain(*self.sources.values())
else:
arefs = []
if destinations:
arefs = chain(arefs, *self.destinations.values())
ret = []
for a in [aref for aref in arefs if aref is not None]:
ref = []
for expr in a:
ref.append(expr.free_symbols)
ret.append(ref)
return ret | [
"def",
"index_order",
"(",
"self",
",",
"sources",
"=",
"True",
",",
"destinations",
"=",
"True",
")",
":",
"if",
"sources",
":",
"arefs",
"=",
"chain",
"(",
"*",
"self",
".",
"sources",
".",
"values",
"(",
")",
")",
"else",
":",
"arefs",
"=",
"[",
"]",
"if",
"destinations",
":",
"arefs",
"=",
"chain",
"(",
"arefs",
",",
"*",
"self",
".",
"destinations",
".",
"values",
"(",
")",
")",
"ret",
"=",
"[",
"]",
"for",
"a",
"in",
"[",
"aref",
"for",
"aref",
"in",
"arefs",
"if",
"aref",
"is",
"not",
"None",
"]",
":",
"ref",
"=",
"[",
"]",
"for",
"expr",
"in",
"a",
":",
"ref",
".",
"append",
"(",
"expr",
".",
"free_symbols",
")",
"ret",
".",
"append",
"(",
"ref",
")",
"return",
"ret"
] | Return the order of indices as they appear in array references.
Use *sources* and *destinations* to filter output | [
"Return",
"the",
"order",
"of",
"indices",
"as",
"they",
"appear",
"in",
"array",
"references",
"."
] | python | test |
googleapis/google-cloud-python | bigtable/google/cloud/bigtable/row_data.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/row_data.py#L432-L437 | def _create_retry_request(self):
"""Helper for :meth:`__iter__`."""
req_manager = _ReadRowsRequestManager(
self.request, self.last_scanned_row_key, self._counter
)
return req_manager.build_updated_request() | [
"def",
"_create_retry_request",
"(",
"self",
")",
":",
"req_manager",
"=",
"_ReadRowsRequestManager",
"(",
"self",
".",
"request",
",",
"self",
".",
"last_scanned_row_key",
",",
"self",
".",
"_counter",
")",
"return",
"req_manager",
".",
"build_updated_request",
"(",
")"
] | Helper for :meth:`__iter__`. | [
"Helper",
"for",
":",
"meth",
":",
"__iter__",
"."
] | python | train |
tensorflow/cleverhans | cleverhans_tutorials/mnist_tutorial_pytorch.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/mnist_tutorial_pytorch.py#L68-L170 | def mnist_tutorial(nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
train_end=-1, test_end=-1, learning_rate=LEARNING_RATE):
"""
MNIST cleverhans tutorial
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:return: an AccuracyReport object
"""
# Train a pytorch MNIST model
torch_model = PytorchMnistModel()
if torch.cuda.is_available():
torch_model = torch_model.cuda()
report = AccuracyReport()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=True, download=True,
transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=False, transform=transforms.ToTensor()),
batch_size=batch_size)
# Truncate the datasets so that our test runs more quickly
train_loader.dataset.train_data = train_loader.dataset.train_data[
:train_end]
test_loader.dataset.test_data = test_loader.dataset.test_data[:test_end]
# Train our model
optimizer = optim.Adam(torch_model.parameters(), lr=learning_rate)
train_loss = []
total = 0
correct = 0
step = 0
for _epoch in range(nb_epochs):
for xs, ys in train_loader:
xs, ys = Variable(xs), Variable(ys)
if torch.cuda.is_available():
xs, ys = xs.cuda(), ys.cuda()
optimizer.zero_grad()
preds = torch_model(xs)
loss = F.nll_loss(preds, ys)
loss.backward() # calc gradients
train_loss.append(loss.data.item())
optimizer.step() # update gradients
preds_np = preds.cpu().detach().numpy()
correct += (np.argmax(preds_np, axis=1) == ys.cpu().detach().numpy()).sum()
total += train_loader.batch_size
step += 1
if total % 1000 == 0:
acc = float(correct) / total
print('[%s] Training accuracy: %.2f%%' % (step, acc * 100))
total = 0
correct = 0
# Evaluate on clean data
total = 0
correct = 0
for xs, ys in test_loader:
xs, ys = Variable(xs), Variable(ys)
if torch.cuda.is_available():
xs, ys = xs.cuda(), ys.cuda()
preds = torch_model(xs)
preds_np = preds.cpu().detach().numpy()
correct += (np.argmax(preds_np, axis=1) == ys.cpu().detach().numpy()).sum()
total += len(xs)
acc = float(correct) / total
report.clean_train_clean_eval = acc
print('[%s] Clean accuracy: %.2f%%' % (step, acc * 100))
# We use tf for evaluation on adversarial data
sess = tf.Session()
x_op = tf.placeholder(tf.float32, shape=(None, 1, 28, 28,))
# Convert pytorch model to a tf_model and wrap it in cleverhans
tf_model_fn = convert_pytorch_model_to_tf(torch_model)
cleverhans_model = CallableModelWrapper(tf_model_fn, output_layer='logits')
# Create an FGSM attack
fgsm_op = FastGradientMethod(cleverhans_model, sess=sess)
fgsm_params = {'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.}
adv_x_op = fgsm_op.generate(x_op, **fgsm_params)
adv_preds_op = tf_model_fn(adv_x_op)
# Run an evaluation of our model against fgsm
total = 0
correct = 0
for xs, ys in test_loader:
adv_preds = sess.run(adv_preds_op, feed_dict={x_op: xs})
correct += (np.argmax(adv_preds, axis=1) == ys.cpu().detach().numpy()).sum()
total += test_loader.batch_size
acc = float(correct) / total
print('Adv accuracy: {:.3f}'.format(acc * 100))
report.clean_train_adv_eval = acc
return report | [
"def",
"mnist_tutorial",
"(",
"nb_epochs",
"=",
"NB_EPOCHS",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"train_end",
"=",
"-",
"1",
",",
"test_end",
"=",
"-",
"1",
",",
"learning_rate",
"=",
"LEARNING_RATE",
")",
":",
"# Train a pytorch MNIST model",
"torch_model",
"=",
"PytorchMnistModel",
"(",
")",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
":",
"torch_model",
"=",
"torch_model",
".",
"cuda",
"(",
")",
"report",
"=",
"AccuracyReport",
"(",
")",
"train_loader",
"=",
"torch",
".",
"utils",
".",
"data",
".",
"DataLoader",
"(",
"datasets",
".",
"MNIST",
"(",
"'data'",
",",
"train",
"=",
"True",
",",
"download",
"=",
"True",
",",
"transform",
"=",
"transforms",
".",
"ToTensor",
"(",
")",
")",
",",
"batch_size",
"=",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
"test_loader",
"=",
"torch",
".",
"utils",
".",
"data",
".",
"DataLoader",
"(",
"datasets",
".",
"MNIST",
"(",
"'data'",
",",
"train",
"=",
"False",
",",
"transform",
"=",
"transforms",
".",
"ToTensor",
"(",
")",
")",
",",
"batch_size",
"=",
"batch_size",
")",
"# Truncate the datasets so that our test run more quickly",
"train_loader",
".",
"dataset",
".",
"train_data",
"=",
"train_loader",
".",
"dataset",
".",
"train_data",
"[",
":",
"train_end",
"]",
"test_loader",
".",
"dataset",
".",
"test_data",
"=",
"test_loader",
".",
"dataset",
".",
"test_data",
"[",
":",
"test_end",
"]",
"# Train our model",
"optimizer",
"=",
"optim",
".",
"Adam",
"(",
"torch_model",
".",
"parameters",
"(",
")",
",",
"lr",
"=",
"learning_rate",
")",
"train_loss",
"=",
"[",
"]",
"total",
"=",
"0",
"correct",
"=",
"0",
"step",
"=",
"0",
"for",
"_epoch",
"in",
"range",
"(",
"nb_epochs",
")",
":",
"for",
"xs",
",",
"ys",
"in",
"train_loader",
":",
"xs",
",",
"ys",
"=",
"Variable",
"(",
"xs",
")",
",",
"Variable",
"(",
"ys",
")",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
":",
"xs",
",",
"ys",
"=",
"xs",
".",
"cuda",
"(",
")",
",",
"ys",
".",
"cuda",
"(",
")",
"optimizer",
".",
"zero_grad",
"(",
")",
"preds",
"=",
"torch_model",
"(",
"xs",
")",
"loss",
"=",
"F",
".",
"nll_loss",
"(",
"preds",
",",
"ys",
")",
"loss",
".",
"backward",
"(",
")",
"# calc gradients",
"train_loss",
".",
"append",
"(",
"loss",
".",
"data",
".",
"item",
"(",
")",
")",
"optimizer",
".",
"step",
"(",
")",
"# update gradients",
"preds_np",
"=",
"preds",
".",
"cpu",
"(",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")",
"correct",
"+=",
"(",
"np",
".",
"argmax",
"(",
"preds_np",
",",
"axis",
"=",
"1",
")",
"==",
"ys",
".",
"cpu",
"(",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")",
")",
".",
"sum",
"(",
")",
"total",
"+=",
"train_loader",
".",
"batch_size",
"step",
"+=",
"1",
"if",
"total",
"%",
"1000",
"==",
"0",
":",
"acc",
"=",
"float",
"(",
"correct",
")",
"/",
"total",
"print",
"(",
"'[%s] Training accuracy: %.2f%%'",
"%",
"(",
"step",
",",
"acc",
"*",
"100",
")",
")",
"total",
"=",
"0",
"correct",
"=",
"0",
"# Evaluate on clean data",
"total",
"=",
"0",
"correct",
"=",
"0",
"for",
"xs",
",",
"ys",
"in",
"test_loader",
":",
"xs",
",",
"ys",
"=",
"Variable",
"(",
"xs",
")",
",",
"Variable",
"(",
"ys",
")",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
":",
"xs",
",",
"ys",
"=",
"xs",
".",
"cuda",
"(",
")",
",",
"ys",
".",
"cuda",
"(",
")",
"preds",
"=",
"torch_model",
"(",
"xs",
")",
"preds_np",
"=",
"preds",
".",
"cpu",
"(",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")",
"correct",
"+=",
"(",
"np",
".",
"argmax",
"(",
"preds_np",
",",
"axis",
"=",
"1",
")",
"==",
"ys",
".",
"cpu",
"(",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")",
")",
".",
"sum",
"(",
")",
"total",
"+=",
"len",
"(",
"xs",
")",
"acc",
"=",
"float",
"(",
"correct",
")",
"/",
"total",
"report",
".",
"clean_train_clean_eval",
"=",
"acc",
"print",
"(",
"'[%s] Clean accuracy: %.2f%%'",
"%",
"(",
"step",
",",
"acc",
"*",
"100",
")",
")",
"# We use tf for evaluation on adversarial data",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"x_op",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"1",
",",
"28",
",",
"28",
",",
")",
")",
"# Convert pytorch model to a tf_model and wrap it in cleverhans",
"tf_model_fn",
"=",
"convert_pytorch_model_to_tf",
"(",
"torch_model",
")",
"cleverhans_model",
"=",
"CallableModelWrapper",
"(",
"tf_model_fn",
",",
"output_layer",
"=",
"'logits'",
")",
"# Create an FGSM attack",
"fgsm_op",
"=",
"FastGradientMethod",
"(",
"cleverhans_model",
",",
"sess",
"=",
"sess",
")",
"fgsm_params",
"=",
"{",
"'eps'",
":",
"0.3",
",",
"'clip_min'",
":",
"0.",
",",
"'clip_max'",
":",
"1.",
"}",
"adv_x_op",
"=",
"fgsm_op",
".",
"generate",
"(",
"x_op",
",",
"*",
"*",
"fgsm_params",
")",
"adv_preds_op",
"=",
"tf_model_fn",
"(",
"adv_x_op",
")",
"# Run an evaluation of our model against fgsm",
"total",
"=",
"0",
"correct",
"=",
"0",
"for",
"xs",
",",
"ys",
"in",
"test_loader",
":",
"adv_preds",
"=",
"sess",
".",
"run",
"(",
"adv_preds_op",
",",
"feed_dict",
"=",
"{",
"x_op",
":",
"xs",
"}",
")",
"correct",
"+=",
"(",
"np",
".",
"argmax",
"(",
"adv_preds",
",",
"axis",
"=",
"1",
")",
"==",
"ys",
".",
"cpu",
"(",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")",
")",
".",
"sum",
"(",
")",
"total",
"+=",
"test_loader",
".",
"batch_size",
"acc",
"=",
"float",
"(",
"correct",
")",
"/",
"total",
"print",
"(",
"'Adv accuracy: {:.3f}'",
".",
"format",
"(",
"acc",
"*",
"100",
")",
")",
"report",
".",
"clean_train_adv_eval",
"=",
"acc",
"return",
"report"
] | MNIST cleverhans tutorial
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:return: an AccuracyReport object | [
"MNIST",
"cleverhans",
"tutorial",
":",
"param",
"nb_epochs",
":",
"number",
"of",
"epochs",
"to",
"train",
"model",
":",
"param",
"batch_size",
":",
"size",
"of",
"training",
"batches",
":",
"param",
"learning_rate",
":",
"learning",
"rate",
"for",
"training",
":",
"return",
":",
"an",
"AccuracyReport",
"object"
] | python | train |
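A smoke-test invocation using the truncation arguments to keep the run small (values illustrative):

```python
from cleverhans_tutorials.mnist_tutorial_pytorch import mnist_tutorial

report = mnist_tutorial(nb_epochs=1, batch_size=128,
                        train_end=2000, test_end=500,
                        learning_rate=0.001)
print(report.clean_train_clean_eval)  # accuracy on clean test data
print(report.clean_train_adv_eval)    # accuracy under the FGSM attack
```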
AshleySetter/optoanalysis | optoanalysis/optoanalysis/optoanalysis.py | https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L2605-L2651 | def animate_2Dscatter(x, y, NumAnimatedPoints=50, NTrailPoints=20,
xlabel="", ylabel="",
xlims=None, ylims=None, filename="testAnim.mp4",
bitrate=1e5, dpi=5e2, fps=30, figsize = [6, 6]):
"""
Animates x and y - where x and y are 1d arrays of x and y
positions and it plots x[i:i+NTrailPoints] and y[i:i+NTrailPoints]
against each other and iterates through i.
"""
fig, ax = _plt.subplots(figsize = figsize)
alphas = _np.linspace(0.1, 1, NTrailPoints)
rgba_colors = _np.zeros((NTrailPoints,4))
# for red the first column needs to be one
rgba_colors[:,0] = 1.0
# the fourth column needs to be your alphas
rgba_colors[:, 3] = alphas
scatter = ax.scatter(x[0:NTrailPoints], y[0:NTrailPoints], color=rgba_colors)
if xlims == None:
xlims = (min(x), max(x))
if ylims == None:
ylims = (min(y), max(y))
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def animate(i, scatter):
scatter.axes.clear() # clear old scatter object
scatter = ax.scatter(x[i:i+NTrailPoints], y[i:i+NTrailPoints], color=rgba_colors, animated=True)
# create new scatter with updated data
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return scatter,
ani = _animation.FuncAnimation(fig, animate, _np.arange(1, NumAnimatedPoints),
interval=25, blit=True, fargs=[scatter])
ani.save(filename, bitrate=bitrate, dpi=dpi, fps=fps)
return None | [
"def",
"animate_2Dscatter",
"(",
"x",
",",
"y",
",",
"NumAnimatedPoints",
"=",
"50",
",",
"NTrailPoints",
"=",
"20",
",",
"xlabel",
"=",
"\"\"",
",",
"ylabel",
"=",
"\"\"",
",",
"xlims",
"=",
"None",
",",
"ylims",
"=",
"None",
",",
"filename",
"=",
"\"testAnim.mp4\"",
",",
"bitrate",
"=",
"1e5",
",",
"dpi",
"=",
"5e2",
",",
"fps",
"=",
"30",
",",
"figsize",
"=",
"[",
"6",
",",
"6",
"]",
")",
":",
"fig",
",",
"ax",
"=",
"_plt",
".",
"subplots",
"(",
"figsize",
"=",
"figsize",
")",
"alphas",
"=",
"_np",
".",
"linspace",
"(",
"0.1",
",",
"1",
",",
"NTrailPoints",
")",
"rgba_colors",
"=",
"_np",
".",
"zeros",
"(",
"(",
"NTrailPoints",
",",
"4",
")",
")",
"# for red the first column needs to be one",
"rgba_colors",
"[",
":",
",",
"0",
"]",
"=",
"1.0",
"# the fourth column needs to be your alphas",
"rgba_colors",
"[",
":",
",",
"3",
"]",
"=",
"alphas",
"scatter",
"=",
"ax",
".",
"scatter",
"(",
"x",
"[",
"0",
":",
"NTrailPoints",
"]",
",",
"y",
"[",
"0",
":",
"NTrailPoints",
"]",
",",
"color",
"=",
"rgba_colors",
")",
"if",
"xlims",
"==",
"None",
":",
"xlims",
"=",
"(",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
")",
"if",
"ylims",
"==",
"None",
":",
"ylims",
"=",
"(",
"min",
"(",
"y",
")",
",",
"max",
"(",
"y",
")",
")",
"ax",
".",
"set_xlim",
"(",
"xlims",
")",
"ax",
".",
"set_ylim",
"(",
"ylims",
")",
"ax",
".",
"set_xlabel",
"(",
"xlabel",
")",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
")",
"def",
"animate",
"(",
"i",
",",
"scatter",
")",
":",
"scatter",
".",
"axes",
".",
"clear",
"(",
")",
"# clear old scatter object",
"scatter",
"=",
"ax",
".",
"scatter",
"(",
"x",
"[",
"i",
":",
"i",
"+",
"NTrailPoints",
"]",
",",
"y",
"[",
"i",
":",
"i",
"+",
"NTrailPoints",
"]",
",",
"color",
"=",
"rgba_colors",
",",
"animated",
"=",
"True",
")",
"# create new scatter with updated data",
"ax",
".",
"set_xlim",
"(",
"xlims",
")",
"ax",
".",
"set_ylim",
"(",
"ylims",
")",
"ax",
".",
"set_xlabel",
"(",
"xlabel",
")",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
")",
"return",
"scatter",
",",
"ani",
"=",
"_animation",
".",
"FuncAnimation",
"(",
"fig",
",",
"animate",
",",
"_np",
".",
"arange",
"(",
"1",
",",
"NumAnimatedPoints",
")",
",",
"interval",
"=",
"25",
",",
"blit",
"=",
"True",
",",
"fargs",
"=",
"[",
"scatter",
"]",
")",
"ani",
".",
"save",
"(",
"filename",
",",
"bitrate",
"=",
"bitrate",
",",
"dpi",
"=",
"dpi",
",",
"fps",
"=",
"fps",
")",
"return",
"None"
] | Animates x and y - where x and y are 1d arrays of x and y
positions and it plots x[i:i+NTrailPoints] and y[i:i+NTrailPoints]
against each other and iterates through i. | [
"Animates",
"x",
"and",
"y",
"-",
"where",
"x",
"and",
"y",
"are",
"1d",
"arrays",
"of",
"x",
"and",
"y",
"positions",
"and",
"it",
"plots",
"x",
"[",
"i",
":",
"i",
"+",
"NTrailPoints",
"]",
"and",
"y",
"[",
"i",
":",
"i",
"+",
"NTrailPoints",
"]",
"against",
"each",
"other",
"and",
"iterates",
"through",
"i",
"."
] | python | train |
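A short sketch animating a Lissajous-style trajectory; writing the MP4 assumes a working ffmpeg backend for matplotlib, and the import path is inferred from the record:

```python
import numpy as np
from optoanalysis.optoanalysis import animate_2Dscatter  # import path assumed

t = np.linspace(0, 4 * np.pi, 400)
x, y = np.sin(t), np.cos(2 * t)

# each frame i draws x[i:i+NTrailPoints] vs y[i:i+NTrailPoints],
# with the trail faded by the per-point alpha ramp built in the function
animate_2Dscatter(x, y, NumAnimatedPoints=300, NTrailPoints=20,
                  xlabel="x position", ylabel="y position",
                  filename="trajectory.mp4", fps=30)
```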
PaulHancock/Aegean | AegeanTools/wcs_helpers.py | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L602-L619 | def get_beamarea_pix(self, ra, dec):
"""
Calculate the area of the beam in square pixels.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
area : float
The area of the beam in square pixels.
"""
beam = self.get_pixbeam(ra, dec)
if beam is None:
return 0
return beam.a * beam.b * np.pi | [
"def",
"get_beamarea_pix",
"(",
"self",
",",
"ra",
",",
"dec",
")",
":",
"beam",
"=",
"self",
".",
"get_pixbeam",
"(",
"ra",
",",
"dec",
")",
"if",
"beam",
"is",
"None",
":",
"return",
"0",
"return",
"beam",
".",
"a",
"*",
"beam",
".",
"b",
"*",
"np",
".",
"pi"
] | Calculate the area of the beam in square pixels.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
area : float
The area of the beam in square pixels. | [
"Calculate",
"the",
"area",
"of",
"the",
"beam",
"in",
"square",
"pixels",
"."
] | python | train |
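The returned value is the ellipse area pi * a * b of the pixel-space beam; a hedged usage sketch (`WCSHelper.from_file` is an assumption about the AegeanTools API):

```python
from AegeanTools.wcs_helpers import WCSHelper  # constructor assumed

helper = WCSHelper.from_file("image.fits")
area = helper.get_beamarea_pix(ra=180.0, dec=-45.0)  # pi * a * b, in pixels^2
print(area)
```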
GNS3/gns3-server | gns3server/compute/dynamips/nodes/router.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/router.py#L835-L851 | def set_ghost_status(self, ghost_status):
"""
Sets ghost RAM status
:param ghost_status: state flag indicating status
0 => Do not use IOS ghosting
1 => This is a ghost instance
2 => Use an existing ghost instance
"""
yield from self._hypervisor.send('vm set_ghost_status "{name}" {ghost_status}'.format(name=self._name,
ghost_status=ghost_status))
log.info('Router "{name}" [{id}]: ghost status set to {ghost_status}'.format(name=self._name,
id=self._id,
ghost_status=ghost_status))
self._ghost_status = ghost_status | [
"def",
"set_ghost_status",
"(",
"self",
",",
"ghost_status",
")",
":",
"yield",
"from",
"self",
".",
"_hypervisor",
".",
"send",
"(",
"'vm set_ghost_status \"{name}\" {ghost_status}'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"ghost_status",
"=",
"ghost_status",
")",
")",
"log",
".",
"info",
"(",
"'Router \"{name}\" [{id}]: ghost status set to {ghost_status}'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"id",
"=",
"self",
".",
"_id",
",",
"ghost_status",
"=",
"ghost_status",
")",
")",
"self",
".",
"_ghost_status",
"=",
"ghost_status"
] | Sets ghost RAM status
:param ghost_status: state flag indicating status
0 => Do not use IOS ghosting
1 => This is a ghost instance
2 => Use an existing ghost instance | [
"Sets",
"ghost",
"RAM",
"status"
] | python | train |
arne-cl/discoursegraphs | src/discoursegraphs/readwrite/tiger.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/tiger.py#L93-L116 | def __add_sentence_to_document(self, sentence):
"""
Converts a sentence into a TigerSentenceGraph and adds all
its nodes, edges (and their features) to this document graph.
This also adds a ``dominance_relation`` edge from the root node of this
document graph to the root node of the sentence and appends the
sentence root node ID to ``self.sentences``.
Parameters
----------
sentence : lxml.etree._Element
a sentence from a TigerXML file in etree element format
"""
sentence_graph = TigerSentenceGraph(sentence)
self.tokens.extend(sentence_graph.tokens)
sentence_root_node_id = sentence_graph.root
self.add_nodes_from(sentence_graph.nodes(data=True))
self.add_edges_from(sentence_graph.edges(data=True))
self.add_edge(self.root, sentence_root_node_id,
layers={self.ns, self.ns+':sentence'},
edge_type=EdgeTypes.dominance_relation)
self.sentences.append(sentence_root_node_id) | [
"def",
"__add_sentence_to_document",
"(",
"self",
",",
"sentence",
")",
":",
"sentence_graph",
"=",
"TigerSentenceGraph",
"(",
"sentence",
")",
"self",
".",
"tokens",
".",
"extend",
"(",
"sentence_graph",
".",
"tokens",
")",
"sentence_root_node_id",
"=",
"sentence_graph",
".",
"root",
"self",
".",
"add_nodes_from",
"(",
"sentence_graph",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
")",
"self",
".",
"add_edges_from",
"(",
"sentence_graph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
")",
"self",
".",
"add_edge",
"(",
"self",
".",
"root",
",",
"sentence_root_node_id",
",",
"layers",
"=",
"{",
"self",
".",
"ns",
",",
"self",
".",
"ns",
"+",
"':sentence'",
"}",
",",
"edge_type",
"=",
"EdgeTypes",
".",
"dominance_relation",
")",
"self",
".",
"sentences",
".",
"append",
"(",
"sentence_root_node_id",
")"
] | Converts a sentence into a TigerSentenceGraph and adds all
its nodes, edges (and their features) to this document graph.
This also adds a ``dominance_relation`` edge from the root node of this
document graph to the root node of the sentence and appends the
sentence root node ID to ``self.sentences``.
Parameters
----------
sentence : lxml.etree._Element
a sentence from a TigerXML file in etree element format | [
"Converts",
"a",
"sentence",
"into",
"a",
"TigerSentenceGraph",
"and",
"adds",
"all",
"its",
"nodes",
"edges",
"(",
"and",
"their",
"features",
")",
"to",
"this",
"document",
"graph",
"."
] | python | train |
brocade/pynos | pynos/versions/base/services.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/services.py#L126-L176 | def vrrpe(self, **kwargs):
"""Enable or Disable Vrrpe.
Args:
ip_version (str): The IP version ('4' or '6') for which vrrpe
should be enabled/disabled. Default: `4`.
enable (bool): If vrrpe should be enabled or disabled. Default:
``True``.
get (bool): Get config instead of editing config. (True, False)
rbridge_id (str): The rbridge ID of the device on which vrrpe will
be enabled/disabled. Default: `1`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... dev.services.vrrpe(rbridge_id='25',enable=False)
... dev.services.vrrpe(rbridge_id='25',enable=True)
... dev.services.vrrpe()
Traceback (most recent call last):
KeyError
"""
ip_version = kwargs.pop('ip_version', '4')
enable = kwargs.pop('enable', True)
get = kwargs.pop('get', False)
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
if get:
enable = None
vrrpe_args = dict(rbridge_id=rbridge_id)
vrrpe_method = 'rbridge_id_protocol_hide_vrrp_holder_vrrp_extended'
if ip_version == '6':
vrrpe_method = 'rbridge_id_ipv6_proto_vrrpv3_vrrp_extended'
vrrpe = getattr(self._rbridge, vrrpe_method)
config = vrrpe(**vrrpe_args)
if get:
return callback(config, handler='get_config')
if not enable:
config.find('.//*vrrp-extended').set('operation', 'delete')
return callback(config) | [
"def",
"vrrpe",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"ip_version",
"=",
"kwargs",
".",
"pop",
"(",
"'ip_version'",
",",
"'4'",
")",
"enable",
"=",
"kwargs",
".",
"pop",
"(",
"'enable'",
",",
"True",
")",
"get",
"=",
"kwargs",
".",
"pop",
"(",
"'get'",
",",
"False",
")",
"rbridge_id",
"=",
"kwargs",
".",
"pop",
"(",
"'rbridge_id'",
",",
"'1'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"if",
"get",
":",
"enable",
"=",
"None",
"vrrpe_args",
"=",
"dict",
"(",
"rbridge_id",
"=",
"rbridge_id",
")",
"vrrpe_method",
"=",
"'rbridge_id_protocol_hide_vrrp_holder_vrrp_extended'",
"if",
"ip_version",
"==",
"'6'",
":",
"vrrpe_method",
"=",
"'rbridge_id_ipv6_proto_vrrpv3_vrrp_extended'",
"vrrpe",
"=",
"getattr",
"(",
"self",
".",
"_rbridge",
",",
"vrrpe_method",
")",
"config",
"=",
"vrrpe",
"(",
"*",
"*",
"vrrpe_args",
")",
"if",
"get",
":",
"return",
"callback",
"(",
"config",
",",
"handler",
"=",
"'get_config'",
")",
"if",
"not",
"enable",
":",
"config",
".",
"find",
"(",
"'.//*vrrp-extended'",
")",
".",
"set",
"(",
"'operation'",
",",
"'delete'",
")",
"return",
"callback",
"(",
"config",
")"
] | Enable or Disable Vrrpe.
Args:
ip_version (str): The IP version ('4' or '6') for which vrrpe
should be enabled/disabled. Default: `4`.
enable (bool): If vrrpe should be enabled or disabled. Default:
``True``.
get (bool): Get config instead of editing config. (True, False)
rbridge_id (str): The rbridge ID of the device on which vrrpe will
be enabled/disabled. Default: `1`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... dev.services.vrrpe(rbridge_id='25',enable=False)
... dev.services.vrrpe(rbridge_id='25',enable=True)
... dev.services.vrrpe()
Traceback (most recent call last):
KeyError | [
"Enable",
"or",
"Disable",
"Vrrpe",
".",
"Args",
":",
"ip_version",
"(",
"str",
")",
":",
"The",
"IP",
"version",
"(",
"4",
"or",
"6",
")",
"for",
"which",
"vrrpe",
"should",
"be",
"enabled",
"/",
"disabled",
".",
"Default",
":",
"4",
".",
"enable",
"(",
"bool",
")",
":",
"If",
"vrrpe",
"should",
"be",
"enabled",
"or",
"disabled",
".",
"Default",
":",
"True",
".",
"get",
"(",
"bool",
")",
":",
"Get",
"config",
"instead",
"of",
"editing",
"config",
".",
"(",
"True",
"False",
")",
"rbridge_id",
"(",
"str",
")",
":",
"The",
"rbridge",
"ID",
"of",
"the",
"device",
"on",
"which",
"vrrpe",
"will",
"be",
"enabled",
"/",
"disabled",
".",
"Default",
":",
"1",
".",
"callback",
"(",
"function",
")",
":",
"A",
"function",
"executed",
"upon",
"completion",
"of",
"the",
"method",
".",
"The",
"only",
"parameter",
"passed",
"to",
"callback",
"will",
"be",
"the",
"ElementTree",
"config",
"."
] | python | train |
lord63/tldr.py | tldr/config.py | https://github.com/lord63/tldr.py/blob/73cf9f86254691b2476910ea6a743b6d8bd04963/tldr/config.py#L14-L38 | def get_config():
"""Get the configurations from .tldrrc and return it as a dict."""
config_path = path.join(
(os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')),
'.tldrrc')
if not path.exists(config_path):
sys.exit("Can't find config file at: {0}. You may use `tldr init` "
"to init the config file.".format(config_path))
with io.open(config_path, encoding='utf-8') as f:
try:
config = yaml.safe_load(f)
except yaml.scanner.ScannerError:
sys.exit("The config file is not a valid YAML file.")
supported_colors = ['black', 'red', 'green', 'yellow', 'blue',
'magenta', 'cyan', 'white']
if not set(config['colors'].values()).issubset(set(supported_colors)):
sys.exit("Unsupported colors in config file: {0}.".format(
', '.join(set(config['colors'].values()) - set(supported_colors))))
if not path.exists(config['repo_directory']):
sys.exit("Can't find the tldr repo, check the `repo_directory` "
"setting in config file.")
return config | [
"def",
"get_config",
"(",
")",
":",
"config_path",
"=",
"path",
".",
"join",
"(",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'TLDR_CONFIG_DIR'",
")",
"or",
"path",
".",
"expanduser",
"(",
"'~'",
")",
")",
",",
"'.tldrrc'",
")",
"if",
"not",
"path",
".",
"exists",
"(",
"config_path",
")",
":",
"sys",
".",
"exit",
"(",
"\"Can't find config file at: {0}. You may use `tldr init` \"",
"\"to init the config file.\"",
".",
"format",
"(",
"config_path",
")",
")",
"with",
"io",
".",
"open",
"(",
"config_path",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"try",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"f",
")",
"except",
"yaml",
".",
"scanner",
".",
"ScannerError",
":",
"sys",
".",
"exit",
"(",
"\"The config file is not a valid YAML file.\"",
")",
"supported_colors",
"=",
"[",
"'black'",
",",
"'red'",
",",
"'green'",
",",
"'yellow'",
",",
"'blue'",
",",
"'magenta'",
",",
"'cyan'",
",",
"'white'",
"]",
"if",
"not",
"set",
"(",
"config",
"[",
"'colors'",
"]",
".",
"values",
"(",
")",
")",
".",
"issubset",
"(",
"set",
"(",
"supported_colors",
")",
")",
":",
"sys",
".",
"exit",
"(",
"\"Unsupported colors in config file: {0}.\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"set",
"(",
"config",
"[",
"'colors'",
"]",
".",
"values",
"(",
")",
")",
"-",
"set",
"(",
"supported_colors",
")",
")",
")",
")",
"if",
"not",
"path",
".",
"exists",
"(",
"config",
"[",
"'repo_directory'",
"]",
")",
":",
"sys",
".",
"exit",
"(",
"\"Can't find the tldr repo, check the `repo_directory` \"",
"\"setting in config file.\"",
")",
"return",
"config"
] | Get the configurations from .tldrrc and return it as a dict. | [
"Get",
"the",
"configurations",
"from",
".",
"tldrrc",
"and",
"return",
"it",
"as",
"a",
"dict",
"."
] | python | train |
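A sketch of exercising the loader against a throwaway config directory; the color key names are illustrative (only the values are validated), and `repo_directory` must point at an existing path or `get_config()` exits:

```python
import os
import tempfile
from tldr.config import get_config

cfg_dir = tempfile.mkdtemp()
repo_dir = tempfile.mkdtemp()            # stands in for the tldr pages repo
with open(os.path.join(cfg_dir, ".tldrrc"), "w") as f:
    f.write("colors:\n"
            "  command: cyan\n"          # key names illustrative
            "  description: blue\n"
            "repo_directory: %s\n" % repo_dir)

os.environ["TLDR_CONFIG_DIR"] = cfg_dir
config = get_config()
print(config["repo_directory"])
```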
saltstack/salt | salt/utils/decorators/signature.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/decorators/signature.py#L18-L43 | def identical_signature_wrapper(original_function, wrapped_function):
'''
Return a function with a signature identical to ``original_function``'s
which will call the ``wrapped_function``.
'''
context = {'__wrapped__': wrapped_function}
function_def = compile(
'def {0}({1}):\n'
' return __wrapped__({2})'.format(
# Keep the original function name
original_function.__name__,
# The function signature including defaults, i.e., 'timeout=1'
inspect.formatargspec(
*salt.utils.args.get_function_argspec(original_function)
)[1:-1],
# The function signature without the defaults
inspect.formatargspec(
formatvalue=lambda val: '',
*salt.utils.args.get_function_argspec(original_function)
)[1:-1]
),
'<string>',
'exec'
)
six.exec_(function_def, context)
return wraps(original_function)(context[original_function.__name__]) | [
"def",
"identical_signature_wrapper",
"(",
"original_function",
",",
"wrapped_function",
")",
":",
"context",
"=",
"{",
"'__wrapped__'",
":",
"wrapped_function",
"}",
"function_def",
"=",
"compile",
"(",
"'def {0}({1}):\\n'",
"' return __wrapped__({2})'",
".",
"format",
"(",
"# Keep the original function name",
"original_function",
".",
"__name__",
",",
"# The function signature including defaults, i.e., 'timeout=1'",
"inspect",
".",
"formatargspec",
"(",
"*",
"salt",
".",
"utils",
".",
"args",
".",
"get_function_argspec",
"(",
"original_function",
")",
")",
"[",
"1",
":",
"-",
"1",
"]",
",",
"# The function signature without the defaults",
"inspect",
".",
"formatargspec",
"(",
"formatvalue",
"=",
"lambda",
"val",
":",
"''",
",",
"*",
"salt",
".",
"utils",
".",
"args",
".",
"get_function_argspec",
"(",
"original_function",
")",
")",
"[",
"1",
":",
"-",
"1",
"]",
")",
",",
"'<string>'",
",",
"'exec'",
")",
"six",
".",
"exec_",
"(",
"function_def",
",",
"context",
")",
"return",
"wraps",
"(",
"original_function",
")",
"(",
"context",
"[",
"original_function",
".",
"__name__",
"]",
")"
] | Return a function with a signature identical to ``original_function``'s
which will call the ``wrapped_function``. | [
"Return",
"a",
"function",
"with",
"identical",
"signature",
"as",
"original_function",
"s",
"which",
"will",
"call",
"the",
"wrapped_function",
"."
] | python | train |
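A sketch of the signature preservation with hypothetical functions (`getargspec`/`formatargspec` match the era of the surrounding code, though they are deprecated in newer Pythons):

```python
import inspect
from salt.utils.decorators.signature import identical_signature_wrapper

def original(host, port=4506, timeout=1):   # hypothetical function
    pass

def wrapped(*args, **kwargs):
    return ('called', args, kwargs)

proxy = identical_signature_wrapper(original, wrapped)
print(inspect.formatargspec(*inspect.getargspec(proxy)))
# -> (host, port=4506, timeout=1), matching original's signature
print(proxy('salt', timeout=5))
# -> ('called', ('salt', 4506, 5), {}): arguments are forwarded positionally
```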
ereOn/azmq | azmq/common.py | https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/common.py#L361-L374 | async def read(self):
"""
Read from the box in a blocking manner.
:returns: An item from the box.
"""
result = await self._queue.get()
self._can_write.set()
if self._queue.empty():
self._can_read.clear()
return result | [
"async",
"def",
"read",
"(",
"self",
")",
":",
"result",
"=",
"await",
"self",
".",
"_queue",
".",
"get",
"(",
")",
"self",
".",
"_can_write",
".",
"set",
"(",
")",
"if",
"self",
".",
"_queue",
".",
"empty",
"(",
")",
":",
"self",
".",
"_can_read",
".",
"clear",
"(",
")",
"return",
"result"
] | Read from the box in a blocking manner.
:returns: An item from the box. | [
"Read",
"from",
"the",
"box",
"in",
"a",
"blocking",
"manner",
"."
] | python | train |
fdiskyou/kcshell | kcshell/kcshell.py | https://github.com/fdiskyou/kcshell/blob/f8ea1111a4fcad1c0e31c4b7a9cb91b79bb0b32f/kcshell/kcshell.py#L44-L51 | def do_setmode(self, arg):
''' shift from ASM to DISASM '''
op_modes = config.get_op_modes()
if arg in op_modes:
op_mode = op_modes[arg]
op_mode.cmdloop()
else:
print("Error: unknown operational mode, please use 'help setmode'.") | [
"def",
"do_setmode",
"(",
"self",
",",
"arg",
")",
":",
"op_modes",
"=",
"config",
".",
"get_op_modes",
"(",
")",
"if",
"arg",
"in",
"op_modes",
":",
"op_mode",
"=",
"op_modes",
"[",
"arg",
"]",
"op_mode",
".",
"cmdloop",
"(",
")",
"else",
":",
"print",
"(",
"\"Error: unknown operational mode, please use 'help setmode'.\"",
")"
] | shift from ASM to DISASM | [
"shift",
"from",
"ASM",
"to",
"DISASM"
] | python | train |
senaite/senaite.core | bika/lims/setuphandlers.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/setuphandlers.py#L434-L441 | def setup_catalog_mappings(portal):
"""Setup portal_type -> catalog mappings
"""
logger.info("*** Setup Catalog Mappings ***")
at = api.get_tool("archetype_tool")
for portal_type, catalogs in CATALOG_MAPPINGS:
at.setCatalogsByType(portal_type, catalogs) | [
"def",
"setup_catalog_mappings",
"(",
"portal",
")",
":",
"logger",
".",
"info",
"(",
"\"*** Setup Catalog Mappings ***\"",
")",
"at",
"=",
"api",
".",
"get_tool",
"(",
"\"archetype_tool\"",
")",
"for",
"portal_type",
",",
"catalogs",
"in",
"CATALOG_MAPPINGS",
":",
"at",
".",
"setCatalogsByType",
"(",
"portal_type",
",",
"catalogs",
")"
] | Setup portal_type -> catalog mappings | [
"Setup",
"portal_type",
"-",
">",
"catalog",
"mappings"
] | python | train |
core/uricore | uricore/wkz_urls.py | https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_urls.py#L412-L425 | def url_unquote(s, charset='utf-8', errors='replace'):
"""URL decode a single string with a given decoding.
By default encoding errors are replaced. If you want a different behavior
you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
:param s: the string to unquote.
:param charset: the charset to be used.
:param errors: the error handling for the charset decoding.
"""
if isinstance(s, unicode):
s = s.encode(charset)
return _decode_unicode(_unquote(s), charset, errors) | [
"def",
"url_unquote",
"(",
"s",
",",
"charset",
"=",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"s",
"=",
"s",
".",
"encode",
"(",
"charset",
")",
"return",
"_decode_unicode",
"(",
"_unquote",
"(",
"s",
")",
",",
"charset",
",",
"errors",
")"
] | URL decode a single string with a given decoding.
By default encoding errors are replaced. If you want a different behavior
you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
:param s: the string to unquote.
:param charset: the charset to be used.
:param errors: the error handling for the charset decoding. | [
"URL",
"decode",
"a",
"single",
"string",
"with",
"a",
"given",
"decoding",
"."
] | python | train |
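A small decoding example from the Python 2 era, matching the `unicode` check in the function:

```python
# -*- coding: utf-8 -*-
from uricore.wkz_urls import url_unquote

print(url_unquote('H%C3%A4llo%20Welt'))            # -> u'Hällo Welt'
print(url_unquote('H%C3%A4llo', charset='ascii'))  # 'replace' turns the bad
                                                   # bytes into U+FFFD markers
```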
aparo/pyes | pyes/models.py | https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/models.py#L58-L64 | def delete(self, bulk=False):
"""
Delete the object
"""
meta = self._meta
conn = meta['connection']
conn.delete(meta.index, meta.type, meta.id, bulk=bulk) | [
"def",
"delete",
"(",
"self",
",",
"bulk",
"=",
"False",
")",
":",
"meta",
"=",
"self",
".",
"_meta",
"conn",
"=",
"meta",
"[",
"'connection'",
"]",
"conn",
".",
"delete",
"(",
"meta",
".",
"index",
",",
"meta",
".",
"type",
",",
"meta",
".",
"id",
",",
"bulk",
"=",
"bulk",
")"
] | Delete the object | [
"Delete",
"the",
"object"
] | python | train |
blackecho/Deep-Learning-TensorFlow | yadlt/utils/utilities.py | https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/utils/utilities.py#L102-L112 | def to_one_hot(dataY):
"""Convert the vector of labels dataY into one-hot encoding.
:param dataY: vector of labels
:return: one-hot encoded labels
"""
nc = 1 + np.max(dataY)
onehot = [np.zeros(nc, dtype=np.int8) for _ in dataY]
for i, j in enumerate(dataY):
onehot[i][j] = 1
return onehot | [
"def",
"to_one_hot",
"(",
"dataY",
")",
":",
"nc",
"=",
"1",
"+",
"np",
".",
"max",
"(",
"dataY",
")",
"onehot",
"=",
"[",
"np",
".",
"zeros",
"(",
"nc",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"for",
"_",
"in",
"dataY",
"]",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"dataY",
")",
":",
"onehot",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"1",
"return",
"onehot"
] | Convert the vector of labels dataY into one-hot encoding.
:param dataY: vector of labels
:return: one-hot encoded labels | [
"Convert",
"the",
"vector",
"of",
"labels",
"dataY",
"into",
"one",
"-",
"hot",
"encoding",
"."
] | python | train |
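A worked example: with labels {0, 1, 2}, nc = 1 + max(dataY) = 3, so every label becomes a length-3 indicator vector:

```python
import numpy as np
from yadlt.utils.utilities import to_one_hot

for row in to_one_hot(np.array([0, 2, 1])):
    print(row)
# [1 0 0]
# [0 0 1]
# [0 1 0]
```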
bitprophet/ssh | ssh/agent.py | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/agent.py#L199-L219 | def connect(self):
"""
Method automatically called by the run() method of the AgentProxyThread
"""
if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
except:
# probably a dangling env var: the ssh agent is gone
return
elif sys.platform == 'win32':
import win_pageant
if win_pageant.can_talk_to_agent():
conn = win_pageant.PageantConnection()
else:
return
else:
# no agent support
return
self._conn = conn | [
"def",
"connect",
"(",
"self",
")",
":",
"if",
"(",
"'SSH_AUTH_SOCK'",
"in",
"os",
".",
"environ",
")",
"and",
"(",
"sys",
".",
"platform",
"!=",
"'win32'",
")",
":",
"conn",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_UNIX",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"retry_on_signal",
"(",
"lambda",
":",
"conn",
".",
"connect",
"(",
"os",
".",
"environ",
"[",
"'SSH_AUTH_SOCK'",
"]",
")",
")",
"except",
":",
"# probably a dangling env var: the ssh agent is gone",
"return",
"elif",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"import",
"win_pageant",
"if",
"win_pageant",
".",
"can_talk_to_agent",
"(",
")",
":",
"conn",
"=",
"win_pageant",
".",
"PageantConnection",
"(",
")",
"else",
":",
"return",
"else",
":",
"# no agent support",
"return",
"self",
".",
"_conn",
"=",
"conn"
] | Method automatically called by the run() method of the AgentProxyThread | [
"Method",
"automatically",
"called",
"by",
"the",
"run",
"()",
"method",
"of",
"the",
"AgentProxyThread"
] | python | train |
contentful/contentful-management.py | contentful_management/utils.py | https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/utils.py#L73-L87 | def camel_case(snake_str):
"""
Returns a camel-cased version of a string.
:param snake_str: any :class:`str` object.
Usage:
>>> camel_case('foo_bar')
"fooBar"
"""
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + "".join(x.title() for x in components[1:]) | [
"def",
"camel_case",
"(",
"snake_str",
")",
":",
"components",
"=",
"snake_str",
".",
"split",
"(",
"'_'",
")",
"# We capitalize the first letter of each component except the first one",
"# with the 'title' method and join them together.",
"return",
"components",
"[",
"0",
"]",
"+",
"\"\"",
".",
"join",
"(",
"x",
".",
"title",
"(",
")",
"for",
"x",
"in",
"components",
"[",
"1",
":",
"]",
")"
] | Returns a camel-cased version of a string.
:param snake_str: any :class:`str` object.
Usage:
>>> camel_case('foo_bar')
"fooBar" | [
"Returns",
"a",
"camel",
"-",
"cased",
"version",
"of",
"a",
"string",
"."
] | python | train |
JoaoFelipe/pyposast | pyposast/__init__.py | https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/__init__.py#L12-L27 | def parse(code, filename='<unknown>', mode='exec', tree=None):
"""Parse the source into an AST node with PyPosAST.
Enhance nodes with positions
Arguments:
code -- code text
Keyword Arguments:
filename -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized
"""
visitor = Visitor(code, filename, mode, tree=tree)
return visitor.tree | [
"def",
"parse",
"(",
"code",
",",
"filename",
"=",
"'<unknown>'",
",",
"mode",
"=",
"'exec'",
",",
"tree",
"=",
"None",
")",
":",
"visitor",
"=",
"Visitor",
"(",
"code",
",",
"filename",
",",
"mode",
",",
"tree",
"=",
"tree",
")",
"return",
"visitor",
".",
"tree"
] | Parse the source into an AST node with PyPosAST.
Enhance nodes with positions
Arguments:
code -- code text
Keyword Arguments:
filename -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized | [
"Parse",
"the",
"source",
"into",
"an",
"AST",
"node",
"with",
"PyPosAST",
".",
"Enhance",
"nodes",
"with",
"positions"
] | python | train |
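A usage sketch for the parse() wrapper above, assuming the pyposast package is installed (the source snippet is illustrative):

import pyposast

source = 'x = 1 + 2'
tree = pyposast.parse(source, filename='<example>', mode='exec')
# The returned object is a standard ast tree whose nodes carry the
# position attributes added by PyPosAST.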
MartijnBraam/pyElectronics | electronics/devices/bmp180.py | https://github.com/MartijnBraam/pyElectronics/blob/a20878c9fa190135f1e478e9ea0b54ca43ff308e/electronics/devices/bmp180.py#L87-L104 | def temperature(self):
"""Get the temperature from the sensor.
:returns: The temperature in degrees Celsius as a float
:example:
>>> sensor = BMP180(gw)
>>> sensor.load_calibration()
>>> sensor.temperature()
21.4
"""
ut = self.get_raw_temp()
x1 = ((ut - self.cal['AC6']) * self.cal['AC5']) >> 15
x2 = (self.cal['MC'] << 11) // (x1 + self.cal['MD'])
b5 = x1 + x2
return ((b5 + 8) >> 4) / 10 | [
"def",
"temperature",
"(",
"self",
")",
":",
"ut",
"=",
"self",
".",
"get_raw_temp",
"(",
")",
"x1",
"=",
"(",
"(",
"ut",
"-",
"self",
".",
"cal",
"[",
"'AC6'",
"]",
")",
"*",
"self",
".",
"cal",
"[",
"'AC5'",
"]",
")",
">>",
"15",
"x2",
"=",
"(",
"self",
".",
"cal",
"[",
"'MC'",
"]",
"<<",
"11",
")",
"//",
"(",
"x1",
"+",
"self",
".",
"cal",
"[",
"'MD'",
"]",
")",
"b5",
"=",
"x1",
"+",
"x2",
"return",
"(",
"(",
"b5",
"+",
"8",
")",
">>",
"4",
")",
"/",
"10"
] | Get the temperature from the sensor.
:returns: The temperature in degrees Celsius as a float
:example:
>>> sensor = BMP180(gw)
>>> sensor.load_calibration()
>>> sensor.temperature()
21.4 | [
"Get",
"the",
"temperature",
"from",
"the",
"sensor",
"."
] | python | train |
emirozer/fake2db | fake2db/sqlite_handler.py | https://github.com/emirozer/fake2db/blob/568cf42afb3ac10fc15c4faaa1cdb84fc1f4946c/fake2db/sqlite_handler.py#L17-L33 | def fake2db_sqlite_initiator(self, number_of_rows, name=None, custom=None):
'''Main handler for the operation
'''
rows = number_of_rows
conn = self.database_caller_creator(name)
if custom:
self.custom_db_creator(rows, conn, custom)
conn.close()
sys.exit(0)
self.data_filler_simple_registration(rows, conn)
self.data_filler_detailed_registration(rows, conn)
self.data_filler_company(rows, conn)
self.data_filler_user_agent(rows, conn)
self.data_filler_customer(rows, conn)
conn.close() | [
"def",
"fake2db_sqlite_initiator",
"(",
"self",
",",
"number_of_rows",
",",
"name",
"=",
"None",
",",
"custom",
"=",
"None",
")",
":",
"rows",
"=",
"number_of_rows",
"conn",
"=",
"self",
".",
"database_caller_creator",
"(",
"name",
")",
"if",
"custom",
":",
"self",
".",
"custom_db_creator",
"(",
"rows",
",",
"conn",
",",
"custom",
")",
"conn",
".",
"close",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"self",
".",
"data_filler_simple_registration",
"(",
"rows",
",",
"conn",
")",
"self",
".",
"data_filler_detailed_registration",
"(",
"rows",
",",
"conn",
")",
"self",
".",
"data_filler_company",
"(",
"rows",
",",
"conn",
")",
"self",
".",
"data_filler_user_agent",
"(",
"rows",
",",
"conn",
")",
"self",
".",
"data_filler_customer",
"(",
"rows",
",",
"conn",
")",
"conn",
".",
"close",
"(",
")"
] | Main handler for the operation | [
"Main",
"handler",
"for",
"the",
"operation"
] | python | train |
robgolding/tasklib | tasklib/task.py | https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/task.py#L551-L568 | def get(self, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(**kwargs)
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise Task.DoesNotExist(
'Task matching query does not exist. '
'Lookup parameters were {0}'.format(kwargs),
)
raise ValueError(
'get() returned more than one Task -- it returned {0}! '
'Lookup parameters were {1}'.format(num, kwargs),
) | [
"def",
"get",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"clone",
"=",
"self",
".",
"filter",
"(",
"*",
"*",
"kwargs",
")",
"num",
"=",
"len",
"(",
"clone",
")",
"if",
"num",
"==",
"1",
":",
"return",
"clone",
".",
"_result_cache",
"[",
"0",
"]",
"if",
"not",
"num",
":",
"raise",
"Task",
".",
"DoesNotExist",
"(",
"'Task matching query does not exist. '",
"'Lookup parameters were {0}'",
".",
"format",
"(",
"kwargs",
")",
",",
")",
"raise",
"ValueError",
"(",
"'get() returned more than one Task -- it returned {0}! '",
"'Lookup parameters were {1}'",
".",
"format",
"(",
"num",
",",
"kwargs",
")",
",",
")"
] | Performs the query and returns a single object matching the given
keyword arguments. | [
"Performs",
"the",
"query",
"and",
"returns",
"a",
"single",
"object",
"matching",
"the",
"given",
"keyword",
"arguments",
"."
] | python | train |
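A usage sketch of these get() semantics, assuming a local TaskWarrior installation and tasklib's documented TaskWarrior entry point (the filter below is illustrative):

from tasklib import TaskWarrior

tw = TaskWarrior()
try:
    task = tw.tasks.get(description='Buy milk')
except ValueError:
    task = None  # more than one task matched the lookup parameters
# Task.DoesNotExist is raised instead when nothing matches.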
amorison/loam | loam/tools.py | https://github.com/amorison/loam/blob/a566c943a75e068a4510099331a1ddfe5bbbdd94/loam/tools.py#L15-L32 | def switch_opt(default, shortname, help_msg):
"""Define a switchable ConfOpt.
This creates a boolean option. If you use it in your CLI, it can be
switched on and off by prepending + or - to its name: +opt / -opt.
Args:
default (bool): the default value of the swith option.
shortname (str): short name of the option, no shortname will be used if
it is set to None.
help_msg (str): short description of the option.
Returns:
:class:`~loam.manager.ConfOpt`: a configuration option with the given
properties.
"""
return ConfOpt(bool(default), True, shortname,
dict(action=internal.Switch), True, help_msg, None) | [
"def",
"switch_opt",
"(",
"default",
",",
"shortname",
",",
"help_msg",
")",
":",
"return",
"ConfOpt",
"(",
"bool",
"(",
"default",
")",
",",
"True",
",",
"shortname",
",",
"dict",
"(",
"action",
"=",
"internal",
".",
"Switch",
")",
",",
"True",
",",
"help_msg",
",",
"None",
")"
] | Define a switchable ConfOpt.
This creates a boolean option. If you use it in your CLI, it can be
switched on and off by prepending + or - to its name: +opt / -opt.
Args:
default (bool): the default value of the swith option.
shortname (str): short name of the option, no shortname will be used if
it is set to None.
help_msg (str): short description of the option.
Returns:
:class:`~loam.manager.ConfOpt`: a configuration option with the given
properties. | [
"Define",
"a",
"switchable",
"ConfOpt",
"."
] | python | test |
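A minimal sketch of declaring such an option (names are illustrative); per the docstring, the resulting flag is toggled on the command line with +verbose / -verbose:

from loam.tools import switch_opt

verbose = switch_opt(default=False, shortname='v',
                     help_msg='print extra progress information')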
modin-project/modin | modin/pandas/indexing.py | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L294-L315 | def _compute_enlarge_labels(self, locator, base_index):
"""Helper for _enlarge_axis, compute common labels and extra labels.
Returns:
nan_labels: The labels that need to be added
"""
# base_index_type can be pd.Index or pd.DatetimeIndex
# depending on user input and pandas behavior
# See issue #2264
base_index_type = type(base_index)
locator_as_index = base_index_type(locator)
nan_labels = locator_as_index.difference(base_index)
common_labels = locator_as_index.intersection(base_index)
if len(common_labels) == 0:
raise KeyError(
"None of [{labels}] are in the [{base_index_name}]".format(
labels=list(locator_as_index), base_index_name=base_index
)
)
return nan_labels | [
"def",
"_compute_enlarge_labels",
"(",
"self",
",",
"locator",
",",
"base_index",
")",
":",
"# base_index_type can be pd.Index or pd.DatetimeIndex",
"# depending on user input and pandas behavior",
"# See issue #2264",
"base_index_type",
"=",
"type",
"(",
"base_index",
")",
"locator_as_index",
"=",
"base_index_type",
"(",
"locator",
")",
"nan_labels",
"=",
"locator_as_index",
".",
"difference",
"(",
"base_index",
")",
"common_labels",
"=",
"locator_as_index",
".",
"intersection",
"(",
"base_index",
")",
"if",
"len",
"(",
"common_labels",
")",
"==",
"0",
":",
"raise",
"KeyError",
"(",
"\"None of [{labels}] are in the [{base_index_name}]\"",
".",
"format",
"(",
"labels",
"=",
"list",
"(",
"locator_as_index",
")",
",",
"base_index_name",
"=",
"base_index",
")",
")",
"return",
"nan_labels"
] | Helper for _enlarge_axis, compute common labels and extra labels.
Returns:
nan_labels: The labels that need to be added | [
"Helper",
"for",
"_enlarge_axis",
"compute",
"common",
"labels",
"and",
"extra",
"labels",
"."
] | python | train |
PlaidWeb/Publ | publ/entry.py | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L293-L310 | def _get_card(self, text, **kwargs):
""" Render out the tags for a Twitter/OpenGraph card for this entry. """
def og_tag(key, val):
""" produce an OpenGraph tag with the given key and value """
return utils.make_tag('meta', {'property': key, 'content': val}, start_end=True)
tags = og_tag('og:title', self.title(markup=False))
tags += og_tag('og:url', self.link(absolute=True))
card = cards.extract_card(text, kwargs, self.search_path)
for image in card.images:
tags += og_tag('og:image', image)
if card.description:
tags += og_tag('og:description',
self.get('Summary', card.description))
return flask.Markup(tags) | [
"def",
"_get_card",
"(",
"self",
",",
"text",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"og_tag",
"(",
"key",
",",
"val",
")",
":",
"\"\"\" produce an OpenGraph tag with the given key and value \"\"\"",
"return",
"utils",
".",
"make_tag",
"(",
"'meta'",
",",
"{",
"'property'",
":",
"key",
",",
"'content'",
":",
"val",
"}",
",",
"start_end",
"=",
"True",
")",
"tags",
"=",
"og_tag",
"(",
"'og:title'",
",",
"self",
".",
"title",
"(",
"markup",
"=",
"False",
")",
")",
"tags",
"+=",
"og_tag",
"(",
"'og:url'",
",",
"self",
".",
"link",
"(",
"absolute",
"=",
"True",
")",
")",
"card",
"=",
"cards",
".",
"extract_card",
"(",
"text",
",",
"kwargs",
",",
"self",
".",
"search_path",
")",
"for",
"image",
"in",
"card",
".",
"images",
":",
"tags",
"+=",
"og_tag",
"(",
"'og:image'",
",",
"image",
")",
"if",
"card",
".",
"description",
":",
"tags",
"+=",
"og_tag",
"(",
"'og:description'",
",",
"self",
".",
"get",
"(",
"'Summary'",
",",
"card",
".",
"description",
")",
")",
"return",
"flask",
".",
"Markup",
"(",
"tags",
")"
] | Render out the tags for a Twitter/OpenGraph card for this entry. | [
"Render",
"out",
"the",
"tags",
"for",
"a",
"Twitter",
"/",
"OpenGraph",
"card",
"for",
"this",
"entry",
"."
] | python | train |
tango-controls/pytango | tango/utils.py | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/utils.py#L612-L629 | def is_int(tg_type, inc_array=False):
"""Tells if the given tango type is integer
:param tg_type: tango type
:type tg_type: :class:`tango.CmdArgType`
:param inc_array: (optional, default is False) determines whether to include
array types in the list of checked types
:type inc_array: :py:obj:`bool`
:return: True if the given tango type is an integer type, False otherwise
:rtype: :py:obj:`bool`
"""
global _scalar_int_types, _array_int_types
if tg_type in _scalar_int_types:
return True
if not inc_array:
return False
return tg_type in _array_int_types | [
"def",
"is_int",
"(",
"tg_type",
",",
"inc_array",
"=",
"False",
")",
":",
"global",
"_scalar_int_types",
",",
"_array_int_types",
"if",
"tg_type",
"in",
"_scalar_int_types",
":",
"return",
"True",
"if",
"not",
"inc_array",
":",
"return",
"False",
"return",
"tg_type",
"in",
"_array_int_types"
] | Tells if the given tango type is an integer
:param tg_type: tango type
:type tg_type: :class:`tango.CmdArgType`
:param inc_array: (optional, default is False) determines whether to include
array types in the list of checked types
:type inc_array: :py:obj:`bool`
:return: True if the given tango type is an integer type, False otherwise
:rtype: :py:obj:`bool` | [
"Tells",
"if",
"the",
"given",
"tango",
"type",
"is",
"integer"
] | python | train |
xflr6/features | features/tools.py | https://github.com/xflr6/features/blob/f985304dd642da6ecdc66d85167d00daa4efe5f4/features/tools.py#L10-L18 | def uniqued(iterable):
"""Return unique list of items preserving order.
>>> uniqued([3, 2, 1, 3, 2, 1, 0])
[3, 2, 1, 0]
"""
seen = set()
add = seen.add
return [i for i in iterable if i not in seen and not add(i)] | [
"def",
"uniqued",
"(",
"iterable",
")",
":",
"seen",
"=",
"set",
"(",
")",
"add",
"=",
"seen",
".",
"add",
"return",
"[",
"i",
"for",
"i",
"in",
"iterable",
"if",
"i",
"not",
"in",
"seen",
"and",
"not",
"add",
"(",
"i",
")",
"]"
] | Return unique list of items preserving order.
>>> uniqued([3, 2, 1, 3, 2, 1, 0])
[3, 2, 1, 0] | [
"Return",
"unique",
"list",
"of",
"items",
"preserving",
"order",
"."
] | python | train |
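On Python 3.7+, where dicts preserve insertion order, the same order-preserving deduplication can be written without the seen-set trick; a sketch for comparison:

def uniqued(iterable):
    # dict.fromkeys keeps only the first occurrence of each item, in order.
    return list(dict.fromkeys(iterable))

assert uniqued([3, 2, 1, 3, 2, 1, 0]) == [3, 2, 1, 0]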
OSSOS/MOP | src/jjk/preproc/cfeps_object.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/cfeps_object.py#L57-L75 | def getData(file_id,ra,dec):
"""Create a link that connects to a getData URL"""
DATA="www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca"
BASE="http://"+DATA+"/authProxy/getData"
archive="CFHT"
wcs="corrected"
import re
groups=re.match('^(?P<file_id>\d{6}).*',file_id)
if not groups:
return None
file_id=groups.group('file_id')
file_id+="p"
#### THIS IS NOT WORKING YET....
URL=BASE+"?dataset_name="+file_id+"&cutout=circle("+str(ra*57.3)+","
URL+=str(dec*57.3)+","+str(5.0/60.0)+")"
return URL | [
"def",
"getData",
"(",
"file_id",
",",
"ra",
",",
"dec",
")",
":",
"DATA",
"=",
"\"www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca\"",
"BASE",
"=",
"\"http://\"",
"+",
"DATA",
"+",
"\"/authProxy/getData\"",
"archive",
"=",
"\"CFHT\"",
"wcs",
"=",
"\"corrected\"",
"import",
"re",
"groups",
"=",
"re",
".",
"match",
"(",
"'^(?P<file_id>\\d{6}).*'",
",",
"file_id",
")",
"if",
"not",
"groups",
":",
"return",
"None",
"file_id",
"=",
"groups",
".",
"group",
"(",
"'file_id'",
")",
"file_id",
"+=",
"\"p\"",
"#### THIS IS NOT WORKING YET....",
"URL",
"=",
"BASE",
"+",
"\"?dataset_name=\"",
"+",
"file_id",
"+",
"\"&cutout=circle(\"",
"+",
"str",
"(",
"ra",
"*",
"57.3",
")",
"+",
"\",\"",
"URL",
"+=",
"str",
"(",
"dec",
"*",
"57.3",
")",
"+",
"\",\"",
"+",
"str",
"(",
"5.0",
"/",
"60.0",
")",
"+",
"\")\"",
"return",
"URL"
] | Create a link that connects to a getData URL | [
"Create",
"a",
"link",
"that",
"connects",
"to",
"a",
"getData",
"URL"
] | python | train |
googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/client.py#L412-L435 | def _get_reference(document_path, reference_map):
"""Get a document reference from a dictionary.
This just wraps a simple dictionary look-up with a helpful error that is
specific to :meth:`~.firestore.client.Client.get_all`, the
**public** caller of this function.
Args:
document_path (str): A fully-qualified document path.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
Returns:
.DocumentReference: The matching reference.
Raises:
ValueError: If ``document_path`` has not been encountered.
"""
try:
return reference_map[document_path]
except KeyError:
msg = _BAD_DOC_TEMPLATE.format(document_path)
raise ValueError(msg) | [
"def",
"_get_reference",
"(",
"document_path",
",",
"reference_map",
")",
":",
"try",
":",
"return",
"reference_map",
"[",
"document_path",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"_BAD_DOC_TEMPLATE",
".",
"format",
"(",
"document_path",
")",
"raise",
"ValueError",
"(",
"msg",
")"
] | Get a document reference from a dictionary.
This just wraps a simple dictionary look-up with a helpful error that is
specific to :meth:`~.firestore.client.Client.get_all`, the
**public** caller of this function.
Args:
document_path (str): A fully-qualified document path.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
Returns:
.DocumentReference: The matching reference.
Raises:
ValueError: If ``document_path`` has not been encountered. | [
"Get",
"a",
"document",
"reference",
"from",
"a",
"dictionary",
"."
] | python | train |
rocky/python-xdis | xdis/std.py | https://github.com/rocky/python-xdis/blob/46a2902ae8f5d8eee495eed67ac0690fd545453d/xdis/std.py#L135-L141 | def dis(self, x=None, file=None):
"""Disassemble classes, methods, functions, generators, or code.
With no argument, disassemble the last traceback.
"""
self._print(self.Bytecode(x).dis(), file) | [
"def",
"dis",
"(",
"self",
",",
"x",
"=",
"None",
",",
"file",
"=",
"None",
")",
":",
"self",
".",
"_print",
"(",
"self",
".",
"Bytecode",
"(",
"x",
")",
".",
"dis",
"(",
")",
",",
"file",
")"
] | Disassemble classes, methods, functions, generators, or code.
With no argument, disassemble the last traceback. | [
"Disassemble",
"classes",
"methods",
"functions",
"generators",
"or",
"code",
"."
] | python | train |
objectrocket/python-client | objectrocket/auth.py | https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/auth.py#L86-L96 | def _refresh(self):
"""Refresh the API token using the currently bound credentials.
This is simply a convenience method to be invoked automatically if authentication fails
during normal client use.
"""
# Request and set a new API token.
new_token = self.authenticate(self._username, self._password)
self._token = new_token
logger.info('New API token received: "{}".'.format(new_token))
return self._token | [
"def",
"_refresh",
"(",
"self",
")",
":",
"# Request and set a new API token.",
"new_token",
"=",
"self",
".",
"authenticate",
"(",
"self",
".",
"_username",
",",
"self",
".",
"_password",
")",
"self",
".",
"_token",
"=",
"new_token",
"logger",
".",
"info",
"(",
"'New API token received: \"{}\".'",
".",
"format",
"(",
"new_token",
")",
")",
"return",
"self",
".",
"_token"
] | Refresh the API token using the currently bound credentials.
This is simply a convenience method to be invoked automatically if authentication fails
during normal client use. | [
"Refresh",
"the",
"API",
"token",
"using",
"the",
"currently",
"bound",
"credentials",
"."
] | python | train |
jvarho/pylibscrypt | pylibscrypt/libsodium_load.py | https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/libsodium_load.py#L19-L71 | def get_libsodium():
'''Locate the libsodium C library'''
__SONAMES = (13, 10, 5, 4)
# Import libsodium from system
sys_sodium = ctypes.util.find_library('sodium')
if sys_sodium is None:
sys_sodium = ctypes.util.find_library('libsodium')
if sys_sodium:
try:
return ctypes.CDLL(sys_sodium)
except OSError:
pass
# Import from local path
if sys.platform.startswith('win'):
try:
return ctypes.cdll.LoadLibrary('libsodium')
except OSError:
pass
for soname_ver in __SONAMES:
try:
return ctypes.cdll.LoadLibrary(
'libsodium-{0}'.format(soname_ver)
)
except OSError:
pass
elif sys.platform.startswith('darwin'):
try:
return ctypes.cdll.LoadLibrary('libsodium.dylib')
except OSError:
try:
libidx = __file__.find('lib')
if libidx > 0:
libpath = __file__[0:libidx+3] + '/libsodium.dylib'
return ctypes.cdll.LoadLibrary(libpath)
except OSError:
pass
else:
try:
return ctypes.cdll.LoadLibrary('libsodium.so')
except OSError:
pass
for soname_ver in __SONAMES:
try:
return ctypes.cdll.LoadLibrary(
'libsodium.so.{0}'.format(soname_ver)
)
except OSError:
pass | [
"def",
"get_libsodium",
"(",
")",
":",
"__SONAMES",
"=",
"(",
"13",
",",
"10",
",",
"5",
",",
"4",
")",
"# Import libsodium from system",
"sys_sodium",
"=",
"ctypes",
".",
"util",
".",
"find_library",
"(",
"'sodium'",
")",
"if",
"sys_sodium",
"is",
"None",
":",
"sys_sodium",
"=",
"ctypes",
".",
"util",
".",
"find_library",
"(",
"'libsodium'",
")",
"if",
"sys_sodium",
":",
"try",
":",
"return",
"ctypes",
".",
"CDLL",
"(",
"sys_sodium",
")",
"except",
"OSError",
":",
"pass",
"# Import from local path",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"try",
":",
"return",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"'libsodium'",
")",
"except",
"OSError",
":",
"pass",
"for",
"soname_ver",
"in",
"__SONAMES",
":",
"try",
":",
"return",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"'libsodium-{0}'",
".",
"format",
"(",
"soname_ver",
")",
")",
"except",
"OSError",
":",
"pass",
"elif",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'darwin'",
")",
":",
"try",
":",
"return",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"'libsodium.dylib'",
")",
"except",
"OSError",
":",
"try",
":",
"libidx",
"=",
"__file__",
".",
"find",
"(",
"'lib'",
")",
"if",
"libidx",
">",
"0",
":",
"libpath",
"=",
"__file__",
"[",
"0",
":",
"libidx",
"+",
"3",
"]",
"+",
"'/libsodium.dylib'",
"return",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"libpath",
")",
"except",
"OSError",
":",
"pass",
"else",
":",
"try",
":",
"return",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"'libsodium.so'",
")",
"except",
"OSError",
":",
"pass",
"for",
"soname_ver",
"in",
"__SONAMES",
":",
"try",
":",
"return",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"'libsodium.so.{0}'",
".",
"format",
"(",
"soname_ver",
")",
")",
"except",
"OSError",
":",
"pass"
] | Locate the libsodium C library | [
"Locate",
"the",
"libsodium",
"C",
"library"
] | python | train |
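A condensed sketch of the same locate-then-fallback loading pattern with ctypes, usable for any shared library (the candidate sonames are illustrative):

import ctypes
import ctypes.util

def load_first(candidates):
    # Try each candidate name in turn; return None if none loads.
    for name in candidates:
        try:
            return ctypes.CDLL(name)
        except OSError:
            continue
    return None

names = []
found = ctypes.util.find_library('sodium')
if found:
    names.append(found)
names += ['libsodium.so', 'libsodium.dylib', 'libsodium']
lib = load_first(names)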
marshmallow-code/webargs | src/webargs/pyramidparser.py | https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/pyramidparser.py#L88-L101 | def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Aborts the current HTTP request and
responds with a 400 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
response = exception_response(
status_code,
detail=text_type(error),
headers=error_headers,
content_type="application/json",
)
body = json.dumps(error.messages)
response.body = body.encode("utf-8") if isinstance(body, text_type) else body
raise response | [
"def",
"handle_error",
"(",
"self",
",",
"error",
",",
"req",
",",
"schema",
",",
"error_status_code",
",",
"error_headers",
")",
":",
"status_code",
"=",
"error_status_code",
"or",
"self",
".",
"DEFAULT_VALIDATION_STATUS",
"response",
"=",
"exception_response",
"(",
"status_code",
",",
"detail",
"=",
"text_type",
"(",
"error",
")",
",",
"headers",
"=",
"error_headers",
",",
"content_type",
"=",
"\"application/json\"",
",",
")",
"body",
"=",
"json",
".",
"dumps",
"(",
"error",
".",
"messages",
")",
"response",
".",
"body",
"=",
"body",
".",
"encode",
"(",
"\"utf-8\"",
")",
"if",
"isinstance",
"(",
"body",
",",
"text_type",
")",
"else",
"body",
"raise",
"response"
] | Handles errors during parsing. Aborts the current HTTP request and
responds with a 400 error. | [
"Handles",
"errors",
"during",
"parsing",
".",
"Aborts",
"the",
"current",
"HTTP",
"request",
"and",
"responds",
"with",
"a",
"400",
"error",
"."
] | python | train |
gwastro/pycbc | pycbc/strain/lines.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/lines.py#L41-L77 | def avg_inner_product(data1, data2, bin_size):
""" Calculate the time-domain inner product averaged over bins.
Parameters
----------
data1: pycbc.types.TimeSeries
First data set.
data2: pycbc.types.TimeSeries
Second data set, with same duration and sample rate as data1.
bin_size: float
Duration of the bins the data will be divided into to calculate
the inner product.
Returns
-------
inner_prod: list
The (complex) inner product of data1 and data2 obtained in each bin.
amp: float
The absolute value of the median of the inner product.
phi: float
The angle of the median of the inner product.
"""
assert data1.duration == data2.duration
assert data1.sample_rate == data2.sample_rate
seglen = int(bin_size * data1.sample_rate)
inner_prod = []
for idx in range(int(data1.duration / bin_size)):
start, end = idx * seglen, (idx+1) * seglen
norm = len(data1[start:end])
bin_prod = 2 * sum(data1.data[start:end].real *
numpy.conjugate(data2.data[start:end])) / norm
inner_prod.append(bin_prod)
# Get the median over all bins to avoid outliers due to the presence
# of a signal in a particular bin.
inner_median = complex_median(inner_prod)
return inner_prod, numpy.abs(inner_median), numpy.angle(inner_median) | [
"def",
"avg_inner_product",
"(",
"data1",
",",
"data2",
",",
"bin_size",
")",
":",
"assert",
"data1",
".",
"duration",
"==",
"data2",
".",
"duration",
"assert",
"data1",
".",
"sample_rate",
"==",
"data2",
".",
"sample_rate",
"seglen",
"=",
"int",
"(",
"bin_size",
"*",
"data1",
".",
"sample_rate",
")",
"inner_prod",
"=",
"[",
"]",
"for",
"idx",
"in",
"range",
"(",
"int",
"(",
"data1",
".",
"duration",
"/",
"bin_size",
")",
")",
":",
"start",
",",
"end",
"=",
"idx",
"*",
"seglen",
",",
"(",
"idx",
"+",
"1",
")",
"*",
"seglen",
"norm",
"=",
"len",
"(",
"data1",
"[",
"start",
":",
"end",
"]",
")",
"bin_prod",
"=",
"2",
"*",
"sum",
"(",
"data1",
".",
"data",
"[",
"start",
":",
"end",
"]",
".",
"real",
"*",
"numpy",
".",
"conjugate",
"(",
"data2",
".",
"data",
"[",
"start",
":",
"end",
"]",
")",
")",
"/",
"norm",
"inner_prod",
".",
"append",
"(",
"bin_prod",
")",
"# Get the median over all bins to avoid outliers due to the presence",
"# of a signal in a particular bin.",
"inner_median",
"=",
"complex_median",
"(",
"inner_prod",
")",
"return",
"inner_prod",
",",
"numpy",
".",
"abs",
"(",
"inner_median",
")",
",",
"numpy",
".",
"angle",
"(",
"inner_median",
")"
] | Calculate the time-domain inner product averaged over bins.
Parameters
----------
data1: pycbc.types.TimeSeries
First data set.
data2: pycbc.types.TimeSeries
Second data set, with same duration and sample rate as data1.
bin_size: float
Duration of the bins the data will be divided into to calculate
the inner product.
Returns
-------
inner_prod: list
The (complex) inner product of data1 and data2 obtained in each bin.
amp: float
The absolute value of the median of the inner product.
phi: float
The angle of the median of the inner product. | [
"Calculate",
"the",
"time",
"-",
"domain",
"inner",
"product",
"averaged",
"over",
"bins",
"."
] | python | train |
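A plain-numpy sketch of the same binned inner product on synthetic arrays (no pycbc types); complex_median is assumed here to take the median of the real and imaginary parts separately:

import numpy as np

rate, duration, bin_size = 64, 4.0, 1.0
t = np.arange(int(rate * duration)) / rate
d1 = np.sin(2 * np.pi * 5 * t)
d2 = np.sin(2 * np.pi * 5 * t + 0.3)

seglen = int(bin_size * rate)
prods = []
for idx in range(int(duration / bin_size)):
    start, end = idx * seglen, (idx + 1) * seglen
    prods.append(2 * np.sum(d1[start:end] * np.conjugate(d2[start:end])) / seglen)

median = np.median(np.real(prods)) + 1j * np.median(np.imag(prods))
amp, phi = np.abs(median), np.angle(median)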
saltstack/salt | salt/cloud/clouds/cloudstack.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L218-L228 | def get_ip(data):
'''
Return the IP address of the VM
If the VM has a public IP as defined by the libcloud module then use it
Otherwise try to extract the private IP and use that one.
'''
try:
ip = data.public_ips[0]
except Exception:
ip = data.private_ips[0]
return ip | [
"def",
"get_ip",
"(",
"data",
")",
":",
"try",
":",
"ip",
"=",
"data",
".",
"public_ips",
"[",
"0",
"]",
"except",
"Exception",
":",
"ip",
"=",
"data",
".",
"private_ips",
"[",
"0",
"]",
"return",
"ip"
] | Return the IP address of the VM
If the VM has a public IP as defined by the libcloud module then use it
Otherwise try to extract the private IP and use that one. | [
"Return",
"the",
"IP",
"address",
"of",
"the",
"VM",
"If",
"the",
"VM",
"has",
"public",
"IP",
"as",
"defined",
"by",
"libcloud",
"module",
"then",
"use",
"it",
"Otherwise",
"try",
"to",
"extract",
"the",
"private",
"IP",
"and",
"use",
"that",
"one",
"."
] | python | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L76-L110 | def _tokenize_mteval_13a(segment):
r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string
"""
norm = segment.rstrip()
norm = norm.replace('<skipped>', '')
norm = norm.replace('-\n', '')
norm = norm.replace('\n', ' ')
norm = norm.replace('"', '"')
norm = norm.replace('&', '&')
norm = norm.replace('<', '<')
norm = norm.replace('>', '>')
norm = u' {} '.format(norm)
norm = re.sub(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])', ' \\1 ', norm)
norm = re.sub(r'([^0-9])([\.,])', '\\1 \\2 ', norm)
norm = re.sub(r'([\.,])([^0-9])', ' \\1 \\2', norm)
norm = re.sub(r'([0-9])(-)', '\\1 \\2 ', norm)
norm = re.sub(r'\s+', ' ', norm)
norm = re.sub(r'^\s+', '', norm)
norm = re.sub(r'\s+$', '', norm)
return norm | [
"def",
"_tokenize_mteval_13a",
"(",
"segment",
")",
":",
"norm",
"=",
"segment",
".",
"rstrip",
"(",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'<skipped>'",
",",
"''",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'-\\n'",
",",
"''",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'"'",
",",
"'\"'",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'&'",
",",
"'&'",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'<'",
",",
"'<'",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'>'",
",",
"'>'",
")",
"norm",
"=",
"u' {} '",
".",
"format",
"(",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([\\{-\\~\\[-\\` -\\&\\(-\\+\\:-\\@\\/])'",
",",
"' \\\\1 '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([^0-9])([\\.,])'",
",",
"'\\\\1 \\\\2 '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([\\.,])([^0-9])'",
",",
"' \\\\1 \\\\2'",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([0-9])(-)'",
",",
"'\\\\1 \\\\2 '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"' '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'^\\s+'",
",",
"''",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'\\s+$'",
",",
"''",
",",
"norm",
")",
"return",
"norm"
] | r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string | [
"r",
"Tokenizes",
"a",
"string",
"following",
"the",
"tokenizer",
"in",
"mteval",
"-",
"v13a",
".",
"pl",
".",
"See",
"https",
":",
"//",
"github",
".",
"com",
"/",
"moses",
"-",
"smt",
"/",
"mosesdecoder",
"/",
"blob",
"/",
"master",
"/",
"scripts",
"/",
"generic",
"/",
"mteval",
"-",
"v14",
".",
"pl#L917",
"-",
"L942",
"Parameters",
"----------",
"segment",
":",
"str",
"A",
"string",
"to",
"be",
"tokenized"
] | python | train |
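A usage sketch; the spacing shown is what the substitution rules above are expected to produce for this input:

text = 'Hello, world! "Test" 3-4'
print(_tokenize_mteval_13a(text))
# expected: Hello , world ! " Test " 3 - 4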
LonamiWebs/Telethon | telethon/network/mtprotosender.py | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/network/mtprotosender.py#L334-L347 | def _start_reconnect(self, error):
"""Starts a reconnection in the background."""
if self._user_connected and not self._reconnecting:
# We set reconnecting to True here and not inside the new task
# because it may happen that send/recv loop calls this again
# while the new task hasn't had a chance to run yet. This race
# condition puts `self.connection` in a bad state with two calls
# to its `connect` without disconnecting, so it creates a second
# receive loop. There can't be two tasks receiving data from
# the reader, since that causes an error, and the library just
# gets stuck.
# TODO It still gets stuck? Investigate where and why.
self._reconnecting = True
self._loop.create_task(self._reconnect(error)) | [
"def",
"_start_reconnect",
"(",
"self",
",",
"error",
")",
":",
"if",
"self",
".",
"_user_connected",
"and",
"not",
"self",
".",
"_reconnecting",
":",
"# We set reconnecting to True here and not inside the new task",
"# because it may happen that send/recv loop calls this again",
"# while the new task hasn't had a chance to run yet. This race",
"# condition puts `self.connection` in a bad state with two calls",
"# to its `connect` without disconnecting, so it creates a second",
"# receive loop. There can't be two tasks receiving data from",
"# the reader, since that causes an error, and the library just",
"# gets stuck.",
"# TODO It still gets stuck? Investigate where and why.",
"self",
".",
"_reconnecting",
"=",
"True",
"self",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_reconnect",
"(",
"error",
")",
")"
] | Starts a reconnection in the background. | [
"Starts",
"a",
"reconnection",
"in",
"the",
"background",
"."
] | python | train |
automl/HpBandSter | hpbandster/optimizers/config_generators/kde.py | https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/optimizers/config_generators/kde.py#L50-L82 | def get_config(self, budget):
"""
Function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration
"""
# No observations available for this budget; sample from the prior
if len(self.kde_models.keys()) == 0:
return self.configspace.sample_configuration().get_dictionary()
# If we haven't seen anything with this budget, we sample from the kde trained on the highest budget
if budget not in self.kde_models.keys():
budget = sorted(self.kde_models.keys())[-1]
# TODO: This only works in continuous space and with gaussian kernels
kde = self.kde_models[budget]
idx = np.random.randint(0, len(self.kde_models[budget].data))
vector = [sps.truncnorm.rvs(-m/bw,(1-m)/bw, loc=m, scale=bw) for m,bw in zip(self.kde_models[budget].data[idx], kde.bw)]
if np.any(np.array(vector)>1) or np.any(np.array(vector)<0):
raise RuntimeError("truncated normal sampling problems!")
sample = ConfigSpace.Configuration(self.configspace, vector=vector)
return sample.get_dictionary(), {} | [
"def",
"get_config",
"(",
"self",
",",
"budget",
")",
":",
"# No observations available for this budget sample from the prior",
"if",
"len",
"(",
"self",
".",
"kde_models",
".",
"keys",
"(",
")",
")",
"==",
"0",
":",
"return",
"self",
".",
"configspace",
".",
"sample_configuration",
"(",
")",
".",
"get_dictionary",
"(",
")",
"# If we haven't seen anything with this budget, we sample from the kde trained on the highest budget",
"if",
"budget",
"not",
"in",
"self",
".",
"kde_models",
".",
"keys",
"(",
")",
":",
"budget",
"=",
"sorted",
"(",
"self",
".",
"kde_models",
".",
"keys",
"(",
")",
")",
"[",
"-",
"1",
"]",
"# TODO: This only works in continuous space and with gaussian kernels",
"kde",
"=",
"self",
".",
"kde_models",
"[",
"budget",
"]",
"idx",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"kde_models",
"[",
"budget",
"]",
".",
"data",
")",
")",
"vector",
"=",
"[",
"sps",
".",
"truncnorm",
".",
"rvs",
"(",
"-",
"m",
"/",
"bw",
",",
"(",
"1",
"-",
"m",
")",
"/",
"bw",
",",
"loc",
"=",
"m",
",",
"scale",
"=",
"bw",
")",
"for",
"m",
",",
"bw",
"in",
"zip",
"(",
"self",
".",
"kde_models",
"[",
"budget",
"]",
".",
"data",
"[",
"idx",
"]",
",",
"kde",
".",
"bw",
")",
"]",
"if",
"np",
".",
"any",
"(",
"np",
".",
"array",
"(",
"vector",
")",
">",
"1",
")",
"or",
"np",
".",
"any",
"(",
"np",
".",
"array",
"(",
"vector",
")",
"<",
"0",
")",
":",
"raise",
"RuntimeError",
"(",
"\"truncated normal sampling problems!\"",
")",
"sample",
"=",
"ConfigSpace",
".",
"Configuration",
"(",
"self",
".",
"configspace",
",",
"vector",
"=",
"vector",
")",
"return",
"sample",
".",
"get_dictionary",
"(",
")",
",",
"{",
"}"
] | Function to sample a new configuration
This function is called inside Hyperband to query a new configuration
Parameters:
-----------
budget: float
the budget for which this configuration is scheduled
returns: config
should return a valid configuration | [
"Function",
"to",
"sample",
"a",
"new",
"configuration"
] | python | train |
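A standalone sketch of the truncated-normal sampling step used above, with scipy (the mean and bandwidth values are illustrative); rescaling the bounds by the bandwidth confines each draw to [0, 1]:

import scipy.stats as sps

m, bw = 0.3, 0.1  # one KDE datum coordinate and its bandwidth, both in [0, 1]
sample = sps.truncnorm.rvs(-m / bw, (1 - m) / bw, loc=m, scale=bw)
assert 0.0 <= sample <= 1.0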
xsleonard/pystmark | pystmark.py | https://github.com/xsleonard/pystmark/blob/329ccae1a7c8d57f28fa72cd8dbbee3e39413ed6/pystmark.py#L510-L517 | def bcc(self, bcc):
'''
:param bcc: Email addresses for the 'Bcc' API field.
:type bcc: :keyword:`list` or `str`
'''
if isinstance(bcc, basestring):
bcc = bcc.split(',')
self._bcc = bcc | [
"def",
"bcc",
"(",
"self",
",",
"bcc",
")",
":",
"if",
"isinstance",
"(",
"bcc",
",",
"basestring",
")",
":",
"bcc",
"=",
"bcc",
".",
"split",
"(",
"','",
")",
"self",
".",
"_bcc",
"=",
"bcc"
] | :param bcc: Email addresses for the 'Bcc' API field.
:type bcc: :keyword:`list` or `str` | [
":",
"param",
"bcc",
":",
"Email",
"addresses",
"for",
"the",
"Bcc",
"API",
"field",
".",
":",
"type",
"bcc",
":",
":",
"keyword",
":",
"list",
"or",
"str"
] | python | train |
dwavesystems/minorminer | examples/fourcolor.py | https://github.com/dwavesystems/minorminer/blob/05cac6db180adf8223a613dff808248e3048b07d/examples/fourcolor.py#L83-L126 | def chimera_block_quotient(G, blocks):
"""
Extract the blocks from a graph and return a
block-quotient graph according to the acceptability
functions block_good and eblock_good
Inputs:
G: a networkx graph
blocks: a tuple of tuples
"""
from networkx import Graph
from itertools import product
BG = Graph()
blockid = {}
for i, b in enumerate(blocks):
BG.add_node(i)
if not b or not all(G.has_node(x) for x in b):
continue
for q in b:
if q in blockid:
raise(RuntimeError, "two blocks overlap")
blockid[q] = i
for q, u in blockid.items():
ublock = blocks[u]
for p in G[q]:
if p not in blockid:
continue
v = blockid[p]
if BG.has_edge(u, v) or u == v:
continue
vblock = blocks[v]
if ublock[0][2] == vblock[0][2]:
block_edges = zip(ublock, vblock)
else:
block_edges = product(ublock, vblock)
if all(G.has_edge(x, y) for x, y in block_edges):
BG.add_edge(u, v)
return BG | [
"def",
"chimera_block_quotient",
"(",
"G",
",",
"blocks",
")",
":",
"from",
"networkx",
"import",
"Graph",
"from",
"itertools",
"import",
"product",
"BG",
"=",
"Graph",
"(",
")",
"blockid",
"=",
"{",
"}",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"blocks",
")",
":",
"BG",
".",
"add_node",
"(",
"i",
")",
"if",
"not",
"b",
"or",
"not",
"all",
"(",
"G",
".",
"has_node",
"(",
"x",
")",
"for",
"x",
"in",
"b",
")",
":",
"continue",
"for",
"q",
"in",
"b",
":",
"if",
"q",
"in",
"blockid",
":",
"raise",
"(",
"RuntimeError",
",",
"\"two blocks overlap\"",
")",
"blockid",
"[",
"q",
"]",
"=",
"i",
"for",
"q",
",",
"u",
"in",
"blockid",
".",
"items",
"(",
")",
":",
"ublock",
"=",
"blocks",
"[",
"u",
"]",
"for",
"p",
"in",
"G",
"[",
"q",
"]",
":",
"if",
"p",
"not",
"in",
"blockid",
":",
"continue",
"v",
"=",
"blockid",
"[",
"p",
"]",
"if",
"BG",
".",
"has_edge",
"(",
"u",
",",
"v",
")",
"or",
"u",
"==",
"v",
":",
"continue",
"vblock",
"=",
"blocks",
"[",
"v",
"]",
"if",
"ublock",
"[",
"0",
"]",
"[",
"2",
"]",
"==",
"vblock",
"[",
"0",
"]",
"[",
"2",
"]",
":",
"block_edges",
"=",
"zip",
"(",
"ublock",
",",
"vblock",
")",
"else",
":",
"block_edges",
"=",
"product",
"(",
"ublock",
",",
"vblock",
")",
"if",
"all",
"(",
"G",
".",
"has_edge",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"block_edges",
")",
":",
"BG",
".",
"add_edge",
"(",
"u",
",",
"v",
")",
"return",
"BG"
] | Extract the blocks from a graph and return a
block-quotient graph according to the acceptability
functions block_good and eblock_good
Inputs:
G: a networkx graph
blocks: a tuple of tuples | [
"Extract",
"the",
"blocks",
"from",
"a",
"graph",
"and",
"returns",
"a",
"block",
"-",
"quotient",
"graph",
"according",
"to",
"the",
"acceptability",
"functions",
"block_good",
"and",
"eblock_good"
] | python | test |
beczkowb/csvparser | csvparser/parser.py | https://github.com/beczkowb/csvparser/blob/f9f9bd37e10e3c1c223d559194367b25900a822a/csvparser/parser.py#L41-L52 | def is_valid(self):
"""
Validates single instance. Returns boolean value and stores errors in self.errors
"""
self.errors = []
for field in self.get_all_field_names_declared_by_user():
getattr(type(self), field).is_valid(self, type(self), field)
field_errors = getattr(type(self), field).errors(self)
self.errors.extend(field_errors)
return len(self.errors) == 0 | [
"def",
"is_valid",
"(",
"self",
")",
":",
"self",
".",
"errors",
"=",
"[",
"]",
"for",
"field",
"in",
"self",
".",
"get_all_field_names_declared_by_user",
"(",
")",
":",
"getattr",
"(",
"type",
"(",
"self",
")",
",",
"field",
")",
".",
"is_valid",
"(",
"self",
",",
"type",
"(",
"self",
")",
",",
"field",
")",
"field_errors",
"=",
"getattr",
"(",
"type",
"(",
"self",
")",
",",
"field",
")",
".",
"errors",
"(",
"self",
")",
"self",
".",
"errors",
".",
"extend",
"(",
"field_errors",
")",
"return",
"len",
"(",
"self",
".",
"errors",
")",
"==",
"0"
] | Validates single instance. Returns boolean value and stores errors in self.errors | [
"Validates",
"single",
"instance",
".",
"Returns",
"boolean",
"value",
"and",
"store",
"errors",
"in",
"self",
".",
"errors"
] | python | train |
dpkp/kafka-python | kafka/coordinator/base.py | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/coordinator/base.py#L242-L270 | def ensure_coordinator_ready(self):
"""Block until the coordinator for this group is known
(and we have an active connection -- java client uses unsent queue).
"""
with self._client._lock, self._lock:
while self.coordinator_unknown():
# Prior to 0.8.2 there was no group coordinator
# so we will just pick a node at random and treat
# it as the "coordinator"
if self.config['api_version'] < (0, 8, 2):
self.coordinator_id = self._client.least_loaded_node()
if self.coordinator_id is not None:
self._client.maybe_connect(self.coordinator_id)
continue
future = self.lookup_coordinator()
self._client.poll(future=future)
if future.failed():
if future.retriable():
if getattr(future.exception, 'invalid_metadata', False):
log.debug('Requesting metadata for group coordinator request: %s', future.exception)
metadata_update = self._client.cluster.request_update()
self._client.poll(future=metadata_update)
else:
time.sleep(self.config['retry_backoff_ms'] / 1000)
else:
raise future.exception | [
"def",
"ensure_coordinator_ready",
"(",
"self",
")",
":",
"with",
"self",
".",
"_client",
".",
"_lock",
",",
"self",
".",
"_lock",
":",
"while",
"self",
".",
"coordinator_unknown",
"(",
")",
":",
"# Prior to 0.8.2 there was no group coordinator",
"# so we will just pick a node at random and treat",
"# it as the \"coordinator\"",
"if",
"self",
".",
"config",
"[",
"'api_version'",
"]",
"<",
"(",
"0",
",",
"8",
",",
"2",
")",
":",
"self",
".",
"coordinator_id",
"=",
"self",
".",
"_client",
".",
"least_loaded_node",
"(",
")",
"if",
"self",
".",
"coordinator_id",
"is",
"not",
"None",
":",
"self",
".",
"_client",
".",
"maybe_connect",
"(",
"self",
".",
"coordinator_id",
")",
"continue",
"future",
"=",
"self",
".",
"lookup_coordinator",
"(",
")",
"self",
".",
"_client",
".",
"poll",
"(",
"future",
"=",
"future",
")",
"if",
"future",
".",
"failed",
"(",
")",
":",
"if",
"future",
".",
"retriable",
"(",
")",
":",
"if",
"getattr",
"(",
"future",
".",
"exception",
",",
"'invalid_metadata'",
",",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'Requesting metadata for group coordinator request: %s'",
",",
"future",
".",
"exception",
")",
"metadata_update",
"=",
"self",
".",
"_client",
".",
"cluster",
".",
"request_update",
"(",
")",
"self",
".",
"_client",
".",
"poll",
"(",
"future",
"=",
"metadata_update",
")",
"else",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"config",
"[",
"'retry_backoff_ms'",
"]",
"/",
"1000",
")",
"else",
":",
"raise",
"future",
".",
"exception"
] | Block until the coordinator for this group is known
(and we have an active connection -- java client uses unsent queue). | [
"Block",
"until",
"the",
"coordinator",
"for",
"this",
"group",
"is",
"known",
"(",
"and",
"we",
"have",
"an",
"active",
"connection",
"--",
"java",
"client",
"uses",
"unsent",
"queue",
")",
"."
] | python | train |
tradenity/python-sdk | tradenity/resources/tax_rate.py | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/tax_rate.py#L840-L861 | def replace_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs):
"""Replace TaxRate
Replace all attributes of TaxRate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
>>> result = thread.get()
:param async bool
:param str tax_rate_id: ID of taxRate to replace (required)
:param TaxRate tax_rate: Attributes of taxRate to replace (required)
:return: TaxRate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
else:
(data) = cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
return data | [
"def",
"replace_tax_rate_by_id",
"(",
"cls",
",",
"tax_rate_id",
",",
"tax_rate",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_replace_tax_rate_by_id_with_http_info",
"(",
"tax_rate_id",
",",
"tax_rate",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_replace_tax_rate_by_id_with_http_info",
"(",
"tax_rate_id",
",",
"tax_rate",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | Replace TaxRate
Replace all attributes of TaxRate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
>>> result = thread.get()
:param async bool
:param str tax_rate_id: ID of taxRate to replace (required)
:param TaxRate tax_rate: Attributes of taxRate to replace (required)
:return: TaxRate
If the method is called asynchronously,
returns the request thread. | [
"Replace",
"TaxRate"
] | python | train |
Robpol86/appveyor-artifacts | appveyor_artifacts.py | https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L444-L491 | def get_urls(config, log):
"""Wait for AppVeyor job to finish and get all artifacts' URLs.
:param dict config: Dictionary from get_arguments().
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
:return: Paths and URLs from artifacts_urls.
:rtype: dict
"""
# Wait for job to be queued. Once it is we'll have the "version".
build_version = None
for _ in range(3):
build_version = query_build_version(config)
if build_version:
break
log.info('Waiting for job to be queued...')
time.sleep(SLEEP_FOR)
if not build_version:
log.error('Timed out waiting for job to be queued or build not found.')
raise HandledError
# Get job IDs. Wait for AppVeyor job to finish.
job_ids = list()
valid_statuses = ['success', 'failed', 'running', 'queued']
while True:
job_ids = query_job_ids(build_version, config)
statuses = set([i[1] for i in job_ids])
if 'failed' in statuses:
job = [i[0] for i in job_ids if i[1] == 'failed'][0]
url = 'https://ci.appveyor.com/project/{0}/{1}/build/job/{2}'.format(config['owner'], config['repo'], job)
log.error('AppVeyor job failed: %s', url)
raise HandledError
if statuses == set(valid_statuses[:1]):
log.info('Build successful. Found %d job%s.', len(job_ids), '' if len(job_ids) == 1 else 's')
break
if 'running' in statuses:
log.info('Waiting for job%s to finish...', '' if len(job_ids) == 1 else 's')
elif 'queued' in statuses:
log.info('Waiting for all jobs to start...')
else:
log.error('Got unknown status from AppVeyor API: %s', ' '.join(statuses - set(valid_statuses)))
raise HandledError
time.sleep(SLEEP_FOR)
# Get artifacts.
artifacts = query_artifacts([i[0] for i in job_ids])
log.info('Found %d artifact%s.', len(artifacts), '' if len(artifacts) == 1 else 's')
return artifacts_urls(config, artifacts) if artifacts else dict() | [
"def",
"get_urls",
"(",
"config",
",",
"log",
")",
":",
"# Wait for job to be queued. Once it is we'll have the \"version\".",
"build_version",
"=",
"None",
"for",
"_",
"in",
"range",
"(",
"3",
")",
":",
"build_version",
"=",
"query_build_version",
"(",
"config",
")",
"if",
"build_version",
":",
"break",
"log",
".",
"info",
"(",
"'Waiting for job to be queued...'",
")",
"time",
".",
"sleep",
"(",
"SLEEP_FOR",
")",
"if",
"not",
"build_version",
":",
"log",
".",
"error",
"(",
"'Timed out waiting for job to be queued or build not found.'",
")",
"raise",
"HandledError",
"# Get job IDs. Wait for AppVeyor job to finish.",
"job_ids",
"=",
"list",
"(",
")",
"valid_statuses",
"=",
"[",
"'success'",
",",
"'failed'",
",",
"'running'",
",",
"'queued'",
"]",
"while",
"True",
":",
"job_ids",
"=",
"query_job_ids",
"(",
"build_version",
",",
"config",
")",
"statuses",
"=",
"set",
"(",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"job_ids",
"]",
")",
"if",
"'failed'",
"in",
"statuses",
":",
"job",
"=",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"job_ids",
"if",
"i",
"[",
"1",
"]",
"==",
"'failed'",
"]",
"[",
"0",
"]",
"url",
"=",
"'https://ci.appveyor.com/project/{0}/{1}/build/job/{2}'",
".",
"format",
"(",
"config",
"[",
"'owner'",
"]",
",",
"config",
"[",
"'repo'",
"]",
",",
"job",
")",
"log",
".",
"error",
"(",
"'AppVeyor job failed: %s'",
",",
"url",
")",
"raise",
"HandledError",
"if",
"statuses",
"==",
"set",
"(",
"valid_statuses",
"[",
":",
"1",
"]",
")",
":",
"log",
".",
"info",
"(",
"'Build successful. Found %d job%s.'",
",",
"len",
"(",
"job_ids",
")",
",",
"''",
"if",
"len",
"(",
"job_ids",
")",
"==",
"1",
"else",
"'s'",
")",
"break",
"if",
"'running'",
"in",
"statuses",
":",
"log",
".",
"info",
"(",
"'Waiting for job%s to finish...'",
",",
"''",
"if",
"len",
"(",
"job_ids",
")",
"==",
"1",
"else",
"'s'",
")",
"elif",
"'queued'",
"in",
"statuses",
":",
"log",
".",
"info",
"(",
"'Waiting for all jobs to start...'",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Got unknown status from AppVeyor API: %s'",
",",
"' '",
".",
"join",
"(",
"statuses",
"-",
"set",
"(",
"valid_statuses",
")",
")",
")",
"raise",
"HandledError",
"time",
".",
"sleep",
"(",
"SLEEP_FOR",
")",
"# Get artifacts.",
"artifacts",
"=",
"query_artifacts",
"(",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"job_ids",
"]",
")",
"log",
".",
"info",
"(",
"'Found %d artifact%s.'",
",",
"len",
"(",
"artifacts",
")",
",",
"''",
"if",
"len",
"(",
"artifacts",
")",
"==",
"1",
"else",
"'s'",
")",
"return",
"artifacts_urls",
"(",
"config",
",",
"artifacts",
")",
"if",
"artifacts",
"else",
"dict",
"(",
")"
] | Wait for AppVeyor job to finish and get all artifacts' URLs.
:param dict config: Dictionary from get_arguments().
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
:return: Paths and URLs from artifacts_urls.
:rtype: dict | [
"Wait",
"for",
"AppVeyor",
"job",
"to",
"finish",
"and",
"get",
"all",
"artifacts",
"URLs",
"."
] | python | train |
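The wait loop above is an instance of a generic poll-until-terminal-status pattern; a minimal sketch (the status sets and limits are illustrative):

import time

def poll(fetch_statuses, interval=15, attempts=100):
    # fetch_statuses returns the set of current job statuses.
    for _ in range(attempts):
        statuses = fetch_statuses()
        if 'failed' in statuses:
            raise RuntimeError('a job failed')
        if statuses == {'success'}:
            return
        time.sleep(interval)
    raise TimeoutError('jobs did not finish in time')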
materialsvirtuallab/monty | monty/subprocess.py | https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/subprocess.py#L59-L99 | def run(self, timeout=None, **kwargs):
"""
Run a command in a separate thread and wait timeout seconds.
kwargs are keyword arguments passed to Popen.
Return: self
"""
from subprocess import Popen, PIPE
def target(**kw):
try:
# print('Thread started')
self.process = Popen(self.command, **kw)
self.output, self.error = self.process.communicate()
self.retcode = self.process.returncode
# print('Thread stopped')
except:
import traceback
self.error = traceback.format_exc()
self.retcode = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = PIPE
# thread
import threading
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
# print("Terminating process")
self.process.terminate()
self.killed = True
thread.join()
return self | [
"def",
"run",
"(",
"self",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"def",
"target",
"(",
"*",
"*",
"kw",
")",
":",
"try",
":",
"# print('Thread started')",
"self",
".",
"process",
"=",
"Popen",
"(",
"self",
".",
"command",
",",
"*",
"*",
"kw",
")",
"self",
".",
"output",
",",
"self",
".",
"error",
"=",
"self",
".",
"process",
".",
"communicate",
"(",
")",
"self",
".",
"retcode",
"=",
"self",
".",
"process",
".",
"returncode",
"# print('Thread stopped')",
"except",
":",
"import",
"traceback",
"self",
".",
"error",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"retcode",
"=",
"-",
"1",
"# default stdout and stderr",
"if",
"'stdout'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'stdout'",
"]",
"=",
"PIPE",
"if",
"'stderr'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'stderr'",
"]",
"=",
"PIPE",
"# thread",
"import",
"threading",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"target",
",",
"kwargs",
"=",
"kwargs",
")",
"thread",
".",
"start",
"(",
")",
"thread",
".",
"join",
"(",
"timeout",
")",
"if",
"thread",
".",
"is_alive",
"(",
")",
":",
"# print(\"Terminating process\")",
"self",
".",
"process",
".",
"terminate",
"(",
")",
"self",
".",
"killed",
"=",
"True",
"thread",
".",
"join",
"(",
")",
"return",
"self"
] | Run a command in a separate thread and wait timeout seconds.
kwargs are keyword arguments passed to Popen.
Return: self | [
"Run",
"a",
"command",
"in",
"a",
"separated",
"thread",
"and",
"wait",
"timeout",
"seconds",
".",
"kwargs",
"are",
"keyword",
"arguments",
"passed",
"to",
"Popen",
"."
] | python | train |
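A usage sketch on a Unix-like system, assuming the Command constructor takes the argument list shown; on Python 3.3+ the standard library provides the same timeout behavior directly:

from monty.subprocess import Command

c = Command(['sleep', '10']).run(timeout=1)
# c.killed is True and c.retcode reflects the terminated process.

import subprocess
try:
    subprocess.run(['sleep', '10'], timeout=1)
except subprocess.TimeoutExpired:
    pass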
csparpa/pyowm | pyowm/weatherapi25/owm25.py | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/owm25.py#L1113-L1132 | def uvindex_forecast_around_coords(self, lat, lon):
"""
Queries the OWM Weather API for forecast Ultra Violet values in the next 8
days in the surroundings of the provided geocoordinates.
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:return: a list of *UVIndex* instances or empty list if data is not available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat}
json_data = self._uvapi.get_uvi_forecast(params)
uvindex_list = self._parsers['uvindex_list'].parse_JSON(json_data)
return uvindex_list | [
"def",
"uvindex_forecast_around_coords",
"(",
"self",
",",
"lat",
",",
"lon",
")",
":",
"geo",
".",
"assert_is_lon",
"(",
"lon",
")",
"geo",
".",
"assert_is_lat",
"(",
"lat",
")",
"params",
"=",
"{",
"'lon'",
":",
"lon",
",",
"'lat'",
":",
"lat",
"}",
"json_data",
"=",
"self",
".",
"_uvapi",
".",
"get_uvi_forecast",
"(",
"params",
")",
"uvindex_list",
"=",
"self",
".",
"_parsers",
"[",
"'uvindex_list'",
"]",
".",
"parse_JSON",
"(",
"json_data",
")",
"return",
"uvindex_list"
] | Queries the OWM Weather API for forecast Ultra Violet values in the next 8
days in the surroundings of the provided geocoordinates.
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:return: a list of *UVIndex* instances or empty list if data is not available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* for wrong input values | [
"Queries",
"the",
"OWM",
"Weather",
"API",
"for",
"forecast",
"Ultra",
"Violet",
"values",
"in",
"the",
"next",
"8",
"days",
"in",
"the",
"surroundings",
"of",
"the",
"provided",
"geocoordinates",
"."
] | python | train |
awickert/gFlex | gflex/base.py | https://github.com/awickert/gFlex/blob/3ac32249375b0f8d342a142585d86ea4d905a5a0/gflex/base.py#L927-L1008 | def FD(self):
"""
Set-up for the finite difference solution method
"""
if self.Verbose:
print("Finite Difference Solution Technique")
# Used to check for coeff_matrix here, but now doing so in self.bc_check()
# called by f1d and f2d at the start
#
# Define a stress-based qs = q0
# But only if the latter has not already been defined
# (e.g., by the getters and setters)
try:
self.qs
except:
self.qs = self.q0.copy()
# Remove self.q0 to avoid issues with multiply-defined inputs
# q0 is the parsable input to either a qs grid or contains (x,(y),q)
del self.q0
# Give it x and y dimensions for help with plotting tools
# (not implemented internally, but a help with external methods)
self.x = np.arange(self.dx/2., self.dx * self.qs.shape[0], self.dx)
if self.dimension == 2:
self.y = np.arange(self.dy/2., self.dy * self.qs.shape[1], self.dy)
# Is there a solver defined
try:
self.Solver # See if it exists already
except:
# Well, will fail if it doesn't see this, maybe not the most reasonable
# error message.
if self.filename:
self.Solver = self.configGet("string", "numerical", "Solver")
else:
sys.exit("No solver defined!")
# Check consistency of size if coeff array was loaded
if self.filename:
# In the case that it is iterative, find the convergence criterion
self.iterative_ConvergenceTolerance = self.configGet("float", "numerical", "ConvergenceTolerance")
# Try to import Te grid or scalar for the finite difference solution
try:
self.Te = self.configGet("float", "input", "ElasticThickness", optional=False)
if self.Te is None:
Tepath = self.configGet("string", "input", "ElasticThickness", optional=False)
self.Te = Tepath
else:
Tepath = None
except:
Tepath = self.configGet("string", "input", "ElasticThickness", optional=False)
self.Te = Tepath
if self.Te is None:
if self.coeff_matrix is not None:
pass
else:
# Have to bring this out here in case it was discovered in the
# try statement that there is no value given
sys.exit("No input elastic thickness or coefficient matrix supplied.")
# or if getter/setter
if type(self.Te) == str:
# Try to import Te grid or scalar for the finite difference solution
Tepath = self.Te
else:
Tepath = None # in case no self.filename present (like for GRASS GIS)
# If there is a Tepath, import Te
# Assume that even if a coeff_matrix is defined
# That the user wants Te if they gave the path
if Tepath:
self.Te = self.loadFile(self.Te, close_on_fail = False)
if self.Te is None:
print("Requested Te file is provided but cannot be located.")
print("No scalar elastic thickness is provided in configuration file")
print("(Typo in path to input Te grid?)")
if self.coeff_matrix is not None:
print("But a coefficient matrix has been found.")
print("Calculations will be carried forward using it.")
else:
print("Exiting.")
sys.exit()
# Check that Te is the proper size if it was loaded
# Will be array if it was loaded
if self.Te.any():
self.TeArraySizeCheck() | [
"def",
"FD",
"(",
"self",
")",
":",
"if",
"self",
".",
"Verbose",
":",
"print",
"(",
"\"Finite Difference Solution Technique\"",
")",
"# Used to check for coeff_matrix here, but now doing so in self.bc_check()",
"# called by f1d and f2d at the start",
"# ",
"# Define a stress-based qs = q0",
"# But only if the latter has not already been defined",
"# (e.g., by the getters and setters)",
"try",
":",
"self",
".",
"qs",
"except",
":",
"self",
".",
"qs",
"=",
"self",
".",
"q0",
".",
"copy",
"(",
")",
"# Remove self.q0 to avoid issues with multiply-defined inputs",
"# q0 is the parsable input to either a qs grid or contains (x,(y),q)",
"del",
"self",
".",
"q0",
"# Give it x and y dimensions for help with plotting tools",
"# (not implemented internally, but a help with external methods)",
"self",
".",
"x",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"dx",
"/",
"2.",
",",
"self",
".",
"dx",
"*",
"self",
".",
"qs",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"dx",
")",
"if",
"self",
".",
"dimension",
"==",
"2",
":",
"self",
".",
"y",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"dy",
"/",
"2.",
",",
"self",
".",
"dy",
"*",
"self",
".",
"qs",
".",
"shape",
"[",
"1",
"]",
",",
"self",
".",
"dy",
")",
"# Is there a solver defined",
"try",
":",
"self",
".",
"Solver",
"# See if it exists already",
"except",
":",
"# Well, will fail if it doesn't see this, maybe not the most reasonable",
"# error message.",
"if",
"self",
".",
"filename",
":",
"self",
".",
"Solver",
"=",
"self",
".",
"configGet",
"(",
"\"string\"",
",",
"\"numerical\"",
",",
"\"Solver\"",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"\"No solver defined!\"",
")",
"# Check consistency of size if coeff array was loaded",
"if",
"self",
".",
"filename",
":",
"# In the case that it is iterative, find the convergence criterion",
"self",
".",
"iterative_ConvergenceTolerance",
"=",
"self",
".",
"configGet",
"(",
"\"float\"",
",",
"\"numerical\"",
",",
"\"ConvergenceTolerance\"",
")",
"# Try to import Te grid or scalar for the finite difference solution",
"try",
":",
"self",
".",
"Te",
"=",
"self",
".",
"configGet",
"(",
"\"float\"",
",",
"\"input\"",
",",
"\"ElasticThickness\"",
",",
"optional",
"=",
"False",
")",
"if",
"self",
".",
"Te",
"is",
"None",
":",
"Tepath",
"=",
"self",
".",
"configGet",
"(",
"\"string\"",
",",
"\"input\"",
",",
"\"ElasticThickness\"",
",",
"optional",
"=",
"False",
")",
"self",
".",
"Te",
"=",
"Tepath",
"else",
":",
"Tepath",
"=",
"None",
"except",
":",
"Tepath",
"=",
"self",
".",
"configGet",
"(",
"\"string\"",
",",
"\"input\"",
",",
"\"ElasticThickness\"",
",",
"optional",
"=",
"False",
")",
"self",
".",
"Te",
"=",
"Tepath",
"if",
"self",
".",
"Te",
"is",
"None",
":",
"if",
"self",
".",
"coeff_matrix",
"is",
"not",
"None",
":",
"pass",
"else",
":",
"# Have to bring this out here in case it was discovered in the ",
"# try statement that there is no value given",
"sys",
".",
"exit",
"(",
"\"No input elastic thickness or coefficient matrix supplied.\"",
")",
"# or if getter/setter",
"if",
"type",
"(",
"self",
".",
"Te",
")",
"==",
"str",
":",
"# Try to import Te grid or scalar for the finite difference solution",
"Tepath",
"=",
"self",
".",
"Te",
"else",
":",
"Tepath",
"=",
"None",
"# in case no self.filename present (like for GRASS GIS)",
"# If there is a Tepath, import Te",
"# Assume that even if a coeff_matrix is defined",
"# That the user wants Te if they gave the path",
"if",
"Tepath",
":",
"self",
".",
"Te",
"=",
"self",
".",
"loadFile",
"(",
"self",
".",
"Te",
",",
"close_on_fail",
"=",
"False",
")",
"if",
"self",
".",
"Te",
"is",
"None",
":",
"print",
"(",
"\"Requested Te file is provided but cannot be located.\"",
")",
"print",
"(",
"\"No scalar elastic thickness is provided in configuration file\"",
")",
"print",
"(",
"\"(Typo in path to input Te grid?)\"",
")",
"if",
"self",
".",
"coeff_matrix",
"is",
"not",
"None",
":",
"print",
"(",
"\"But a coefficient matrix has been found.\"",
")",
"print",
"(",
"\"Calculations will be carried forward using it.\"",
")",
"else",
":",
"print",
"(",
"\"Exiting.\"",
")",
"sys",
".",
"exit",
"(",
")",
"# Check that Te is the proper size if it was loaded",
"# Will be array if it was loaded",
"if",
"self",
".",
"Te",
".",
"any",
"(",
")",
":",
"self",
".",
"TeArraySizeCheck",
"(",
")"
] | Set-up for the finite difference solution method | [
"Set",
"-",
"up",
"for",
"the",
"finite",
"difference",
"solution",
"method"
] | python | train |
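The coordinate arrays that FD() builds place one point at the center of each grid cell, via self.x = np.arange(self.dx/2., self.dx * self.qs.shape[0], self.dx). A standalone illustration of that idiom with invented grid values:

import numpy as np

dx = 2.0      # cell width
ncells = 5    # number of cells along x
# One coordinate per cell, located at the cell center:
# dx/2, 3*dx/2, 5*dx/2, ...
x = np.arange(dx / 2., dx * ncells, dx)
print(x)  # [1. 3. 5. 7. 9.]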
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L9404-L9528 | def get_description(self):
"""Returns information about the virtual system as arrays of instruction items. In each array, the
items with the same indices correspond and jointly represent an import instruction for VirtualBox.
The list below identifies the value sets that are possible depending on the
:py:class:`VirtualSystemDescriptionType` enum value in the array item in @a aTypes[]. In each case,
the array item with the same index in @a OVFValues[] will contain the original value as contained
in the OVF file (just for informational purposes), and the corresponding item in @a aVBoxValues[]
will contain a suggested value to be used for VirtualBox. Depending on the description type,
the @a aExtraConfigValues[] array item may also be used.
"OS": the guest operating system type. There must be exactly one such array item on import. The
corresponding item in @a aVBoxValues[] contains the suggested guest operating system for VirtualBox.
This will be one of the values listed in :py:func:`IVirtualBox.guest_os_types` . The corresponding
item in @a OVFValues[] will contain a numerical value that describes the operating system in the OVF.
"Name": the name to give to the new virtual machine. There can be at most one such array item;
if none is present on import, then an automatic name will be created from the operating system
type. The corresponding item in @a OVFValues[] will contain the suggested virtual machine name
from the OVF file, and @a aVBoxValues[] will contain a suggestion for a unique VirtualBox
:py:class:`IMachine` name that does not exist yet.
"Description": an arbitrary description.
"License": the EULA section from the OVF, if present. It is the responsibility of the calling
code to display such a license for agreement; the Main API does not enforce any such policy.
Miscellaneous: reserved for future use.
"CPU": the number of CPUs. There can be at most one such item, which will presently be ignored.
"Memory": the amount of guest RAM, in bytes. There can be at most one such array item; if none
is present on import, then VirtualBox will set a meaningful default based on the operating system
type.
"HardDiskControllerIDE": an IDE hard disk controller. There can be at most two such items.
An optional value in @a OVFValues[] and @a aVBoxValues[] can be "PIIX3" or "PIIX4" to specify
the type of IDE controller; this corresponds to the ResourceSubType element which VirtualBox
writes into the OVF.
The matching item in the @a aRefs[] array will contain an integer that items of the "Harddisk"
type can use to specify which hard disk controller a virtual disk should be connected to.
Note that in OVF, an IDE controller has two channels, corresponding to "master" and "slave"
in traditional terminology, whereas the IDE storage controller that VirtualBox supports in
its virtual machines supports four channels (primary master, primary slave, secondary master,
secondary slave) and thus maps to two IDE controllers in the OVF sense.
"HardDiskControllerSATA": an SATA hard disk controller. There can be at most one such item. This
has no value in @a OVFValues[] or @a aVBoxValues[].
The matching item in the @a aRefs[] array will be used as with IDE controllers (see above).
"HardDiskControllerSCSI": a SCSI hard disk controller. There can be at most one such item.
The items in @a OVFValues[] and @a aVBoxValues[] will either be "LsiLogic", "BusLogic" or
"LsiLogicSas". (Note that in OVF, the LsiLogicSas controller is treated as a SCSI controller
whereas VirtualBox considers it a class of storage controllers of its own; see
:py:class:`StorageControllerType` ).
The matching item in the @a aRefs[] array will be used as with IDE controllers (see above).
"HardDiskImage": a virtual hard disk, most probably as a reference to an image file. There can be an
arbitrary number of these items, one for each virtual disk image that accompanies the OVF.
The array item in @a OVFValues[] will contain the file specification from the OVF file (without
a path since the image file should be in the same location as the OVF file itself), whereas the
item in @a aVBoxValues[] will contain a qualified path specification to where VirtualBox uses the
hard disk image. This means that on import the image will be copied and converted from the
"ovf" location to the "vbox" location; on export, this will be handled the other way round.
The matching item in the @a aExtraConfigValues[] array must contain a string of the following
format: "controller=<index>;channel=<c>"
In this string, <index> must be an integer specifying the hard disk controller to connect
the image to. That number must be the index of an array item with one of the hard disk controller
types (HardDiskControllerSCSI, HardDiskControllerSATA, HardDiskControllerIDE).
In addition, <c> must specify the channel to use on that controller. For IDE controllers,
this can be 0 or 1 for master or slave, respectively. For compatibility with VirtualBox versions
before 3.2, the values 2 and 3 (for secondary master and secondary slave) are also supported, but
no longer exported. For SATA and SCSI controllers, the channel can range from 0-29.
"CDROM": a virtual CD-ROM drive. The matching item in @a aExtraConfigValue[] contains the same
attachment information as with "HardDiskImage" items.
"CDROM": a virtual floppy drive. The matching item in @a aExtraConfigValue[] contains the same
attachment information as with "HardDiskImage" items.
"NetworkAdapter": a network adapter. The array item in @a aVBoxValues[] will specify the hardware
for the network adapter, whereas the array item in @a aExtraConfigValues[] will have a string
of the "type=<X>" format, where <X> must be either "NAT" or "Bridged".
"USBController": a USB controller. There can be at most one such item. If, and only if, such an
item is present, USB support will be enabled for the new virtual machine.
"SoundCard": a sound card. There can be at most one such item. If and only if such an item is
present, sound support will be enabled for the new virtual machine. Note that the virtual
machine in VirtualBox will always be presented with the standard VirtualBox soundcard, which
may be different from the virtual soundcard expected by the appliance.
out types of type :class:`VirtualSystemDescriptionType`
out refs of type str
out ovf_values of type str
out v_box_values of type str
out extra_config_values of type str
"""
(types, refs, ovf_values, v_box_values, extra_config_values) = self._call("getDescription")
types = [VirtualSystemDescriptionType(a) for a in types]
return (types, refs, ovf_values, v_box_values, extra_config_values) | [
"def",
"get_description",
"(",
"self",
")",
":",
"(",
"types",
",",
"refs",
",",
"ovf_values",
",",
"v_box_values",
",",
"extra_config_values",
")",
"=",
"self",
".",
"_call",
"(",
"\"getDescription\"",
")",
"types",
"=",
"[",
"VirtualSystemDescriptionType",
"(",
"a",
")",
"for",
"a",
"in",
"types",
"]",
"return",
"(",
"types",
",",
"refs",
",",
"ovf_values",
",",
"v_box_values",
",",
"extra_config_values",
")"
] | Returns information about the virtual system as arrays of instruction items. In each array, the
items with the same indices correspond and jointly represent an import instruction for VirtualBox.
The list below identifies the value sets that are possible depending on the
:py:class:`VirtualSystemDescriptionType` enum value in the array item in @a aTypes[]. In each case,
the array item with the same index in @a OVFValues[] will contain the original value as contained
in the OVF file (just for informational purposes), and the corresponding item in @a aVBoxValues[]
will contain a suggested value to be used for VirtualBox. Depending on the description type,
the @a aExtraConfigValues[] array item may also be used.
"OS": the guest operating system type. There must be exactly one such array item on import. The
corresponding item in @a aVBoxValues[] contains the suggested guest operating system for VirtualBox.
This will be one of the values listed in :py:func:`IVirtualBox.guest_os_types` . The corresponding
item in @a OVFValues[] will contain a numerical value that describes the operating system in the OVF.
"Name": the name to give to the new virtual machine. There can be at most one such array item;
if none is present on import, then an automatic name will be created from the operating system
type. The corresponding item in @a OVFValues[] will contain the suggested virtual machine name
from the OVF file, and @a aVBoxValues[] will contain a suggestion for a unique VirtualBox
:py:class:`IMachine` name that does not exist yet.
"Description": an arbitrary description.
"License": the EULA section from the OVF, if present. It is the responsibility of the calling
code to display such a license for agreement; the Main API does not enforce any such policy.
Miscellaneous: reserved for future use.
"CPU": the number of CPUs. There can be at most one such item, which will presently be ignored.
"Memory": the amount of guest RAM, in bytes. There can be at most one such array item; if none
is present on import, then VirtualBox will set a meaningful default based on the operating system
type.
"HardDiskControllerIDE": an IDE hard disk controller. There can be at most two such items.
An optional value in @a OVFValues[] and @a aVBoxValues[] can be "PIIX3" or "PIIX4" to specify
the type of IDE controller; this corresponds to the ResourceSubType element which VirtualBox
writes into the OVF.
The matching item in the @a aRefs[] array will contain an integer that items of the "Harddisk"
type can use to specify which hard disk controller a virtual disk should be connected to.
Note that in OVF, an IDE controller has two channels, corresponding to "master" and "slave"
in traditional terminology, whereas the IDE storage controller that VirtualBox supports in
its virtual machines supports four channels (primary master, primary slave, secondary master,
secondary slave) and thus maps to two IDE controllers in the OVF sense.
"HardDiskControllerSATA": an SATA hard disk controller. There can be at most one such item. This
has no value in @a OVFValues[] or @a aVBoxValues[].
The matching item in the @a aRefs[] array will be used as with IDE controllers (see above).
"HardDiskControllerSCSI": a SCSI hard disk controller. There can be at most one such item.
The items in @a OVFValues[] and @a aVBoxValues[] will either be "LsiLogic", "BusLogic" or
"LsiLogicSas". (Note that in OVF, the LsiLogicSas controller is treated as a SCSI controller
whereas VirtualBox considers it a class of storage controllers of its own; see
:py:class:`StorageControllerType` ).
The matching item in the @a aRefs[] array will be used as with IDE controllers (see above).
"HardDiskImage": a virtual hard disk, most probably as a reference to an image file. There can be an
arbitrary number of these items, one for each virtual disk image that accompanies the OVF.
The array item in @a OVFValues[] will contain the file specification from the OVF file (without
a path since the image file should be in the same location as the OVF file itself), whereas the
item in @a aVBoxValues[] will contain a qualified path specification to where VirtualBox uses the
hard disk image. This means that on import the image will be copied and converted from the
"ovf" location to the "vbox" location; on export, this will be handled the other way round.
The matching item in the @a aExtraConfigValues[] array must contain a string of the following
format: "controller=<index>;channel=<c>"
In this string, <index> must be an integer specifying the hard disk controller to connect
the image to. That number must be the index of an array item with one of the hard disk controller
types (HardDiskControllerSCSI, HardDiskControllerSATA, HardDiskControllerIDE).
In addition, <c> must specify the channel to use on that controller. For IDE controllers,
this can be 0 or 1 for master or slave, respectively. For compatibility with VirtualBox versions
before 3.2, the values 2 and 3 (for secondary master and secondary slave) are also supported, but
no longer exported. For SATA and SCSI controllers, the channel can range from 0-29.
"CDROM": a virtual CD-ROM drive. The matching item in @a aExtraConfigValue[] contains the same
attachment information as with "HardDiskImage" items.
"CDROM": a virtual floppy drive. The matching item in @a aExtraConfigValue[] contains the same
attachment information as with "HardDiskImage" items.
"NetworkAdapter": a network adapter. The array item in @a aVBoxValues[] will specify the hardware
for the network adapter, whereas the array item in @a aExtraConfigValues[] will have a string
of the "type=<X>" format, where <X> must be either "NAT" or "Bridged".
"USBController": a USB controller. There can be at most one such item. If, and only if, such an
item is present, USB support will be enabled for the new virtual machine.
"SoundCard": a sound card. There can be at most one such item. If and only if such an item is
present, sound support will be enabled for the new virtual machine. Note that the virtual
machine in VirtualBox will always be presented with the standard VirtualBox soundcard, which
may be different from the virtual soundcard expected by the appliance.
out types of type :class:`VirtualSystemDescriptionType`
out refs of type str
out ovf_values of type str
out v_box_values of type str
out extra_config_values of type str | [
"Returns",
"information",
"about",
"the",
"virtual",
"system",
"as",
"arrays",
"of",
"instruction",
"items",
".",
"In",
"each",
"array",
"the",
"items",
"with",
"the",
"same",
"indices",
"correspond",
"and",
"jointly",
"represent",
"an",
"import",
"instruction",
"for",
"VirtualBox",
".",
"The",
"list",
"below",
"identifies",
"the",
"value",
"sets",
"that",
"are",
"possible",
"depending",
"on",
"the",
":",
"py",
":",
"class",
":",
"VirtualSystemDescriptionType",
"enum",
"value",
"in",
"the",
"array",
"item",
"in",
"@a",
"aTypes",
"[]",
".",
"In",
"each",
"case",
"the",
"array",
"item",
"with",
"the",
"same",
"index",
"in",
"@a",
"OVFValues",
"[]",
"will",
"contain",
"the",
"original",
"value",
"as",
"contained",
"in",
"the",
"OVF",
"file",
"(",
"just",
"for",
"informational",
"purposes",
")",
"and",
"the",
"corresponding",
"item",
"in",
"@a",
"aVBoxValues",
"[]",
"will",
"contain",
"a",
"suggested",
"value",
"to",
"be",
"used",
"for",
"VirtualBox",
".",
"Depending",
"on",
"the",
"description",
"type",
"the",
"@a",
"aExtraConfigValues",
"[]",
"array",
"item",
"may",
"also",
"be",
"used",
".",
"OS",
":",
"the",
"guest",
"operating",
"system",
"type",
".",
"There",
"must",
"be",
"exactly",
"one",
"such",
"array",
"item",
"on",
"import",
".",
"The",
"corresponding",
"item",
"in",
"@a",
"aVBoxValues",
"[]",
"contains",
"the",
"suggested",
"guest",
"operating",
"system",
"for",
"VirtualBox",
".",
"This",
"will",
"be",
"one",
"of",
"the",
"values",
"listed",
"in",
":",
"py",
":",
"func",
":",
"IVirtualBox",
".",
"guest_os_types",
".",
"The",
"corresponding",
"item",
"in",
"@a",
"OVFValues",
"[]",
"will",
"contain",
"a",
"numerical",
"value",
"that",
"described",
"the",
"operating",
"system",
"in",
"the",
"OVF",
".",
"Name",
":",
"the",
"name",
"to",
"give",
"to",
"the",
"new",
"virtual",
"machine",
".",
"There",
"can",
"be",
"at",
"most",
"one",
"such",
"array",
"item",
";",
"if",
"none",
"is",
"present",
"on",
"import",
"then",
"an",
"automatic",
"name",
"will",
"be",
"created",
"from",
"the",
"operating",
"system",
"type",
".",
"The",
"corresponding",
"item",
"im",
"@a",
"OVFValues",
"[]",
"will",
"contain",
"the",
"suggested",
"virtual",
"machine",
"name",
"from",
"the",
"OVF",
"file",
"and",
"@a",
"aVBoxValues",
"[]",
"will",
"contain",
"a",
"suggestion",
"for",
"a",
"unique",
"VirtualBox",
":",
"py",
":",
"class",
":",
"IMachine",
"name",
"that",
"does",
"not",
"exist",
"yet",
".",
"Description",
":",
"an",
"arbitrary",
"description",
".",
"License",
":",
"the",
"EULA",
"section",
"from",
"the",
"OVF",
"if",
"present",
".",
"It",
"is",
"the",
"responsibility",
"of",
"the",
"calling",
"code",
"to",
"display",
"such",
"a",
"license",
"for",
"agreement",
";",
"the",
"Main",
"API",
"does",
"not",
"enforce",
"any",
"such",
"policy",
".",
"Miscellaneous",
":",
"reserved",
"for",
"future",
"use",
".",
"CPU",
":",
"the",
"number",
"of",
"CPUs",
".",
"There",
"can",
"be",
"at",
"most",
"one",
"such",
"item",
"which",
"will",
"presently",
"be",
"ignored",
".",
"Memory",
":",
"the",
"amount",
"of",
"guest",
"RAM",
"in",
"bytes",
".",
"There",
"can",
"be",
"at",
"most",
"one",
"such",
"array",
"item",
";",
"if",
"none",
"is",
"present",
"on",
"import",
"then",
"VirtualBox",
"will",
"set",
"a",
"meaningful",
"default",
"based",
"on",
"the",
"operating",
"system",
"type",
".",
"HardDiskControllerIDE",
":",
"an",
"IDE",
"hard",
"disk",
"controller",
".",
"There",
"can",
"be",
"at",
"most",
"two",
"such",
"items",
".",
"An",
"optional",
"value",
"in",
"@a",
"OVFValues",
"[]",
"and",
"@a",
"aVBoxValues",
"[]",
"can",
"be",
"PIIX3",
"or",
"PIIX4",
"to",
"specify",
"the",
"type",
"of",
"IDE",
"controller",
";",
"this",
"corresponds",
"to",
"the",
"ResourceSubType",
"element",
"which",
"VirtualBox",
"writes",
"into",
"the",
"OVF",
".",
"The",
"matching",
"item",
"in",
"the",
"@a",
"aRefs",
"[]",
"array",
"will",
"contain",
"an",
"integer",
"that",
"items",
"of",
"the",
"Harddisk",
"type",
"can",
"use",
"to",
"specify",
"which",
"hard",
"disk",
"controller",
"a",
"virtual",
"disk",
"should",
"be",
"connected",
"to",
".",
"Note",
"that",
"in",
"OVF",
"an",
"IDE",
"controller",
"has",
"two",
"channels",
"corresponding",
"to",
"master",
"and",
"slave",
"in",
"traditional",
"terminology",
"whereas",
"the",
"IDE",
"storage",
"controller",
"that",
"VirtualBox",
"supports",
"in",
"its",
"virtual",
"machines",
"supports",
"four",
"channels",
"(",
"primary",
"master",
"primary",
"slave",
"secondary",
"master",
"secondary",
"slave",
")",
"and",
"thus",
"maps",
"to",
"two",
"IDE",
"controllers",
"in",
"the",
"OVF",
"sense",
".",
"HardDiskControllerSATA",
":",
"an",
"SATA",
"hard",
"disk",
"controller",
".",
"There",
"can",
"be",
"at",
"most",
"one",
"such",
"item",
".",
"This",
"has",
"no",
"value",
"in",
"@a",
"OVFValues",
"[]",
"or",
"@a",
"aVBoxValues",
"[]",
".",
"The",
"matching",
"item",
"in",
"the",
"@a",
"aRefs",
"[]",
"array",
"will",
"be",
"used",
"as",
"with",
"IDE",
"controllers",
"(",
"see",
"above",
")",
".",
"HardDiskControllerSCSI",
":",
"a",
"SCSI",
"hard",
"disk",
"controller",
".",
"There",
"can",
"be",
"at",
"most",
"one",
"such",
"item",
".",
"The",
"items",
"in",
"@a",
"OVFValues",
"[]",
"and",
"@a",
"aVBoxValues",
"[]",
"will",
"either",
"be",
"LsiLogic",
"BusLogic",
"or",
"LsiLogicSas",
".",
"(",
"Note",
"that",
"in",
"OVF",
"the",
"LsiLogicSas",
"controller",
"is",
"treated",
"as",
"a",
"SCSI",
"controller",
"whereas",
"VirtualBox",
"considers",
"it",
"a",
"class",
"of",
"storage",
"controllers",
"of",
"its",
"own",
";",
"see",
":",
"py",
":",
"class",
":",
"StorageControllerType",
")",
".",
"The",
"matching",
"item",
"in",
"the",
"@a",
"aRefs",
"[]",
"array",
"will",
"be",
"used",
"as",
"with",
"IDE",
"controllers",
"(",
"see",
"above",
")",
".",
"HardDiskImage",
":",
"a",
"virtual",
"hard",
"disk",
"most",
"probably",
"as",
"a",
"reference",
"to",
"an",
"image",
"file",
".",
"There",
"can",
"be",
"an",
"arbitrary",
"number",
"of",
"these",
"items",
"one",
"for",
"each",
"virtual",
"disk",
"image",
"that",
"accompanies",
"the",
"OVF",
".",
"The",
"array",
"item",
"in",
"@a",
"OVFValues",
"[]",
"will",
"contain",
"the",
"file",
"specification",
"from",
"the",
"OVF",
"file",
"(",
"without",
"a",
"path",
"since",
"the",
"image",
"file",
"should",
"be",
"in",
"the",
"same",
"location",
"as",
"the",
"OVF",
"file",
"itself",
")",
"whereas",
"the",
"item",
"in",
"@a",
"aVBoxValues",
"[]",
"will",
"contain",
"a",
"qualified",
"path",
"specification",
"to",
"where",
"VirtualBox",
"uses",
"the",
"hard",
"disk",
"image",
".",
"This",
"means",
"that",
"on",
"import",
"the",
"image",
"will",
"be",
"copied",
"and",
"converted",
"from",
"the",
"ovf",
"location",
"to",
"the",
"vbox",
"location",
";",
"on",
"export",
"this",
"will",
"be",
"handled",
"the",
"other",
"way",
"round",
".",
"The",
"matching",
"item",
"in",
"the",
"@a",
"aExtraConfigValues",
"[]",
"array",
"must",
"contain",
"a",
"string",
"of",
"the",
"following",
"format",
":",
"controller",
"=",
"<index",
">",
";",
"channel",
"=",
"<c",
">",
"In",
"this",
"string",
"<index",
">",
"must",
"be",
"an",
"integer",
"specifying",
"the",
"hard",
"disk",
"controller",
"to",
"connect",
"the",
"image",
"to",
".",
"That",
"number",
"must",
"be",
"the",
"index",
"of",
"an",
"array",
"item",
"with",
"one",
"of",
"the",
"hard",
"disk",
"controller",
"types",
"(",
"HardDiskControllerSCSI",
"HardDiskControllerSATA",
"HardDiskControllerIDE",
")",
".",
"In",
"addition",
"<c",
">",
"must",
"specify",
"the",
"channel",
"to",
"use",
"on",
"that",
"controller",
".",
"For",
"IDE",
"controllers",
"this",
"can",
"be",
"0",
"or",
"1",
"for",
"master",
"or",
"slave",
"respectively",
".",
"For",
"compatibility",
"with",
"VirtualBox",
"versions",
"before",
"3",
".",
"2",
"the",
"values",
"2",
"and",
"3",
"(",
"for",
"secondary",
"master",
"and",
"secondary",
"slave",
")",
"are",
"also",
"supported",
"but",
"no",
"longer",
"exported",
".",
"For",
"SATA",
"and",
"SCSI",
"controllers",
"the",
"channel",
"can",
"range",
"from",
"0",
"-",
"29",
".",
"CDROM",
":",
"a",
"virtual",
"CD",
"-",
"ROM",
"drive",
".",
"The",
"matching",
"item",
"in",
"@a",
"aExtraConfigValue",
"[]",
"contains",
"the",
"same",
"attachment",
"information",
"as",
"with",
"HardDiskImage",
"items",
".",
"CDROM",
":",
"a",
"virtual",
"floppy",
"drive",
".",
"The",
"matching",
"item",
"in",
"@a",
"aExtraConfigValue",
"[]",
"contains",
"the",
"same",
"attachment",
"information",
"as",
"with",
"HardDiskImage",
"items",
".",
"NetworkAdapter",
":",
"a",
"network",
"adapter",
".",
"The",
"array",
"item",
"in",
"@a",
"aVBoxValues",
"[]",
"will",
"specify",
"the",
"hardware",
"for",
"the",
"network",
"adapter",
"whereas",
"the",
"array",
"item",
"in",
"@a",
"aExtraConfigValues",
"[]",
"will",
"have",
"a",
"string",
"of",
"the",
"type",
"=",
"<X",
">",
"format",
"where",
"<X",
">",
"must",
"be",
"either",
"NAT",
"or",
"Bridged",
".",
"USBController",
":",
"a",
"USB",
"controller",
".",
"There",
"can",
"be",
"at",
"most",
"one",
"such",
"item",
".",
"If",
"and",
"only",
"if",
"such",
"an",
"item",
"is",
"present",
"USB",
"support",
"will",
"be",
"enabled",
"for",
"the",
"new",
"virtual",
"machine",
".",
"SoundCard",
":",
"a",
"sound",
"card",
".",
"There",
"can",
"be",
"at",
"most",
"one",
"such",
"item",
".",
"If",
"and",
"only",
"if",
"such",
"an",
"item",
"is",
"present",
"sound",
"support",
"will",
"be",
"enabled",
"for",
"the",
"new",
"virtual",
"machine",
".",
"Note",
"that",
"the",
"virtual",
"machine",
"in",
"VirtualBox",
"will",
"always",
"be",
"presented",
"with",
"the",
"standard",
"VirtualBox",
"soundcard",
"which",
"may",
"be",
"different",
"from",
"the",
"virtual",
"soundcard",
"expected",
"by",
"the",
"appliance",
"."
] | python | train |
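get_description() returns five index-aligned lists, one import instruction per index. A hedged sketch of how a caller might walk them; the appliance setup calls are assumptions based on the underlying VirtualBox COM API, and progress handling for the asynchronous read() is omitted:

import virtualbox

vbox = virtualbox.VirtualBox()
appliance = vbox.create_appliance()
appliance.read('/path/to/machine.ovf')  # asynchronous in the real API
appliance.interpret()

for desc in appliance.virtual_system_descriptions:
    types, refs, ovf_values, vbox_values, extra = desc.get_description()
    # Item i of each list jointly describes one import instruction.
    for t, ovf, vb, xtra in zip(types, ovf_values, vbox_values, extra):
        print(t, ovf, '->', vb, xtra)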
willkg/socorro-siggen | siggen/rules.py | https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L268-L351 | def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
"""
each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names.
"""
notes = []
debug_notes = []
# shorten source_list to the first signatureSentinel
sentinel_locations = []
for a_sentinel in self.signature_sentinels:
if type(a_sentinel) == tuple:
a_sentinel, condition_fn = a_sentinel
if not condition_fn(source_list):
continue
try:
sentinel_locations.append(source_list.index(a_sentinel))
except ValueError:
pass
if sentinel_locations:
min_index = min(sentinel_locations)
debug_notes.append(
'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
)
source_list = source_list[min_index:]
# Get all the relevant frame signatures. Note that these function signatures
# have already been normalized at this point.
new_signature_list = []
for a_signature in source_list:
# If the signature matches the irrelevant signatures regex, skip to the next frame.
if self.irrelevant_signature_re.match(a_signature):
debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
continue
# If the frame signature is a dll, remove the @xxxxx part.
if '.dll' in a_signature.lower():
a_signature = a_signature.split('@')[0]
# If this trimmed DLL signature is the same as the previous frame's, skip it.
if new_signature_list and a_signature == new_signature_list[-1]:
continue
new_signature_list.append(a_signature)
# If the signature does not match the prefix signatures regex, then it is the last
# one we add to the list.
if not self.prefix_signature_re.match(a_signature):
debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
break
debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))
# Add a special marker for hang crash reports.
if hang_type:
debug_notes.append(
'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
)
new_signature_list.insert(0, self.hang_prefixes[hang_type])
signature = delimiter.join(new_signature_list)
# Handle empty signatures to explain why we failed generating them.
if signature == '' or signature is None:
if crashed_thread is None:
notes.append(
"CSignatureTool: No signature could be created because we do not know which "
"thread crashed"
)
signature = "EMPTY: no crashing thread identified"
else:
notes.append(
"CSignatureTool: No proper signature could be created because no good data "
"for the crashing thread ({}) was found".format(crashed_thread)
)
try:
signature = source_list[0]
except IndexError:
signature = "EMPTY: no frame data available"
return signature, notes, debug_notes | [
"def",
"_do_generate",
"(",
"self",
",",
"source_list",
",",
"hang_type",
",",
"crashed_thread",
",",
"delimiter",
"=",
"' | '",
")",
":",
"notes",
"=",
"[",
"]",
"debug_notes",
"=",
"[",
"]",
"# shorten source_list to the first signatureSentinel",
"sentinel_locations",
"=",
"[",
"]",
"for",
"a_sentinel",
"in",
"self",
".",
"signature_sentinels",
":",
"if",
"type",
"(",
"a_sentinel",
")",
"==",
"tuple",
":",
"a_sentinel",
",",
"condition_fn",
"=",
"a_sentinel",
"if",
"not",
"condition_fn",
"(",
"source_list",
")",
":",
"continue",
"try",
":",
"sentinel_locations",
".",
"append",
"(",
"source_list",
".",
"index",
"(",
"a_sentinel",
")",
")",
"except",
"ValueError",
":",
"pass",
"if",
"sentinel_locations",
":",
"min_index",
"=",
"min",
"(",
"sentinel_locations",
")",
"debug_notes",
".",
"append",
"(",
"'sentinel; starting at \"{}\" index {}'",
".",
"format",
"(",
"source_list",
"[",
"min_index",
"]",
",",
"min_index",
")",
")",
"source_list",
"=",
"source_list",
"[",
"min_index",
":",
"]",
"# Get all the relevant frame signatures. Note that these function signatures",
"# have already been normalized at this point.",
"new_signature_list",
"=",
"[",
"]",
"for",
"a_signature",
"in",
"source_list",
":",
"# If the signature matches the irrelevant signatures regex, skip to the next frame.",
"if",
"self",
".",
"irrelevant_signature_re",
".",
"match",
"(",
"a_signature",
")",
":",
"debug_notes",
".",
"append",
"(",
"'irrelevant; ignoring: \"{}\"'",
".",
"format",
"(",
"a_signature",
")",
")",
"continue",
"# If the frame signature is a dll, remove the @xxxxx part.",
"if",
"'.dll'",
"in",
"a_signature",
".",
"lower",
"(",
")",
":",
"a_signature",
"=",
"a_signature",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
"# If this trimmed DLL signature is the same as the previous frame's, skip it.",
"if",
"new_signature_list",
"and",
"a_signature",
"==",
"new_signature_list",
"[",
"-",
"1",
"]",
":",
"continue",
"new_signature_list",
".",
"append",
"(",
"a_signature",
")",
"# If the signature does not match the prefix signatures regex, then it is the last",
"# one we add to the list.",
"if",
"not",
"self",
".",
"prefix_signature_re",
".",
"match",
"(",
"a_signature",
")",
":",
"debug_notes",
".",
"append",
"(",
"'not a prefix; stop: \"{}\"'",
".",
"format",
"(",
"a_signature",
")",
")",
"break",
"debug_notes",
".",
"append",
"(",
"'prefix; continue iterating: \"{}\"'",
".",
"format",
"(",
"a_signature",
")",
")",
"# Add a special marker for hang crash reports.",
"if",
"hang_type",
":",
"debug_notes",
".",
"append",
"(",
"'hang_type {}: prepending {}'",
".",
"format",
"(",
"hang_type",
",",
"self",
".",
"hang_prefixes",
"[",
"hang_type",
"]",
")",
")",
"new_signature_list",
".",
"insert",
"(",
"0",
",",
"self",
".",
"hang_prefixes",
"[",
"hang_type",
"]",
")",
"signature",
"=",
"delimiter",
".",
"join",
"(",
"new_signature_list",
")",
"# Handle empty signatures to explain why we failed generating them.",
"if",
"signature",
"==",
"''",
"or",
"signature",
"is",
"None",
":",
"if",
"crashed_thread",
"is",
"None",
":",
"notes",
".",
"append",
"(",
"\"CSignatureTool: No signature could be created because we do not know which \"",
"\"thread crashed\"",
")",
"signature",
"=",
"\"EMPTY: no crashing thread identified\"",
"else",
":",
"notes",
".",
"append",
"(",
"\"CSignatureTool: No proper signature could be created because no good data \"",
"\"for the crashing thread ({}) was found\"",
".",
"format",
"(",
"crashed_thread",
")",
")",
"try",
":",
"signature",
"=",
"source_list",
"[",
"0",
"]",
"except",
"IndexError",
":",
"signature",
"=",
"\"EMPTY: no frame data available\"",
"return",
"signature",
",",
"notes",
",",
"debug_notes"
] | each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names. | [
"each",
"element",
"of",
"signatureList",
"names",
"a",
"frame",
"in",
"the",
"crash",
"stack",
";",
"and",
"is",
":",
"-",
"a",
"prefix",
"of",
"a",
"relevant",
"frame",
":",
"Append",
"this",
"element",
"to",
"the",
"signature",
"-",
"a",
"relevant",
"frame",
":",
"Append",
"this",
"element",
"and",
"stop",
"looking",
"-",
"irrelevant",
":",
"Append",
"this",
"element",
"only",
"after",
"seeing",
"a",
"prefix",
"frame",
"The",
"signature",
"is",
"a",
"|",
"separated",
"string",
"of",
"frame",
"names",
"."
] | python | train |
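A condensed, standalone re-implementation of the frame walk at the heart of _do_generate; the two regexes below are toy stand-ins for the rule lists the project ships as configuration:

import re

IRRELEVANT_RE = re.compile(r'^(KiFastSystemCallRet|RtlUserThreadStart)$')
PREFIX_RE = re.compile(r'^(WaitForSingleObject|MessageLoop::Run)$')


def generate_signature(frames, delimiter=' | '):
    out = []
    for frame in frames:
        if IRRELEVANT_RE.match(frame):
            continue  # drop noise frames entirely
        if '.dll' in frame.lower():
            frame = frame.split('@')[0]  # strip the @xxxxx offset
        if out and frame == out[-1]:
            continue  # collapse adjacent duplicate frames
        out.append(frame)
        if not PREFIX_RE.match(frame):
            break  # the first non-prefix frame ends the signature
    return delimiter.join(out)


print(generate_signature(
    ['KiFastSystemCallRet', 'WaitForSingleObject', 'my_app::crash']))
# -> 'WaitForSingleObject | my_app::crash'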
saltstack/salt | salt/states/boto_cognitoidentity.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_cognitoidentity.py#L66-L90 | def _get_object(objname, objtype):
'''
Helper function to retrieve objtype from pillars if objname
is string_types, used for SupportedLoginProviders and
OpenIdConnectProviderARNs.
'''
ret = None
if objname is None:
return ret
if isinstance(objname, string_types):
if objname in __opts__:
ret = __opts__[objname]
master_opts = __pillar__.get('master', {})
if objname in master_opts:
ret = master_opts[objname]
if objname in __pillar__:
ret = __pillar__[objname]
elif isinstance(objname, objtype):
ret = objname
if not isinstance(ret, objtype):
ret = None
return ret | [
"def",
"_get_object",
"(",
"objname",
",",
"objtype",
")",
":",
"ret",
"=",
"None",
"if",
"objname",
"is",
"None",
":",
"return",
"ret",
"if",
"isinstance",
"(",
"objname",
",",
"string_types",
")",
":",
"if",
"objname",
"in",
"__opts__",
":",
"ret",
"=",
"__opts__",
"[",
"objname",
"]",
"master_opts",
"=",
"__pillar__",
".",
"get",
"(",
"'master'",
",",
"{",
"}",
")",
"if",
"objname",
"in",
"master_opts",
":",
"ret",
"=",
"master_opts",
"[",
"objname",
"]",
"if",
"objname",
"in",
"__pillar__",
":",
"ret",
"=",
"__pillar__",
"[",
"objname",
"]",
"elif",
"isinstance",
"(",
"objname",
",",
"objtype",
")",
":",
"ret",
"=",
"objname",
"if",
"not",
"isinstance",
"(",
"ret",
",",
"objtype",
")",
":",
"ret",
"=",
"None",
"return",
"ret"
] | Helper function to retrieve objtype from pillars if objname
is string_types, used for SupportedLoginProviders and
OpenIdConnectProviderARNs. | [
"Helper",
"function",
"to",
"retrieve",
"objtype",
"from",
"pillars",
"if",
"objname",
"is",
"string_types",
"used",
"for",
"SupportedLoginProviders",
"and",
"OpenIdConnectProviderARNs",
"."
] | python | train |
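Outside of Salt, the precedence that _get_object implements reduces to later sources overwriting earlier ones: pillar beats pillar['master'], which beats opts. A plain-dict sketch of that order for string names, with invented values:

def resolve(name, opts, pillar):
    # Mirrors _get_object's lookup order when objname is a string.
    ret = None
    if name in opts:
        ret = opts[name]
    master_opts = pillar.get('master', {})
    if name in master_opts:
        ret = master_opts[name]
    if name in pillar:
        ret = pillar[name]
    return ret


opts = {'providers': {'graph.facebook.com': 'opts-app-id'}}
pillar = {'providers': {'graph.facebook.com': 'pillar-app-id'}}
print(resolve('providers', opts, pillar))
# -> {'graph.facebook.com': 'pillar-app-id'} (pillar wins)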
MolSSI-BSE/basis_set_exchange | basis_set_exchange/api.py | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/api.py#L297-L314 | def get_metadata(data_dir=None):
'''Obtain the metadata for all basis sets
The metadata includes information such as the display name of the basis set,
its versions, and what elements are included in the basis set
The data is read from the METADATA.json file in the `data_dir` directory.
Parameters
----------
data_dir : str
Data directory with all the basis set information. By default,
it is in the 'data' subdirectory of this project.
'''
data_dir = fix_data_dir(data_dir)
metadata_file = os.path.join(data_dir, "METADATA.json")
return fileio.read_metadata(metadata_file) | [
"def",
"get_metadata",
"(",
"data_dir",
"=",
"None",
")",
":",
"data_dir",
"=",
"fix_data_dir",
"(",
"data_dir",
")",
"metadata_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"\"METADATA.json\"",
")",
"return",
"fileio",
".",
"read_metadata",
"(",
"metadata_file",
")"
] | Obtain the metadata for all basis sets
The metadata includes information such as the display name of the basis set,
its versions, and what elements are included in the basis set
The data is read from the METADATA.json file in the `data_dir` directory.
Parameters
----------
data_dir : str
Data directory with all the basis set information. By default,
it is in the 'data' subdirectory of this project. | [
"Obtain",
"the",
"metadata",
"for",
"all",
"basis",
"sets"
] | python | train |
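A hedged usage sketch for get_metadata(); the package-level alias and the 'display_name' key are assumptions about the project's METADATA.json layout:

import basis_set_exchange as bse

metadata = bse.get_metadata()  # reads METADATA.json from the bundled data dir
for name, info in sorted(metadata.items())[:3]:
    print(name, '->', info.get('display_name', '?'))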