repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition |
---|---|---|---|---|---|---|---|---|
abilian/abilian-core | abilian/core/sqlalchemy.py | https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/core/sqlalchemy.py#L114-L124 | def filter_cols(model, *filtered_columns):
"""Return column names for a model except named ones.
Useful for defer() for example to retain only columns of interest
"""
m = sa.orm.class_mapper(model)
return list(
{p.key for p in m.iterate_properties if hasattr(p, "columns")}.difference(
filtered_columns
)
) | [
"def",
"filter_cols",
"(",
"model",
",",
"*",
"filtered_columns",
")",
":",
"m",
"=",
"sa",
".",
"orm",
".",
"class_mapper",
"(",
"model",
")",
"return",
"list",
"(",
"{",
"p",
".",
"key",
"for",
"p",
"in",
"m",
".",
"iterate_properties",
"if",
"hasattr",
"(",
"p",
",",
"\"columns\"",
")",
"}",
".",
"difference",
"(",
"filtered_columns",
")",
")"
]
| Return column names for a model except named ones.
Useful for defer() for example to retain only columns of interest | [
"Return",
"columnsnames",
"for",
"a",
"model",
"except",
"named",
"ones",
"."
]
| python | train |
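The row above documents `filter_cols`, which returns every mapped column name except the ones passed in. A minimal usage sketch follows; it is not part of the dataset row, the `User` model and in-memory engine are illustrative assumptions, and the import path is taken from the row's `path` field.

```python
# Illustrative sketch: defer loading of every column except the ones of interest.
import sqlalchemy as sa
from sqlalchemy.orm import defer, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

from abilian.core.sqlalchemy import filter_cols  # module path taken from the row

Base = declarative_base()

class User(Base):
    __tablename__ = "user"
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.Unicode(64))
    biography = sa.Column(sa.UnicodeText)

# Everything except "id" and "name" is deferred, so only the columns of
# interest are loaded eagerly.
to_defer = filter_cols(User, "id", "name")
session = sessionmaker(bind=sa.create_engine("sqlite://"))()
query = session.query(User).options(*[defer(getattr(User, c)) for c in to_defer])
```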
OpenGov/carpenter | carpenter/carpenter.py | https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/carpenter.py#L110-L128 | def split_block_by_row_length(block, split_row_length):
'''
Splits the block by finding all rows with less consecutive, non-empty rows than the
min_row_length input.
'''
split_blocks = []
current_block = []
for row in block:
if row_content_length(row) <= split_row_length:
if current_block:
split_blocks.append(current_block)
split_blocks.append([row])
current_block = []
else:
current_block.append(row)
if current_block:
split_blocks.append(current_block)
return split_blocks | [
"def",
"split_block_by_row_length",
"(",
"block",
",",
"split_row_length",
")",
":",
"split_blocks",
"=",
"[",
"]",
"current_block",
"=",
"[",
"]",
"for",
"row",
"in",
"block",
":",
"if",
"row_content_length",
"(",
"row",
")",
"<=",
"split_row_length",
":",
"if",
"current_block",
":",
"split_blocks",
".",
"append",
"(",
"current_block",
")",
"split_blocks",
".",
"append",
"(",
"[",
"row",
"]",
")",
"current_block",
"=",
"[",
"]",
"else",
":",
"current_block",
".",
"append",
"(",
"row",
")",
"if",
"current_block",
":",
"split_blocks",
".",
"append",
"(",
"current_block",
")",
"return",
"split_blocks"
]
| Splits the block by finding all rows with less consecutive, non-empty rows than the
min_row_length input. | [
"Splits",
"the",
"block",
"by",
"finding",
"all",
"rows",
"with",
"less",
"consequetive",
"non",
"-",
"empty",
"rows",
"than",
"the",
"min_row_length",
"input",
"."
]
| python | train |
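A small, hypothetical call to the function documented in the row above. The grouping depends on carpenter's `row_content_length()` helper (assumed here to count a row's non-empty cells), so the comments describe the likely outcome rather than a guaranteed one; the import path is inferred from the row's `path` field.

```python
from carpenter.carpenter import split_block_by_row_length  # inferred import path

block = [
    ["Region", "", ""],   # short row: likely split off into its own block
    ["North", 10, 20],
    ["South", 30, 40],
    ["", "", ""],         # empty row: likely forces another split
    ["East", 50, 60],
]

# Rows whose content length is <= 1 act as separators between blocks.
for sub_block in split_block_by_row_length(block, 1):
    print(sub_block)
```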
google/openhtf | openhtf/util/conf.py | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L463-L481 | def help_text(self):
"""Return a string with all config keys and their descriptions."""
result = []
for name in sorted(self._declarations.keys()):
result.append(name)
result.append('-' * len(name))
decl = self._declarations[name]
if decl.description:
result.append(decl.description.strip())
else:
result.append('(no description found)')
if decl.has_default:
result.append('')
quotes = '"' if type(decl.default_value) is str else ''
result.append(' default_value={quotes}{val}{quotes}'.format(
quotes=quotes, val=decl.default_value))
result.append('')
result.append('')
return '\n'.join(result) | [
"def",
"help_text",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"for",
"name",
"in",
"sorted",
"(",
"self",
".",
"_declarations",
".",
"keys",
"(",
")",
")",
":",
"result",
".",
"append",
"(",
"name",
")",
"result",
".",
"append",
"(",
"'-'",
"*",
"len",
"(",
"name",
")",
")",
"decl",
"=",
"self",
".",
"_declarations",
"[",
"name",
"]",
"if",
"decl",
".",
"description",
":",
"result",
".",
"append",
"(",
"decl",
".",
"description",
".",
"strip",
"(",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"'(no description found)'",
")",
"if",
"decl",
".",
"has_default",
":",
"result",
".",
"append",
"(",
"''",
")",
"quotes",
"=",
"'\"'",
"if",
"type",
"(",
"decl",
".",
"default_value",
")",
"is",
"str",
"else",
"''",
"result",
".",
"append",
"(",
"' default_value={quotes}{val}{quotes}'",
".",
"format",
"(",
"quotes",
"=",
"quotes",
",",
"val",
"=",
"decl",
".",
"default_value",
")",
")",
"result",
".",
"append",
"(",
"''",
")",
"result",
".",
"append",
"(",
"''",
")",
"return",
"'\\n'",
".",
"join",
"(",
"result",
")"
]
| Return a string with all config keys and their descriptions. | [
"Return",
"a",
"string",
"with",
"all",
"config",
"keys",
"and",
"their",
"descriptions",
"."
]
| python | train |
stbraun/fuzzing | features/steps/ft_singleton.py | https://github.com/stbraun/fuzzing/blob/974a64472732d4e40db919d242149bf0856fe199/features/steps/ft_singleton.py#L74-L80 | def step_impl07(context):
"""Test for singleton property.
:param context: test context.
"""
assert context.st_1 is context.st_2
assert context.st_2 is context.st_3 | [
"def",
"step_impl07",
"(",
"context",
")",
":",
"assert",
"context",
".",
"st_1",
"is",
"context",
".",
"st_2",
"assert",
"context",
".",
"st_2",
"is",
"context",
".",
"st_3"
]
| Test for singleton property.
:param context: test context. | [
"Test",
"for",
"singleton",
"property",
"."
]
| python | train |
entrepreneur-interet-general/mkinx | mkinx/commands.py | https://github.com/entrepreneur-interet-general/mkinx/blob/70ccf81d3fad974283829ca4ec069a873341461d/mkinx/commands.py#L141-L243 | def build(args):
"""Build the documentation for the projects specified in the CLI.
It will do 4 different things for each project the
user asks for (see flags):
1. Update mkdocs's index.md file with links to project
documentations
2. Build these documentations
3. Update the documentations' index.html file to add a link
back to the home of all documentations
4. Build mkdoc's home documentation
Args:
args (ArgumentParser): parsed args from an ArgumentParser
"""
# Proceed?
go = False
# Current working directory
dir_path = Path().resolve()
# Set of all available projects in the dir
# Projects must contain a PROJECT_MARKER file.
all_projects = {
m
for m in os.listdir(dir_path)
if os.path.isdir(m) and "source" in os.listdir(dir_path / m)
}
if args.all and args.projects:
print(
"{}Can't use both the 'projects' and 'all' flags{}".format(
utils.colors.FAIL, utils.colors.ENDC
)
)
return
if not args.all and not args.projects:
print(
"{}You have to specify at least one project (or all){}".format(
utils.colors.FAIL, utils.colors.ENDC
)
)
return
if args.force:
go = True
projects = (
all_projects if args.all else all_projects.intersection(set(args.projects))
)
elif args.projects:
s = "You are about to build the docs for: "
s += "\n- {}\nContinue? (y/n) ".format("\n- ".join(args.projects))
if "y" in input(s):
go = True
projects = all_projects.intersection(set(args.projects))
elif args.all:
s = "You're about to build the docs for ALL projects."
s += "\nContinue? (y/n) "
if "y" in input(s):
go = True
projects = all_projects
if go:
# Update projects links
listed_projects = utils.get_listed_projects()
# Don't update projects which are not listed in the Documentation's
# Home if the -o flag was used
if args.only_index:
projects = listed_projects.intersection(projects)
print("projects", projects)
for project_to_build in projects:
# Re-build documentation
warnings.warn("[sphinx]")
if args.verbose:
os.system(
"cd {} && make clean && make html".format(
dir_path / project_to_build
)
)
else:
os.system(
"cd {} && make clean && make html > /dev/null".format(
dir_path / project_to_build
)
)
# Add link to Documentation's Home
utils.overwrite_view_source(project_to_build, dir_path)
if args.verbose:
print("\n>>>>>> Done {}\n\n\n".format(project_to_build))
# Build Documentation
if args.verbose:
os.system("mkdocs build")
print("\n\n>>>>>> Build Complete.")
else:
warnings.warn("[mkdocs]")
os.system("mkdocs build > /dev/null")
if args.offline:
utils.make_offline() | [
"def",
"build",
"(",
"args",
")",
":",
"# Proceed?",
"go",
"=",
"False",
"# Current working directory",
"dir_path",
"=",
"Path",
"(",
")",
".",
"resolve",
"(",
")",
"# Set of all available projects in the dir",
"# Projects must contain a PROJECT_MARKER file.",
"all_projects",
"=",
"{",
"m",
"for",
"m",
"in",
"os",
".",
"listdir",
"(",
"dir_path",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"m",
")",
"and",
"\"source\"",
"in",
"os",
".",
"listdir",
"(",
"dir_path",
"/",
"m",
")",
"}",
"if",
"args",
".",
"all",
"and",
"args",
".",
"projects",
":",
"print",
"(",
"\"{}Can't use both the 'projects' and 'all' flags{}\"",
".",
"format",
"(",
"utils",
".",
"colors",
".",
"FAIL",
",",
"utils",
".",
"colors",
".",
"ENDC",
")",
")",
"return",
"if",
"not",
"args",
".",
"all",
"and",
"not",
"args",
".",
"projects",
":",
"print",
"(",
"\"{}You have to specify at least one project (or all){}\"",
".",
"format",
"(",
"utils",
".",
"colors",
".",
"FAIL",
",",
"utils",
".",
"colors",
".",
"ENDC",
")",
")",
"return",
"if",
"args",
".",
"force",
":",
"go",
"=",
"True",
"projects",
"=",
"(",
"all_projects",
"if",
"args",
".",
"all",
"else",
"all_projects",
".",
"intersection",
"(",
"set",
"(",
"args",
".",
"projects",
")",
")",
")",
"elif",
"args",
".",
"projects",
":",
"s",
"=",
"\"You are about to build the docs for: \"",
"s",
"+=",
"\"\\n- {}\\nContinue? (y/n) \"",
".",
"format",
"(",
"\"\\n- \"",
".",
"join",
"(",
"args",
".",
"projects",
")",
")",
"if",
"\"y\"",
"in",
"input",
"(",
"s",
")",
":",
"go",
"=",
"True",
"projects",
"=",
"all_projects",
".",
"intersection",
"(",
"set",
"(",
"args",
".",
"projects",
")",
")",
"elif",
"args",
".",
"all",
":",
"s",
"=",
"\"You're about to build the docs for ALL projects.\"",
"s",
"+=",
"\"\\nContinue? (y/n) \"",
"if",
"\"y\"",
"in",
"input",
"(",
"s",
")",
":",
"go",
"=",
"True",
"projects",
"=",
"all_projects",
"if",
"go",
":",
"# Update projects links",
"listed_projects",
"=",
"utils",
".",
"get_listed_projects",
"(",
")",
"# Don't update projects which are not listed in the Documentation's",
"# Home if the -o flag was used",
"if",
"args",
".",
"only_index",
":",
"projects",
"=",
"listed_projects",
".",
"intersection",
"(",
"projects",
")",
"print",
"(",
"\"projects\"",
",",
"projects",
")",
"for",
"project_to_build",
"in",
"projects",
":",
"# Re-build documentation",
"warnings",
".",
"warn",
"(",
"\"[sphinx]\"",
")",
"if",
"args",
".",
"verbose",
":",
"os",
".",
"system",
"(",
"\"cd {} && make clean && make html\"",
".",
"format",
"(",
"dir_path",
"/",
"project_to_build",
")",
")",
"else",
":",
"os",
".",
"system",
"(",
"\"cd {} && make clean && make html > /dev/null\"",
".",
"format",
"(",
"dir_path",
"/",
"project_to_build",
")",
")",
"# Add link to Documentation's Home",
"utils",
".",
"overwrite_view_source",
"(",
"project_to_build",
",",
"dir_path",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\n>>>>>> Done {}\\n\\n\\n\"",
".",
"format",
"(",
"project_to_build",
")",
")",
"# Build Documentation",
"if",
"args",
".",
"verbose",
":",
"os",
".",
"system",
"(",
"\"mkdocs build\"",
")",
"print",
"(",
"\"\\n\\n>>>>>> Build Complete.\"",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"[mkdocs]\"",
")",
"os",
".",
"system",
"(",
"\"mkdocs build > /dev/null\"",
")",
"if",
"args",
".",
"offline",
":",
"utils",
".",
"make_offline",
"(",
")"
]
| Build the documentation for the projects specified in the CLI.
It will do 4 different things for each project the
user asks for (see flags):
1. Update mkdocs's index.md file with links to project
documentations
2. Build these documentations
3. Update the documentations' index.html file to add a link
back to the home of all documentations
4. Build mkdoc's home documentation
Args:
args (ArgumentParser): parsed args from an ArgumentParser | [
"Build",
"the",
"documentation",
"for",
"the",
"projects",
"specified",
"in",
"the",
"CLI",
".",
"It",
"will",
"do",
"4",
"different",
"things",
"for",
"each",
"project",
"the",
"user",
"asks",
"for",
"(",
"see",
"flags",
")",
":",
"1",
".",
"Update",
"mkdocs",
"s",
"index",
".",
"md",
"file",
"with",
"links",
"to",
"project",
"documentations",
"2",
".",
"Build",
"these",
"documentations",
"3",
".",
"Update",
"the",
"documentations",
"index",
".",
"html",
"file",
"to",
"add",
"a",
"link",
"back",
"to",
"the",
"home",
"of",
"all",
"documentations",
"4",
".",
"Build",
"mkdoc",
"s",
"home",
"documentation"
]
| python | train |
librosa/librosa | librosa/core/harmonic.py | https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/harmonic.py#L13-L104 | def salience(S, freqs, h_range, weights=None, aggregate=None,
filter_peaks=True, fill_value=np.nan, kind='linear', axis=0):
"""Harmonic salience function.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input time frequency magnitude representation (stft, ifgram, etc).
Must be real-valued and non-negative.
freqs : np.ndarray, shape=(S.shape[axis])
The frequency values corresponding to S's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to include in salience computation. The first harmonic (1)
corresponds to `S` itself. Values less than one (e.g., 1/2) correspond
to sub-harmonics.
weights : list-like
The weight to apply to each harmonic in the summation. (default:
uniform weights). Must be the same length as `harmonics`.
aggregate : function
aggregation function (default: `np.average`)
If `aggregate=np.average`, then a weighted average is
computed per-harmonic according to the specified weights.
For all other aggregation functions, all harmonics
are treated equally.
filter_peaks : bool
If true, returns harmonic summation only on frequencies of peak
magnitude. Otherwise returns harmonic summation over the full spectrum.
Defaults to True.
fill_value : float
The value to fill non-peaks in the output representation. (default:
np.nan) Only used if `filter_peaks == True`.
kind : str
Interpolation type for harmonic estimation.
See `scipy.interpolate.interp1d`.
axis : int
The axis along which to compute harmonics
Returns
-------
S_sal : np.ndarray, shape=(len(h_range), [x.shape])
`S_sal` will have the same shape as `S`, and measure
the overall harmonic energy at each frequency.
See Also
--------
interp_harmonics
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> S = np.abs(librosa.stft(y))
>>> freqs = librosa.core.fft_frequencies(sr)
>>> harms = [1, 2, 3, 4]
>>> weights = [1.0, 0.5, 0.33, 0.25]
>>> S_sal = librosa.salience(S, freqs, harms, weights, fill_value=0)
>>> print(S_sal.shape)
(1025, 646)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(S_sal,
... ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Salience spectrogram')
>>> plt.tight_layout()
"""
if aggregate is None:
aggregate = np.average
if weights is None:
weights = np.ones((len(h_range), ))
else:
weights = np.array(weights, dtype=float)
S_harm = interp_harmonics(S, freqs, h_range, kind=kind, axis=axis)
if aggregate is np.average:
S_sal = aggregate(S_harm, axis=0, weights=weights)
else:
S_sal = aggregate(S_harm, axis=0)
if filter_peaks:
S_peaks = scipy.signal.argrelmax(S, axis=0)
S_out = np.empty(S.shape)
S_out.fill(fill_value)
S_out[S_peaks[0], S_peaks[1]] = S_sal[S_peaks[0], S_peaks[1]]
S_sal = S_out
return S_sal | [
"def",
"salience",
"(",
"S",
",",
"freqs",
",",
"h_range",
",",
"weights",
"=",
"None",
",",
"aggregate",
"=",
"None",
",",
"filter_peaks",
"=",
"True",
",",
"fill_value",
"=",
"np",
".",
"nan",
",",
"kind",
"=",
"'linear'",
",",
"axis",
"=",
"0",
")",
":",
"if",
"aggregate",
"is",
"None",
":",
"aggregate",
"=",
"np",
".",
"average",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"np",
".",
"ones",
"(",
"(",
"len",
"(",
"h_range",
")",
",",
")",
")",
"else",
":",
"weights",
"=",
"np",
".",
"array",
"(",
"weights",
",",
"dtype",
"=",
"float",
")",
"S_harm",
"=",
"interp_harmonics",
"(",
"S",
",",
"freqs",
",",
"h_range",
",",
"kind",
"=",
"kind",
",",
"axis",
"=",
"axis",
")",
"if",
"aggregate",
"is",
"np",
".",
"average",
":",
"S_sal",
"=",
"aggregate",
"(",
"S_harm",
",",
"axis",
"=",
"0",
",",
"weights",
"=",
"weights",
")",
"else",
":",
"S_sal",
"=",
"aggregate",
"(",
"S_harm",
",",
"axis",
"=",
"0",
")",
"if",
"filter_peaks",
":",
"S_peaks",
"=",
"scipy",
".",
"signal",
".",
"argrelmax",
"(",
"S",
",",
"axis",
"=",
"0",
")",
"S_out",
"=",
"np",
".",
"empty",
"(",
"S",
".",
"shape",
")",
"S_out",
".",
"fill",
"(",
"fill_value",
")",
"S_out",
"[",
"S_peaks",
"[",
"0",
"]",
",",
"S_peaks",
"[",
"1",
"]",
"]",
"=",
"S_sal",
"[",
"S_peaks",
"[",
"0",
"]",
",",
"S_peaks",
"[",
"1",
"]",
"]",
"S_sal",
"=",
"S_out",
"return",
"S_sal"
]
| Harmonic salience function.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input time frequency magnitude representation (stft, ifgram, etc).
Must be real-valued and non-negative.
freqs : np.ndarray, shape=(S.shape[axis])
The frequency values corresponding to S's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to include in salience computation. The first harmonic (1)
corresponds to `S` itself. Values less than one (e.g., 1/2) correspond
to sub-harmonics.
weights : list-like
The weight to apply to each harmonic in the summation. (default:
uniform weights). Must be the same length as `harmonics`.
aggregate : function
aggregation function (default: `np.average`)
If `aggregate=np.average`, then a weighted average is
computed per-harmonic according to the specified weights.
For all other aggregation functions, all harmonics
are treated equally.
filter_peaks : bool
If true, returns harmonic summation only on frequencies of peak
magnitude. Otherwise returns harmonic summation over the full spectrum.
Defaults to True.
fill_value : float
The value to fill non-peaks in the output representation. (default:
np.nan) Only used if `filter_peaks == True`.
kind : str
Interpolation type for harmonic estimation.
See `scipy.interpolate.interp1d`.
axis : int
The axis along which to compute harmonics
Returns
-------
S_sal : np.ndarray, shape=(len(h_range), [x.shape])
`S_sal` will have the same shape as `S`, and measure
the overall harmonic energy at each frequency.
See Also
--------
interp_harmonics
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> S = np.abs(librosa.stft(y))
>>> freqs = librosa.core.fft_frequencies(sr)
>>> harms = [1, 2, 3, 4]
>>> weights = [1.0, 0.5, 0.33, 0.25]
>>> S_sal = librosa.salience(S, freqs, harms, weights, fill_value=0)
>>> print(S_sal.shape)
(1025, 646)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(S_sal,
... ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Salience spectrogram')
>>> plt.tight_layout() | [
"Harmonic",
"salience",
"function",
"."
]
| python | test |
henzk/django-productline | django_productline/features/staticfiles/tasks.py | https://github.com/henzk/django-productline/blob/24ff156924c1a8c07b99cbb8a1de0a42b8d81f60/django_productline/features/staticfiles/tasks.py#L8-L22 | def collectstatic(force=False):
"""
collect static files for production httpd
If run with ``settings.DEBUG==True``, this is a no-op
unless ``force`` is set to ``True``
"""
# noise reduction: only collectstatic if not in debug mode
from django.conf import settings
if force or not settings.DEBUG:
tasks.manage('collectstatic', '--noinput')
print('... finished collectstatic')
print('')
else:
print('... skipping collectstatic as settings.DEBUG=True; If you want to generate staticfiles anyway, run ape collectstatic instead;') | [
"def",
"collectstatic",
"(",
"force",
"=",
"False",
")",
":",
"# noise reduction: only collectstatic if not in debug mode",
"from",
"django",
".",
"conf",
"import",
"settings",
"if",
"force",
"or",
"not",
"settings",
".",
"DEBUG",
":",
"tasks",
".",
"manage",
"(",
"'collectstatic'",
",",
"'--noinput'",
")",
"print",
"(",
"'... finished collectstatic'",
")",
"print",
"(",
"''",
")",
"else",
":",
"print",
"(",
"'... skipping collectstatic as settings.DEBUG=True; If you want to generate staticfiles anyway, run ape collectstatic instead;'",
")"
]
| collect static files for production httpd
If run with ``settings.DEBUG==True``, this is a no-op
unless ``force`` is set to ``True`` | [
"collect",
"static",
"files",
"for",
"production",
"httpd"
]
| python | train |
pypa/pipenv | pipenv/patched/notpip/_internal/vcs/bazaar.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/vcs/bazaar.py#L37-L51 | def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
# Remove the location to make sure Bazaar can export it correctly
if os.path.exists(location):
rmtree(location)
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path)
self.run_command(
['export', location],
cwd=temp_dir.path, show_stdout=False,
) | [
"def",
"export",
"(",
"self",
",",
"location",
")",
":",
"# Remove the location to make sure Bazaar can export it correctly",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"location",
")",
":",
"rmtree",
"(",
"location",
")",
"with",
"TempDirectory",
"(",
"kind",
"=",
"\"export\"",
")",
"as",
"temp_dir",
":",
"self",
".",
"unpack",
"(",
"temp_dir",
".",
"path",
")",
"self",
".",
"run_command",
"(",
"[",
"'export'",
",",
"location",
"]",
",",
"cwd",
"=",
"temp_dir",
".",
"path",
",",
"show_stdout",
"=",
"False",
",",
")"
]
| Export the Bazaar repository at the url to the destination location | [
"Export",
"the",
"Bazaar",
"repository",
"at",
"the",
"url",
"to",
"the",
"destination",
"location"
]
| python | train |
Tivix/django-common | django_common/db_fields.py | https://github.com/Tivix/django-common/blob/407d208121011a8425139e541629554114d96c18/django_common/db_fields.py#L46-L55 | def get_prep_value(self, value):
"""Convert our JSON object to a string before we save"""
if value == "":
return None
if isinstance(value, dict):
value = json.dumps(value, cls=DjangoJSONEncoder)
return value | [
"def",
"get_prep_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"\"\"",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"value",
"=",
"json",
".",
"dumps",
"(",
"value",
",",
"cls",
"=",
"DjangoJSONEncoder",
")",
"return",
"value"
]
| Convert our JSON object to a string before we save | [
"Convert",
"our",
"JSON",
"object",
"to",
"a",
"string",
"before",
"we",
"save"
]
| python | train |
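A short illustration of the conversion the row above performs before a value reaches the database; it assumes a configured Django project in which `django_common.db_fields` imports cleanly, and the import path is taken from the row.

```python
from django_common.db_fields import JSONField  # path taken from the row

field = JSONField()
print(field.get_prep_value({"a": 1}))  # '{"a": 1}': dicts are JSON-serialized
print(field.get_prep_value(""))        # None: empty strings are stored as NULL
```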
kimdhamilton/merkle-proofs | merkleproof/MerkleTree.py | https://github.com/kimdhamilton/merkle-proofs/blob/77551cc65f72b50ac203f10a5069cb1a5b3ffb49/merkleproof/MerkleTree.py#L162-L171 | def validate_proof(self, proof, target_hash, merkle_root):
"""
Takes a proof array, a target hash value, and a merkle root
Checks the validity of the proof and return true or false
:param proof:
:param target_hash:
:param merkle_root:
:return:
"""
return validate_proof(proof, target_hash, merkle_root, self.hash_f) | [
"def",
"validate_proof",
"(",
"self",
",",
"proof",
",",
"target_hash",
",",
"merkle_root",
")",
":",
"return",
"validate_proof",
"(",
"proof",
",",
"target_hash",
",",
"merkle_root",
",",
"self",
".",
"hash_f",
")"
]
| Takes a proof array, a target hash value, and a merkle root
Checks the validity of the proof and return true or false
:param proof:
:param target_hash:
:param merkle_root:
:return: | [
"Takes",
"a",
"proof",
"array",
"a",
"target",
"hash",
"value",
"and",
"a",
"merkle",
"root",
"Checks",
"the",
"validity",
"of",
"the",
"proof",
"and",
"return",
"true",
"or",
"false",
":",
"param",
"proof",
":",
":",
"param",
"target_hash",
":",
":",
"param",
"merkle_root",
":",
":",
"return",
":"
]
| python | train |
myusuf3/delorean | delorean/interface.py | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L116-L121 | def range_hourly(start=None, stop=None, timezone='UTC', count=None):
"""
This is an alternative way of generating sets of Delorean objects with
HOURLY stops
"""
return stops(start=start, stop=stop, freq=HOURLY, timezone=timezone, count=count) | [
"def",
"range_hourly",
"(",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"timezone",
"=",
"'UTC'",
",",
"count",
"=",
"None",
")",
":",
"return",
"stops",
"(",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"freq",
"=",
"HOURLY",
",",
"timezone",
"=",
"timezone",
",",
"count",
"=",
"count",
")"
]
| This is an alternative way of generating sets of Delorean objects with
HOURLY stops | [
"This",
"an",
"alternative",
"way",
"to",
"generating",
"sets",
"of",
"Delorean",
"objects",
"with",
"HOURLY",
"stops"
]
| python | train |
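An illustrative call to the helper documented above; the import path comes from the row's `path` field, and the start date and count are arbitrary example values.

```python
from datetime import datetime
from delorean.interface import range_hourly

# Yields Delorean objects one hour apart, starting at the given datetime.
for stop in range_hourly(start=datetime(2019, 1, 1), timezone='UTC', count=3):
    print(stop)
```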
apache/airflow | airflow/utils/timezone.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/timezone.py#L82-L95 | def convert_to_utc(value):
"""
Returns the datetime with the default timezone added if timezone
information was not associated
:param value: datetime
:return: datetime with tzinfo
"""
if not value:
return value
if not is_localized(value):
value = pendulum.instance(value, TIMEZONE)
return value.astimezone(utc) | [
"def",
"convert_to_utc",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"value",
"if",
"not",
"is_localized",
"(",
"value",
")",
":",
"value",
"=",
"pendulum",
".",
"instance",
"(",
"value",
",",
"TIMEZONE",
")",
"return",
"value",
".",
"astimezone",
"(",
"utc",
")"
]
| Returns the datetime with the default timezone added if timezone
information was not associated
:param value: datetime
:return: datetime with tzinfo | [
"Returns",
"the",
"datetime",
"with",
"the",
"default",
"timezone",
"added",
"if",
"timezone",
"information",
"was",
"not",
"associated",
":",
"param",
"value",
":",
"datetime",
":",
"return",
":",
"datetime",
"with",
"tzinfo"
]
| python | test |
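A minimal illustration of the behavior described in the row above, assuming an Airflow installation where this module is importable; the default timezone is whatever Airflow's configuration specifies.

```python
from datetime import datetime
from airflow.utils.timezone import convert_to_utc

naive = datetime(2019, 1, 1, 12, 0)  # no tzinfo attached
aware = convert_to_utc(naive)        # localized to the default timezone, then converted
print(aware)                         # timezone-aware, expressed in UTC
```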
Diaoul/pyjulius | pyjulius/core.py | https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L191-L203 | def _readxml(self):
"""Read a block and return the result as XML
:return: block as xml
:rtype: xml.etree.ElementTree
"""
block = re.sub(r'<(/?)s>', r'<\1s>', self._readblock())
try:
xml = XML(block)
except ParseError:
xml = None
return xml | [
"def",
"_readxml",
"(",
"self",
")",
":",
"block",
"=",
"re",
".",
"sub",
"(",
"r'<(/?)s>'",
",",
"r'<\\1s>'",
",",
"self",
".",
"_readblock",
"(",
")",
")",
"try",
":",
"xml",
"=",
"XML",
"(",
"block",
")",
"except",
"ParseError",
":",
"xml",
"=",
"None",
"return",
"xml"
]
| Read a block and return the result as XML
:return: block as xml
:rtype: xml.etree.ElementTree | [
"Read",
"a",
"block",
"and",
"return",
"the",
"result",
"as",
"XML"
]
| python | valid |
thespacedoctor/neddy | neddy/_basesearch.py | https://github.com/thespacedoctor/neddy/blob/f32653b7d6a39a2c46c5845f83b3a29056311e5e/neddy/_basesearch.py#L88-L146 | def _parse_the_ned_position_results(
self,
ra,
dec,
nedResults):
"""
*parse the ned results*
**Key Arguments:**
- ``ra`` -- the search ra
- ``dec`` -- the search dec
**Return:**
- ``results`` -- list of result dictionaries
"""
self.log.info('starting the ``_parse_the_ned_results`` method')
results = []
resultLen = 0
if nedResults:
# OPEN THE RESULT FILE FROM NED
pathToReadFile = nedResults
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(
pathToReadFile, encoding='utf-8', mode='rb')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
readFile.close()
# CHECK FOR ERRORS
if "Results from query to NASA/IPAC Extragalactic Database" not in thisData:
print "something went wrong with the NED query"
self.log.error(
"something went wrong with the NED query" % locals())
sys.exit(0)
# SEARCH FROM MATCHES IN RESULTS FILE
matchObject = re.search(
r"No\.\|Object Name.*?\n(.*)", thisData, re.S)
if matchObject:
theseLines = string.split(matchObject.group(), '\n')
resultLen = len(theseLines)
csvReader = csv.DictReader(
theseLines, dialect='excel', delimiter='|', quotechar='"')
for row in csvReader:
thisEntry = {"searchRa": ra, "searchDec": dec,
"matchName": row["Object Name"].strip()}
results.append(thisEntry)
if self.nearestOnly:
break
self.log.info('completed the ``_parse_the_ned_results`` method')
return results, resultLen | [
"def",
"_parse_the_ned_position_results",
"(",
"self",
",",
"ra",
",",
"dec",
",",
"nedResults",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'starting the ``_parse_the_ned_results`` method'",
")",
"results",
"=",
"[",
"]",
"resultLen",
"=",
"0",
"if",
"nedResults",
":",
"# OPEN THE RESULT FILE FROM NED",
"pathToReadFile",
"=",
"nedResults",
"try",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"attempting to open the file %s\"",
"%",
"(",
"pathToReadFile",
",",
")",
")",
"readFile",
"=",
"codecs",
".",
"open",
"(",
"pathToReadFile",
",",
"encoding",
"=",
"'utf-8'",
",",
"mode",
"=",
"'rb'",
")",
"thisData",
"=",
"readFile",
".",
"read",
"(",
")",
"readFile",
".",
"close",
"(",
")",
"except",
"IOError",
",",
"e",
":",
"message",
"=",
"'could not open the file %s'",
"%",
"(",
"pathToReadFile",
",",
")",
"self",
".",
"log",
".",
"critical",
"(",
"message",
")",
"raise",
"IOError",
"(",
"message",
")",
"readFile",
".",
"close",
"(",
")",
"# CHECK FOR ERRORS",
"if",
"\"Results from query to NASA/IPAC Extragalactic Database\"",
"not",
"in",
"thisData",
":",
"print",
"\"something went wrong with the NED query\"",
"self",
".",
"log",
".",
"error",
"(",
"\"something went wrong with the NED query\"",
"%",
"locals",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"# SEARCH FROM MATCHES IN RESULTS FILE",
"matchObject",
"=",
"re",
".",
"search",
"(",
"r\"No\\.\\|Object Name.*?\\n(.*)\"",
",",
"thisData",
",",
"re",
".",
"S",
")",
"if",
"matchObject",
":",
"theseLines",
"=",
"string",
".",
"split",
"(",
"matchObject",
".",
"group",
"(",
")",
",",
"'\\n'",
")",
"resultLen",
"=",
"len",
"(",
"theseLines",
")",
"csvReader",
"=",
"csv",
".",
"DictReader",
"(",
"theseLines",
",",
"dialect",
"=",
"'excel'",
",",
"delimiter",
"=",
"'|'",
",",
"quotechar",
"=",
"'\"'",
")",
"for",
"row",
"in",
"csvReader",
":",
"thisEntry",
"=",
"{",
"\"searchRa\"",
":",
"ra",
",",
"\"searchDec\"",
":",
"dec",
",",
"\"matchName\"",
":",
"row",
"[",
"\"Object Name\"",
"]",
".",
"strip",
"(",
")",
"}",
"results",
".",
"append",
"(",
"thisEntry",
")",
"if",
"self",
".",
"nearestOnly",
":",
"break",
"self",
".",
"log",
".",
"info",
"(",
"'completed the ``_parse_the_ned_results`` method'",
")",
"return",
"results",
",",
"resultLen"
]
| *parse the ned results*
**Key Arguments:**
- ``ra`` -- the search ra
- ``dec`` -- the search dec
**Return:**
- ``results`` -- list of result dictionaries | [
"*",
"parse",
"the",
"ned",
"results",
"*"
]
| python | train |
tensorflow/cleverhans | cleverhans/attacks/deep_fool.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/deep_fool.py#L168-L252 | def deepfool_attack(sess,
x,
predictions,
logits,
grads,
sample,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=None):
"""
TensorFlow implementation of DeepFool.
Paper link: see https://arxiv.org/pdf/1511.04599.pdf
:param sess: TF session
:param x: The input placeholder
:param predictions: The model's sorted symbolic output of logits, only the
top nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
:param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param sample: Numpy array with sample input
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:return: Adversarial examples
"""
adv_x = copy.copy(sample)
# Initialize the loop variables
iteration = 0
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
w = np.squeeze(np.zeros(sample.shape[1:])) # same shape as original image
r_tot = np.zeros(sample.shape)
original = current # use original label as the reference
_logger.debug(
"Starting DeepFool attack up to %s iterations", max_iter)
# Repeat this main loop until we have achieved misclassification
while (np.any(current == original) and iteration < max_iter):
if iteration % 5 == 0 and iteration > 0:
_logger.info("Attack result at iteration %s is %s", iteration, current)
gradients = sess.run(grads, feed_dict={x: adv_x})
predictions_val = sess.run(predictions, feed_dict={x: adv_x})
for idx in range(sample.shape[0]):
pert = np.inf
if current[idx] != original[idx]:
continue
for k in range(1, nb_candidate):
w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
f_k = predictions_val[idx, k] - predictions_val[idx, 0]
# adding value 0.00001 to prevent f_k = 0
pert_k = (abs(f_k) + 0.00001) / np.linalg.norm(w_k.flatten())
if pert_k < pert:
pert = pert_k
w = w_k
r_i = pert * w / np.linalg.norm(w)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
adv_x = np.clip(r_tot + sample, clip_min, clip_max)
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
# Update loop variables
iteration = iteration + 1
# need more revision, including info like how many succeed
_logger.info("Attack result at iteration %s is %s", iteration, current)
_logger.info("%s out of %s become adversarial examples at iteration %s",
sum(current != original),
sample.shape[0],
iteration)
# need to clip this image into the given range
adv_x = np.clip((1 + overshoot) * r_tot + sample, clip_min, clip_max)
return adv_x | [
"def",
"deepfool_attack",
"(",
"sess",
",",
"x",
",",
"predictions",
",",
"logits",
",",
"grads",
",",
"sample",
",",
"nb_candidate",
",",
"overshoot",
",",
"max_iter",
",",
"clip_min",
",",
"clip_max",
",",
"feed",
"=",
"None",
")",
":",
"adv_x",
"=",
"copy",
".",
"copy",
"(",
"sample",
")",
"# Initialize the loop variables",
"iteration",
"=",
"0",
"current",
"=",
"utils_tf",
".",
"model_argmax",
"(",
"sess",
",",
"x",
",",
"logits",
",",
"adv_x",
",",
"feed",
"=",
"feed",
")",
"if",
"current",
".",
"shape",
"==",
"(",
")",
":",
"current",
"=",
"np",
".",
"array",
"(",
"[",
"current",
"]",
")",
"w",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"zeros",
"(",
"sample",
".",
"shape",
"[",
"1",
":",
"]",
")",
")",
"# same shape as original image",
"r_tot",
"=",
"np",
".",
"zeros",
"(",
"sample",
".",
"shape",
")",
"original",
"=",
"current",
"# use original label as the reference",
"_logger",
".",
"debug",
"(",
"\"Starting DeepFool attack up to %s iterations\"",
",",
"max_iter",
")",
"# Repeat this main loop until we have achieved misclassification",
"while",
"(",
"np",
".",
"any",
"(",
"current",
"==",
"original",
")",
"and",
"iteration",
"<",
"max_iter",
")",
":",
"if",
"iteration",
"%",
"5",
"==",
"0",
"and",
"iteration",
">",
"0",
":",
"_logger",
".",
"info",
"(",
"\"Attack result at iteration %s is %s\"",
",",
"iteration",
",",
"current",
")",
"gradients",
"=",
"sess",
".",
"run",
"(",
"grads",
",",
"feed_dict",
"=",
"{",
"x",
":",
"adv_x",
"}",
")",
"predictions_val",
"=",
"sess",
".",
"run",
"(",
"predictions",
",",
"feed_dict",
"=",
"{",
"x",
":",
"adv_x",
"}",
")",
"for",
"idx",
"in",
"range",
"(",
"sample",
".",
"shape",
"[",
"0",
"]",
")",
":",
"pert",
"=",
"np",
".",
"inf",
"if",
"current",
"[",
"idx",
"]",
"!=",
"original",
"[",
"idx",
"]",
":",
"continue",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"nb_candidate",
")",
":",
"w_k",
"=",
"gradients",
"[",
"idx",
",",
"k",
",",
"...",
"]",
"-",
"gradients",
"[",
"idx",
",",
"0",
",",
"...",
"]",
"f_k",
"=",
"predictions_val",
"[",
"idx",
",",
"k",
"]",
"-",
"predictions_val",
"[",
"idx",
",",
"0",
"]",
"# adding value 0.00001 to prevent f_k = 0",
"pert_k",
"=",
"(",
"abs",
"(",
"f_k",
")",
"+",
"0.00001",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"w_k",
".",
"flatten",
"(",
")",
")",
"if",
"pert_k",
"<",
"pert",
":",
"pert",
"=",
"pert_k",
"w",
"=",
"w_k",
"r_i",
"=",
"pert",
"*",
"w",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"w",
")",
"r_tot",
"[",
"idx",
",",
"...",
"]",
"=",
"r_tot",
"[",
"idx",
",",
"...",
"]",
"+",
"r_i",
"adv_x",
"=",
"np",
".",
"clip",
"(",
"r_tot",
"+",
"sample",
",",
"clip_min",
",",
"clip_max",
")",
"current",
"=",
"utils_tf",
".",
"model_argmax",
"(",
"sess",
",",
"x",
",",
"logits",
",",
"adv_x",
",",
"feed",
"=",
"feed",
")",
"if",
"current",
".",
"shape",
"==",
"(",
")",
":",
"current",
"=",
"np",
".",
"array",
"(",
"[",
"current",
"]",
")",
"# Update loop variables",
"iteration",
"=",
"iteration",
"+",
"1",
"# need more revision, including info like how many succeed",
"_logger",
".",
"info",
"(",
"\"Attack result at iteration %s is %s\"",
",",
"iteration",
",",
"current",
")",
"_logger",
".",
"info",
"(",
"\"%s out of %s become adversarial examples at iteration %s\"",
",",
"sum",
"(",
"current",
"!=",
"original",
")",
",",
"sample",
".",
"shape",
"[",
"0",
"]",
",",
"iteration",
")",
"# need to clip this image into the given range",
"adv_x",
"=",
"np",
".",
"clip",
"(",
"(",
"1",
"+",
"overshoot",
")",
"*",
"r_tot",
"+",
"sample",
",",
"clip_min",
",",
"clip_max",
")",
"return",
"adv_x"
]
| TensorFlow implementation of DeepFool.
Paper link: see https://arxiv.org/pdf/1511.04599.pdf
:param sess: TF session
:param x: The input placeholder
:param predictions: The model's sorted symbolic output of logits, only the
top nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
:param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param sample: Numpy array with sample input
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:return: Adversarial examples | [
"TensorFlow",
"implementation",
"of",
"DeepFool",
".",
"Paper",
"link",
":",
"see",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1511",
".",
"04599",
".",
"pdf",
":",
"param",
"sess",
":",
"TF",
"session",
":",
"param",
"x",
":",
"The",
"input",
"placeholder",
":",
"param",
"predictions",
":",
"The",
"model",
"s",
"sorted",
"symbolic",
"output",
"of",
"logits",
"only",
"the",
"top",
"nb_candidate",
"classes",
"are",
"contained",
":",
"param",
"logits",
":",
"The",
"model",
"s",
"unnormalized",
"output",
"tensor",
"(",
"the",
"input",
"to",
"the",
"softmax",
"layer",
")",
":",
"param",
"grads",
":",
"Symbolic",
"gradients",
"of",
"the",
"top",
"nb_candidate",
"classes",
"procuded",
"from",
"gradient_graph",
":",
"param",
"sample",
":",
"Numpy",
"array",
"with",
"sample",
"input",
":",
"param",
"nb_candidate",
":",
"The",
"number",
"of",
"classes",
"to",
"test",
"against",
"i",
".",
"e",
".",
"deepfool",
"only",
"consider",
"nb_candidate",
"classes",
"when",
"attacking",
"(",
"thus",
"accelerate",
"speed",
")",
".",
"The",
"nb_candidate",
"classes",
"are",
"chosen",
"according",
"to",
"the",
"prediction",
"confidence",
"during",
"implementation",
".",
":",
"param",
"overshoot",
":",
"A",
"termination",
"criterion",
"to",
"prevent",
"vanishing",
"updates",
":",
"param",
"max_iter",
":",
"Maximum",
"number",
"of",
"iteration",
"for",
"DeepFool",
":",
"param",
"clip_min",
":",
"Minimum",
"value",
"for",
"components",
"of",
"the",
"example",
"returned",
":",
"param",
"clip_max",
":",
"Maximum",
"value",
"for",
"components",
"of",
"the",
"example",
"returned",
":",
"return",
":",
"Adversarial",
"examples"
]
| python | train |
unitedstack/steth | stetho/agent/api.py | https://github.com/unitedstack/steth/blob/955884ceebf3bdc474c93cc5cf555e67d16458f1/stetho/agent/api.py#L132-L141 | def setup_iperf_server(self, protocol='TCP', port=5001, window=None):
"""iperf -s
"""
iperf = iperf_driver.IPerfDriver()
try:
data = iperf.start_server(protocol='TCP', port=5001, window=None)
return agent_utils.make_response(code=0, data=data)
except:
message = 'Start iperf server failed!'
return agent_utils.make_response(code=1, message=message) | [
"def",
"setup_iperf_server",
"(",
"self",
",",
"protocol",
"=",
"'TCP'",
",",
"port",
"=",
"5001",
",",
"window",
"=",
"None",
")",
":",
"iperf",
"=",
"iperf_driver",
".",
"IPerfDriver",
"(",
")",
"try",
":",
"data",
"=",
"iperf",
".",
"start_server",
"(",
"protocol",
"=",
"'TCP'",
",",
"port",
"=",
"5001",
",",
"window",
"=",
"None",
")",
"return",
"agent_utils",
".",
"make_response",
"(",
"code",
"=",
"0",
",",
"data",
"=",
"data",
")",
"except",
":",
"message",
"=",
"'Start iperf server failed!'",
"return",
"agent_utils",
".",
"make_response",
"(",
"code",
"=",
"1",
",",
"message",
"=",
"message",
")"
]
| iperf -s | [
"iperf",
"-",
"s"
]
| python | train |
leancloud/python-sdk | leancloud/object_.py | https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/object_.py#L466-L474 | def remove(self, attr, item):
"""
在对象此字段对应的数组中,将指定对象全部移除。
:param attr: 字段名
:param item: 要移除的对象
:return: 当前对象
"""
return self.set(attr, operation.Remove([item])) | [
"def",
"remove",
"(",
"self",
",",
"attr",
",",
"item",
")",
":",
"return",
"self",
".",
"set",
"(",
"attr",
",",
"operation",
".",
"Remove",
"(",
"[",
"item",
"]",
")",
")"
]
| Remove all occurrences of the given object from the array stored in this field.
:param attr: field name
:param item: the object to remove
:return: the current object | [
"在对象此字段对应的数组中,将指定对象全部移除。"
]
| python | train |
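The row above documents an atomic removal from an array field. Below is an illustrative sketch; it assumes `leancloud.init()` has already been called with valid application credentials, and the `Todo` class and tag values are made up for the example.

```python
import leancloud

Todo = leancloud.Object.extend('Todo')
todo = Todo()
todo.set('tags', ['work', 'urgent', 'work'])
todo.remove('tags', 'work')  # removes every 'work' entry from the array
todo.save()                  # persists the change (requires valid credentials)
```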
hhatto/autopep8 | autopep8.py | https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L2906-L2941 | def _execute_pep8(pep8_options, source):
"""Execute pycodestyle via python method calls."""
class QuietReport(pycodestyle.BaseReport):
"""Version of checker that does not print."""
def __init__(self, options):
super(QuietReport, self).__init__(options)
self.__full_error_results = []
def error(self, line_number, offset, text, check):
"""Collect errors."""
code = super(QuietReport, self).error(line_number,
offset,
text,
check)
if code:
self.__full_error_results.append(
{'id': code,
'line': line_number,
'column': offset + 1,
'info': text})
def full_error_results(self):
"""Return error results in detail.
Results are in the form of a list of dictionaries. Each
dictionary contains 'id', 'line', 'column', and 'info'.
"""
return self.__full_error_results
checker = pycodestyle.Checker('', lines=source, reporter=QuietReport,
**pep8_options)
checker.check_all()
return checker.report.full_error_results() | [
"def",
"_execute_pep8",
"(",
"pep8_options",
",",
"source",
")",
":",
"class",
"QuietReport",
"(",
"pycodestyle",
".",
"BaseReport",
")",
":",
"\"\"\"Version of checker that does not print.\"\"\"",
"def",
"__init__",
"(",
"self",
",",
"options",
")",
":",
"super",
"(",
"QuietReport",
",",
"self",
")",
".",
"__init__",
"(",
"options",
")",
"self",
".",
"__full_error_results",
"=",
"[",
"]",
"def",
"error",
"(",
"self",
",",
"line_number",
",",
"offset",
",",
"text",
",",
"check",
")",
":",
"\"\"\"Collect errors.\"\"\"",
"code",
"=",
"super",
"(",
"QuietReport",
",",
"self",
")",
".",
"error",
"(",
"line_number",
",",
"offset",
",",
"text",
",",
"check",
")",
"if",
"code",
":",
"self",
".",
"__full_error_results",
".",
"append",
"(",
"{",
"'id'",
":",
"code",
",",
"'line'",
":",
"line_number",
",",
"'column'",
":",
"offset",
"+",
"1",
",",
"'info'",
":",
"text",
"}",
")",
"def",
"full_error_results",
"(",
"self",
")",
":",
"\"\"\"Return error results in detail.\n\n Results are in the form of a list of dictionaries. Each\n dictionary contains 'id', 'line', 'column', and 'info'.\n\n \"\"\"",
"return",
"self",
".",
"__full_error_results",
"checker",
"=",
"pycodestyle",
".",
"Checker",
"(",
"''",
",",
"lines",
"=",
"source",
",",
"reporter",
"=",
"QuietReport",
",",
"*",
"*",
"pep8_options",
")",
"checker",
".",
"check_all",
"(",
")",
"return",
"checker",
".",
"report",
".",
"full_error_results",
"(",
")"
]
| Execute pycodestyle via python method calls. | [
"Execute",
"pycodestyle",
"via",
"python",
"method",
"calls",
"."
]
| python | train |
GNS3/gns3-server | gns3server/compute/dynamips/nodes/ethernet_switch.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/ethernet_switch.py#L175-L186 | def set_name(self, new_name):
"""
Renames this Ethernet switch.
:param new_name: New name for this switch
"""
yield from self._hypervisor.send('ethsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
log.info('Ethernet switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name,
id=self._id,
new_name=new_name))
self._name = new_name | [
"def",
"set_name",
"(",
"self",
",",
"new_name",
")",
":",
"yield",
"from",
"self",
".",
"_hypervisor",
".",
"send",
"(",
"'ethsw rename \"{name}\" \"{new_name}\"'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"new_name",
"=",
"new_name",
")",
")",
"log",
".",
"info",
"(",
"'Ethernet switch \"{name}\" [{id}]: renamed to \"{new_name}\"'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"id",
"=",
"self",
".",
"_id",
",",
"new_name",
"=",
"new_name",
")",
")",
"self",
".",
"_name",
"=",
"new_name"
]
| Renames this Ethernet switch.
:param new_name: New name for this switch | [
"Renames",
"this",
"Ethernet",
"switch",
"."
]
| python | train |
ulfalizer/Kconfiglib | kconfiglib.py | https://github.com/ulfalizer/Kconfiglib/blob/9fe13c03de16c341cd7ed40167216207b821ea50/kconfiglib.py#L1448-L1544 | def sync_deps(self, path):
"""
Creates or updates a directory structure that can be used to avoid
doing a full rebuild whenever the configuration is changed, mirroring
include/config/ in the kernel.
This function is intended to be called during each build, before
compiling source files that depend on configuration symbols.
path:
Path to directory
sync_deps(path) does the following:
1. If the directory <path> does not exist, it is created.
2. If <path>/auto.conf exists, old symbol values are loaded from it,
which are then compared against the current symbol values. If a
symbol has changed value (would generate different output in
autoconf.h compared to before), the change is signaled by
touch'ing a file corresponding to the symbol.
The first time sync_deps() is run on a directory, <path>/auto.conf
won't exist, and no old symbol values will be available. This
logically has the same effect as updating the entire
configuration.
The path to a symbol's file is calculated from the symbol's name
by replacing all '_' with '/' and appending '.h'. For example, the
symbol FOO_BAR_BAZ gets the file <path>/foo/bar/baz.h, and FOO
gets the file <path>/foo.h.
This scheme matches the C tools. The point is to avoid having a
single directory with a huge number of files, which the underlying
filesystem might not handle well.
3. A new auto.conf with the current symbol values is written, to keep
track of them for the next build.
The last piece of the puzzle is knowing what symbols each source file
depends on. Knowing that, dependencies can be added from source files
to the files corresponding to the symbols they depends on. The source
file will then get recompiled (only) when the symbol value changes
(provided sync_deps() is run first during each build).
The tool in the kernel that extracts symbol dependencies from source
files is scripts/basic/fixdep.c. Missing symbol files also correspond
to "not changed", which fixdep deals with by using the $(wildcard) Make
function when adding symbol prerequisites to source files.
In case you need a different scheme for your project, the sync_deps()
implementation can be used as a template.
"""
if not exists(path):
os.mkdir(path, 0o755)
# Load old values from auto.conf, if any
self._load_old_vals(path)
for sym in self.unique_defined_syms:
# Note: _write_to_conf is determined when the value is
# calculated. This is a hidden function call due to
# property magic.
val = sym.str_value
# Note: n tristate values do not get written to auto.conf and
# autoconf.h, making a missing symbol logically equivalent to n
if sym._write_to_conf:
if sym._old_val is None and \
sym.orig_type in _BOOL_TRISTATE and \
val == "n":
# No old value (the symbol was missing or n), new value n.
# No change.
continue
if val == sym._old_val:
# New value matches old. No change.
continue
elif sym._old_val is None:
# The symbol wouldn't appear in autoconf.h (because
# _write_to_conf is false), and it wouldn't have appeared in
# autoconf.h previously either (because it didn't appear in
# auto.conf). No change.
continue
# 'sym' has a new value. Flag it.
_touch_dep_file(path, sym.name)
# Remember the current values as the "new old" values.
#
# This call could go anywhere after the call to _load_old_vals(), but
# putting it last means _sync_deps() can be safely rerun if it fails
# before this point.
self._write_old_vals(path) | [
"def",
"sync_deps",
"(",
"self",
",",
"path",
")",
":",
"if",
"not",
"exists",
"(",
"path",
")",
":",
"os",
".",
"mkdir",
"(",
"path",
",",
"0o755",
")",
"# Load old values from auto.conf, if any",
"self",
".",
"_load_old_vals",
"(",
"path",
")",
"for",
"sym",
"in",
"self",
".",
"unique_defined_syms",
":",
"# Note: _write_to_conf is determined when the value is",
"# calculated. This is a hidden function call due to",
"# property magic.",
"val",
"=",
"sym",
".",
"str_value",
"# Note: n tristate values do not get written to auto.conf and",
"# autoconf.h, making a missing symbol logically equivalent to n",
"if",
"sym",
".",
"_write_to_conf",
":",
"if",
"sym",
".",
"_old_val",
"is",
"None",
"and",
"sym",
".",
"orig_type",
"in",
"_BOOL_TRISTATE",
"and",
"val",
"==",
"\"n\"",
":",
"# No old value (the symbol was missing or n), new value n.",
"# No change.",
"continue",
"if",
"val",
"==",
"sym",
".",
"_old_val",
":",
"# New value matches old. No change.",
"continue",
"elif",
"sym",
".",
"_old_val",
"is",
"None",
":",
"# The symbol wouldn't appear in autoconf.h (because",
"# _write_to_conf is false), and it wouldn't have appeared in",
"# autoconf.h previously either (because it didn't appear in",
"# auto.conf). No change.",
"continue",
"# 'sym' has a new value. Flag it.",
"_touch_dep_file",
"(",
"path",
",",
"sym",
".",
"name",
")",
"# Remember the current values as the \"new old\" values.",
"#",
"# This call could go anywhere after the call to _load_old_vals(), but",
"# putting it last means _sync_deps() can be safely rerun if it fails",
"# before this point.",
"self",
".",
"_write_old_vals",
"(",
"path",
")"
]
| Creates or updates a directory structure that can be used to avoid
doing a full rebuild whenever the configuration is changed, mirroring
include/config/ in the kernel.
This function is intended to be called during each build, before
compiling source files that depend on configuration symbols.
path:
Path to directory
sync_deps(path) does the following:
1. If the directory <path> does not exist, it is created.
2. If <path>/auto.conf exists, old symbol values are loaded from it,
which are then compared against the current symbol values. If a
symbol has changed value (would generate different output in
autoconf.h compared to before), the change is signaled by
touch'ing a file corresponding to the symbol.
The first time sync_deps() is run on a directory, <path>/auto.conf
won't exist, and no old symbol values will be available. This
logically has the same effect as updating the entire
configuration.
The path to a symbol's file is calculated from the symbol's name
by replacing all '_' with '/' and appending '.h'. For example, the
symbol FOO_BAR_BAZ gets the file <path>/foo/bar/baz.h, and FOO
gets the file <path>/foo.h.
This scheme matches the C tools. The point is to avoid having a
single directory with a huge number of files, which the underlying
filesystem might not handle well.
3. A new auto.conf with the current symbol values is written, to keep
track of them for the next build.
The last piece of the puzzle is knowing what symbols each source file
depends on. Knowing that, dependencies can be added from source files
to the files corresponding to the symbols they depends on. The source
file will then get recompiled (only) when the symbol value changes
(provided sync_deps() is run first during each build).
The tool in the kernel that extracts symbol dependencies from source
files is scripts/basic/fixdep.c. Missing symbol files also correspond
to "not changed", which fixdep deals with by using the $(wildcard) Make
function when adding symbol prerequisites to source files.
In case you need a different scheme for your project, the sync_deps()
implementation can be used as a template. | [
"Creates",
"or",
"updates",
"a",
"directory",
"structure",
"that",
"can",
"be",
"used",
"to",
"avoid",
"doing",
"a",
"full",
"rebuild",
"whenever",
"the",
"configuration",
"is",
"changed",
"mirroring",
"include",
"/",
"config",
"/",
"in",
"the",
"kernel",
"."
]
| python | train |
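The docstring above already spells out the scheme in detail; the sketch below only shows where `sync_deps()` would typically sit in a build step. It assumes a `Kconfig` file and a saved `.config` exist in the working directory, and `deps` is an arbitrary output directory name.

```python
import kconfiglib

kconf = kconfiglib.Kconfig("Kconfig")   # parse the Kconfig tree
kconf.load_config(".config")            # load current symbol values
kconf.write_autoconf("autoconf.h")      # emit the C header
kconf.sync_deps("deps")                 # touch per-symbol files for changed symbols
```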
sibirrer/lenstronomy | lenstronomy/Util/simulation_util.py | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/simulation_util.py#L36-L68 | def psf_configure_simple(psf_type="GAUSSIAN", fwhm=1, kernelsize=11, deltaPix=1, truncate=6, kernel=None):
"""
this routine generates keyword arguments to initialize a PSF() class in lenstronomy. Have a look at the PSF class
documentation to see the full possibilities.
:param psf_type: string, type of PSF model
:param fwhm: Full width at half maximum of PSF (if GAUSSIAN psf)
:param kernelsize: size in pixel of kernel (use odd numbers), only applicable for PIXEL kernels
:param deltaPix: pixel size in angular units (only needed for GAUSSIAN kernel)
:param truncate: how many sigmas out is the truncation happening
:param kernel: 2d numpy array centered PSF (odd number per axis)
:return: keyword arguments
"""
if psf_type == 'GAUSSIAN':
sigma = util.fwhm2sigma(fwhm)
sigma_axis = sigma
gaussian = Gaussian()
x_grid, y_grid = util.make_grid(kernelsize, deltaPix)
kernel_large = gaussian.function(x_grid, y_grid, amp=1., sigma_x=sigma_axis, sigma_y=sigma_axis, center_x=0, center_y=0)
kernel_large /= np.sum(kernel_large)
kernel_large = util.array2image(kernel_large)
kernel_pixel = kernel_util.pixel_kernel(kernel_large)
kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm, 'truncation': truncate*fwhm, 'kernel_point_source': kernel_large, 'kernel_pixel': kernel_pixel, 'pixel_size': deltaPix}
elif psf_type == 'PIXEL':
kernel_large = copy.deepcopy(kernel)
kernel_large = kernel_util.cut_psf(kernel_large, psf_size=kernelsize)
kwargs_psf = {'psf_type': "PIXEL", 'kernel_point_source': kernel_large}
elif psf_type == 'NONE':
kwargs_psf = {'psf_type': 'NONE'}
else:
raise ValueError("psf type %s not supported!" % psf_type)
return kwargs_psf | [
"def",
"psf_configure_simple",
"(",
"psf_type",
"=",
"\"GAUSSIAN\"",
",",
"fwhm",
"=",
"1",
",",
"kernelsize",
"=",
"11",
",",
"deltaPix",
"=",
"1",
",",
"truncate",
"=",
"6",
",",
"kernel",
"=",
"None",
")",
":",
"if",
"psf_type",
"==",
"'GAUSSIAN'",
":",
"sigma",
"=",
"util",
".",
"fwhm2sigma",
"(",
"fwhm",
")",
"sigma_axis",
"=",
"sigma",
"gaussian",
"=",
"Gaussian",
"(",
")",
"x_grid",
",",
"y_grid",
"=",
"util",
".",
"make_grid",
"(",
"kernelsize",
",",
"deltaPix",
")",
"kernel_large",
"=",
"gaussian",
".",
"function",
"(",
"x_grid",
",",
"y_grid",
",",
"amp",
"=",
"1.",
",",
"sigma_x",
"=",
"sigma_axis",
",",
"sigma_y",
"=",
"sigma_axis",
",",
"center_x",
"=",
"0",
",",
"center_y",
"=",
"0",
")",
"kernel_large",
"/=",
"np",
".",
"sum",
"(",
"kernel_large",
")",
"kernel_large",
"=",
"util",
".",
"array2image",
"(",
"kernel_large",
")",
"kernel_pixel",
"=",
"kernel_util",
".",
"pixel_kernel",
"(",
"kernel_large",
")",
"kwargs_psf",
"=",
"{",
"'psf_type'",
":",
"psf_type",
",",
"'fwhm'",
":",
"fwhm",
",",
"'truncation'",
":",
"truncate",
"*",
"fwhm",
",",
"'kernel_point_source'",
":",
"kernel_large",
",",
"'kernel_pixel'",
":",
"kernel_pixel",
",",
"'pixel_size'",
":",
"deltaPix",
"}",
"elif",
"psf_type",
"==",
"'PIXEL'",
":",
"kernel_large",
"=",
"copy",
".",
"deepcopy",
"(",
"kernel",
")",
"kernel_large",
"=",
"kernel_util",
".",
"cut_psf",
"(",
"kernel_large",
",",
"psf_size",
"=",
"kernelsize",
")",
"kwargs_psf",
"=",
"{",
"'psf_type'",
":",
"\"PIXEL\"",
",",
"'kernel_point_source'",
":",
"kernel_large",
"}",
"elif",
"psf_type",
"==",
"'NONE'",
":",
"kwargs_psf",
"=",
"{",
"'psf_type'",
":",
"'NONE'",
"}",
"else",
":",
"raise",
"ValueError",
"(",
"\"psf type %s not supported!\"",
"%",
"psf_type",
")",
"return",
"kwargs_psf"
]
| this routine generates keyword arguments to initialize a PSF() class in lenstronomy. Have a look at the PSF class
documentation to see the full possibilities.
:param psf_type: string, type of PSF model
:param fwhm: Full width at half maximum of PSF (if GAUSSIAN psf)
:param kernelsize: size in pixel of kernel (use odd numbers), only applicable for PIXEL kernels
:param deltaPix: pixel size in angular units (only needed for GAUSSIAN kernel)
:param truncate: how many sigmas out is the truncation happening
:param kernel: 2d numpy array centered PSF (odd number per axis)
:return: keyword arguments | [
"this",
"routine",
"generates",
"keyword",
"arguments",
"to",
"initialize",
"a",
"PSF",
"()",
"class",
"in",
"lenstronomy",
".",
"Have",
"a",
"look",
"at",
"the",
"PSF",
"class",
"documentation",
"to",
"see",
"the",
"full",
"possibilities",
"."
]
| python | train |
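Illustrative aside (not part of the record): a minimal usage sketch of the function above. The import path is inferred from the record's path field and the numeric values are placeholders, so treat this as an assumption rather than documented API usage.

# Hypothetical usage sketch for psf_configure_simple; assumes lenstronomy is
# installed and the module path matches the record above.
from lenstronomy.Util.simulation_util import psf_configure_simple

# Keyword arguments for a Gaussian PSF with 0.1" FWHM sampled on 0.05" pixels.
kwargs_psf = psf_configure_simple(psf_type='GAUSSIAN', fwhm=0.1, kernelsize=11, deltaPix=0.05)
print(kwargs_psf['psf_type'], kwargs_psf['kernel_point_source'].shape)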
cloud9ers/gurumate | environment/share/doc/ipython/examples/parallel/pi/pidigits.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/pi/pidigits.py#L64-L70 | def compute_n_digit_freqs(filename, n):
"""
Read digits of pi from a file and compute the n digit frequencies.
"""
d = txt_file_to_digits(filename)
freqs = n_digit_freqs(d, n)
return freqs | [
"def",
"compute_n_digit_freqs",
"(",
"filename",
",",
"n",
")",
":",
"d",
"=",
"txt_file_to_digits",
"(",
"filename",
")",
"freqs",
"=",
"n_digit_freqs",
"(",
"d",
",",
"n",
")",
"return",
"freqs"
]
| Read digits of pi from a file and compute the n digit frequencies. | [
"Read",
"digits",
"of",
"pi",
"from",
"a",
"file",
"and",
"compute",
"the",
"n",
"digit",
"frequencies",
"."
]
| python | test |
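Illustrative aside: the record above depends on helpers that are not shown (txt_file_to_digits, n_digit_freqs). A self-contained sketch of the same idea, counting n-digit blocks in a digit string, might look like this; names and the sample digits are made up for illustration.

# Count how often each n-digit block occurs in a string of digits.
from collections import Counter

def n_digit_freqs(digits, n=1):
    # Slide a window of width n over the digit string and tally each block.
    return Counter(digits[i:i + n] for i in range(len(digits) - n + 1))

print(n_digit_freqs('14159265358979', n=2))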
keon/algorithms | algorithms/matrix/multiply.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/matrix/multiply.py#L10-L28 | def multiply(multiplicand: list, multiplier: list) -> list:
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
multiplicand_row, multiplicand_col = len(
multiplicand), len(multiplicand[0])
multiplier_row, multiplier_col = len(multiplier), len(multiplier[0])
if(multiplicand_col != multiplier_row):
raise Exception(
"Multiplicand matrix not compatible with Multiplier matrix.")
# create a result matrix
result = [[0] * multiplier_col for i in range(multiplicand_row)]
for i in range(multiplicand_row):
for j in range(multiplier_col):
for k in range(len(multiplier)):
result[i][j] += multiplicand[i][k] * multiplier[k][j]
return result | [
"def",
"multiply",
"(",
"multiplicand",
":",
"list",
",",
"multiplier",
":",
"list",
")",
"->",
"list",
":",
"multiplicand_row",
",",
"multiplicand_col",
"=",
"len",
"(",
"multiplicand",
")",
",",
"len",
"(",
"multiplicand",
"[",
"0",
"]",
")",
"multiplier_row",
",",
"multiplier_col",
"=",
"len",
"(",
"multiplier",
")",
",",
"len",
"(",
"multiplier",
"[",
"0",
"]",
")",
"if",
"(",
"multiplicand_col",
"!=",
"multiplier_row",
")",
":",
"raise",
"Exception",
"(",
"\"Multiplicand matrix not compatible with Multiplier matrix.\"",
")",
"# create a result matrix",
"result",
"=",
"[",
"[",
"0",
"]",
"*",
"multiplier_col",
"for",
"i",
"in",
"range",
"(",
"multiplicand_row",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"multiplicand_row",
")",
":",
"for",
"j",
"in",
"range",
"(",
"multiplier_col",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"multiplier",
")",
")",
":",
"result",
"[",
"i",
"]",
"[",
"j",
"]",
"+=",
"multiplicand",
"[",
"i",
"]",
"[",
"k",
"]",
"*",
"multiplier",
"[",
"k",
"]",
"[",
"j",
"]",
"return",
"result"
]
| :type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]] | [
":",
"type",
"A",
":",
"List",
"[",
"List",
"[",
"int",
"]]",
":",
"type",
"B",
":",
"List",
"[",
"List",
"[",
"int",
"]]",
":",
"rtype",
":",
"List",
"[",
"List",
"[",
"int",
"]]"
]
| python | train |
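Illustrative aside: a short usage example for the multiply() function shown in the record above (it assumes that definition is in scope).

a = [[1, 2],
     [3, 4]]
b = [[5, 6],
     [7, 8]]
print(multiply(a, b))  # [[19, 22], [43, 50]]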
weld-project/weld | python/pyweld/weld/bindings.py | https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/pyweld/weld/bindings.py#L53-L72 | def run(self, conf, arg, err):
"""
WeldContext is currently hidden from the Python API. We create a new
context per Weld run and give ownership of it to the resulting value.
NOTE: This can leak the context if the result of the Weld run is an
error.
"""
weld_context_new = weld.weld_context_new
weld_context_new.argtypes = [c_weld_conf]
weld_context_new.restype = c_weld_context
ctx = weld_context_new(conf.conf)
weld_module_run = weld.weld_module_run
# module, context, arg, &err
weld_module_run.argtypes = [
c_weld_module, c_weld_context, c_weld_value, c_weld_err]
weld_module_run.restype = c_weld_value
ret = weld_module_run(self.module, ctx, arg.val, err.error)
return WeldValue(ret, assign=True, _ctx=ctx) | [
"def",
"run",
"(",
"self",
",",
"conf",
",",
"arg",
",",
"err",
")",
":",
"weld_context_new",
"=",
"weld",
".",
"weld_context_new",
"weld_context_new",
".",
"argtypes",
"=",
"[",
"c_weld_conf",
"]",
"weld_context_new",
".",
"restype",
"=",
"c_weld_context",
"ctx",
"=",
"weld_context_new",
"(",
"conf",
".",
"conf",
")",
"weld_module_run",
"=",
"weld",
".",
"weld_module_run",
"# module, context, arg, &err",
"weld_module_run",
".",
"argtypes",
"=",
"[",
"c_weld_module",
",",
"c_weld_context",
",",
"c_weld_value",
",",
"c_weld_err",
"]",
"weld_module_run",
".",
"restype",
"=",
"c_weld_value",
"ret",
"=",
"weld_module_run",
"(",
"self",
".",
"module",
",",
"ctx",
",",
"arg",
".",
"val",
",",
"err",
".",
"error",
")",
"return",
"WeldValue",
"(",
"ret",
",",
"assign",
"=",
"True",
",",
"_ctx",
"=",
"ctx",
")"
]
| WeldContext is currently hidden from the Python API. We create a new
context per Weld run and give ownership of it to the resulting value.
NOTE: This can leak the context if the result of the Weld run is an
error. | [
"WeldContext",
"is",
"currently",
"hidden",
"from",
"the",
"Python",
"API",
".",
"We",
"create",
"a",
"new",
"context",
"per",
"Weld",
"run",
"and",
"give",
"ownership",
"of",
"it",
"to",
"the",
"resulting",
"value",
"."
]
| python | train |
croscon/fleaker | fleaker/config.py | https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/config.py#L346-L379 | def _run_post_configure_callbacks(self, configure_args):
"""Run all post configure callbacks we have stored.
Functions are passed the configuration that resulted from the call to
:meth:`configure` as the first argument, in an immutable form; and are
given the arguments passed to :meth:`configure` for the second
argument.
Returns from callbacks are ignored in all fashion.
Args:
configure_args (list[object]):
The full list of arguments passed to :meth:`configure`.
Returns:
None:
Does not return anything.
"""
resulting_configuration = ImmutableDict(self.config)
# copy callbacks in case people edit them while running
multiple_callbacks = copy.copy(
self._post_configure_callbacks['multiple']
)
single_callbacks = copy.copy(self._post_configure_callbacks['single'])
# clear out the singles
self._post_configure_callbacks['single'] = []
for callback in multiple_callbacks:
callback(resulting_configuration, configure_args)
# now do the single run callbacks
for callback in single_callbacks:
callback(resulting_configuration, configure_args) | [
"def",
"_run_post_configure_callbacks",
"(",
"self",
",",
"configure_args",
")",
":",
"resulting_configuration",
"=",
"ImmutableDict",
"(",
"self",
".",
"config",
")",
"# copy callbacks in case people edit them while running",
"multiple_callbacks",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_post_configure_callbacks",
"[",
"'multiple'",
"]",
")",
"single_callbacks",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_post_configure_callbacks",
"[",
"'single'",
"]",
")",
"# clear out the singles",
"self",
".",
"_post_configure_callbacks",
"[",
"'single'",
"]",
"=",
"[",
"]",
"for",
"callback",
"in",
"multiple_callbacks",
":",
"callback",
"(",
"resulting_configuration",
",",
"configure_args",
")",
"# now do the single run callbacks",
"for",
"callback",
"in",
"single_callbacks",
":",
"callback",
"(",
"resulting_configuration",
",",
"configure_args",
")"
]
| Run all post configure callbacks we have stored.
Functions are passed the configuration that resulted from the call to
:meth:`configure` as the first argument, in an immutable form; and are
given the arguments passed to :meth:`configure` for the second
argument.
Returns from callbacks are ignored in all fashion.
Args:
configure_args (list[object]):
The full list of arguments passed to :meth:`configure`.
Returns:
None:
Does not return anything. | [
"Run",
"all",
"post",
"configure",
"callbacks",
"we",
"have",
"stored",
"."
]
| python | train |
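Illustrative aside: a standalone sketch of the post-configure callback pattern described above, using hypothetical names (this is not the fleaker API itself). Single-run callbacks are cleared before firing, and every callback receives a read-only snapshot of the configuration.

from types import MappingProxyType

callbacks = {'multiple': [], 'single': []}

def run_post_configure(config, configure_args):
    frozen = MappingProxyType(dict(config))                 # immutable view of the config
    single, callbacks['single'] = callbacks['single'], []   # single-run callbacks fire once
    for cb in callbacks['multiple'] + single:
        cb(frozen, configure_args)                          # return values are ignored

callbacks['multiple'].append(lambda cfg, args: print('configured:', dict(cfg), args))
run_post_configure({'DEBUG': True}, ['settings.py'])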
kowalpy/Robot-Framework-FTP-Library | FtpLibrary.py | https://github.com/kowalpy/Robot-Framework-FTP-Library/blob/90794be0a12af489ac98e8ae3b4ff450c83e2f3d/FtpLibrary.py#L438-L453 | def ftp_close(self, connId='default'):
"""
Closes FTP connection. Returns None.
Parameters:
- connId(optional) - connection identifier. By default equals 'default'
"""
thisConn = self.__getConnection(connId)
try:
thisConn.quit()
self.__removeConnection(connId)
except Exception as e:
try:
thisConn.close()
self.__removeConnection(connId)
except ftplib.all_errors as x:
raise FtpLibraryError(str(x)) | [
"def",
"ftp_close",
"(",
"self",
",",
"connId",
"=",
"'default'",
")",
":",
"thisConn",
"=",
"self",
".",
"__getConnection",
"(",
"connId",
")",
"try",
":",
"thisConn",
".",
"quit",
"(",
")",
"self",
".",
"__removeConnection",
"(",
"connId",
")",
"except",
"Exception",
"as",
"e",
":",
"try",
":",
"thisConn",
".",
"close",
"(",
")",
"self",
".",
"__removeConnection",
"(",
"connId",
")",
"except",
"ftplib",
".",
"all_errors",
"as",
"x",
":",
"raise",
"FtpLibraryError",
"(",
"str",
"(",
"x",
")",
")"
]
| Closes FTP connection. Returns None.
Parameters:
- connId(optional) - connection identifier. By default equals 'default' | [
"Closes",
"FTP",
"connection",
".",
"Returns",
"None",
".",
"Parameters",
":",
"-",
"connId",
"(",
"optional",
")",
"-",
"connection",
"identifier",
".",
"By",
"default",
"equals",
"default"
]
| python | train |
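Illustrative aside: the quit-then-close fallback above can be sketched with just the standard library; the host is a placeholder and the connection-registry bookkeeping of the original is omitted.

import ftplib

def close_ftp(conn):
    try:
        conn.quit()               # send a polite QUIT first
    except ftplib.all_errors:
        conn.close()              # fall back to closing the socket

# conn = ftplib.FTP('ftp.example.com'); ...; close_ftp(conn)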
uber/tchannel-python | tchannel/tornado/tchannel.py | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/tchannel.py#L189-L232 | def advertise(
self,
routers=None,
name=None,
timeout=None,
router_file=None,
jitter=None,
):
"""Make a service available on the Hyperbahn routing mesh.
This will make contact with a Hyperbahn host from a list of known
Hyperbahn routers. Additional Hyperbahn connections will be established
once contact has been made with the network.
:param router:
A seed list of addresses of Hyperbahn routers, e.g.,
``["127.0.0.1:23000"]``.
:param name:
The identity of this service on the Hyperbahn.
This is usually unnecessary, as it defaults to the name given when
initializing the :py:class:`TChannel` (which is used as your
identity as a caller).
:returns:
A future that resolves to the remote server's response after
the first advertise finishes.
Advertisement will continue to happen periodically.
"""
name = name or self.name
if not self.is_listening():
self.listen()
return hyperbahn.advertise(
self,
name,
routers,
timeout,
router_file,
jitter,
) | [
"def",
"advertise",
"(",
"self",
",",
"routers",
"=",
"None",
",",
"name",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"router_file",
"=",
"None",
",",
"jitter",
"=",
"None",
",",
")",
":",
"name",
"=",
"name",
"or",
"self",
".",
"name",
"if",
"not",
"self",
".",
"is_listening",
"(",
")",
":",
"self",
".",
"listen",
"(",
")",
"return",
"hyperbahn",
".",
"advertise",
"(",
"self",
",",
"name",
",",
"routers",
",",
"timeout",
",",
"router_file",
",",
"jitter",
",",
")"
]
| Make a service available on the Hyperbahn routing mesh.
This will make contact with a Hyperbahn host from a list of known
Hyperbahn routers. Additional Hyperbahn connections will be established
once contact has been made with the network.
:param router:
A seed list of addresses of Hyperbahn routers, e.g.,
``["127.0.0.1:23000"]``.
:param name:
The identity of this service on the Hyperbahn.
This is usually unnecessary, as it defaults to the name given when
initializing the :py:class:`TChannel` (which is used as your
identity as a caller).
:returns:
A future that resolves to the remote server's response after
the first advertise finishes.
Advertisement will continue to happen periodically. | [
"Make",
"a",
"service",
"available",
"on",
"the",
"Hyperbahn",
"routing",
"mesh",
"."
]
| python | train |
mwouts/jupytext | jupytext/cell_metadata.py | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_metadata.py#L213-L251 | def rmd_options_to_metadata(options):
"""
Parse rmd options and return a metadata dictionary
:param options:
:return:
"""
options = re.split(r'\s|,', options, 1)
if len(options) == 1:
language = options[0]
chunk_options = []
else:
language, others = options
language = language.rstrip(' ,')
others = others.lstrip(' ,')
chunk_options = parse_rmd_options(others)
language = 'R' if language == 'r' else language
metadata = {}
for i, opt in enumerate(chunk_options):
name, value = opt
if i == 0 and name == '':
metadata['name'] = value
continue
else:
if update_metadata_from_rmd_options(name, value, metadata):
continue
try:
metadata[name] = _py_logical_values(value)
continue
except RLogicalValueError:
metadata[name] = value
for name in metadata:
try_eval_metadata(metadata, name)
if ('active' in metadata or metadata.get('run_control', {}).get('frozen') is True) and 'eval' in metadata:
del metadata['eval']
return metadata.get('language') or language, metadata | [
"def",
"rmd_options_to_metadata",
"(",
"options",
")",
":",
"options",
"=",
"re",
".",
"split",
"(",
"r'\\s|,'",
",",
"options",
",",
"1",
")",
"if",
"len",
"(",
"options",
")",
"==",
"1",
":",
"language",
"=",
"options",
"[",
"0",
"]",
"chunk_options",
"=",
"[",
"]",
"else",
":",
"language",
",",
"others",
"=",
"options",
"language",
"=",
"language",
".",
"rstrip",
"(",
"' ,'",
")",
"others",
"=",
"others",
".",
"lstrip",
"(",
"' ,'",
")",
"chunk_options",
"=",
"parse_rmd_options",
"(",
"others",
")",
"language",
"=",
"'R'",
"if",
"language",
"==",
"'r'",
"else",
"language",
"metadata",
"=",
"{",
"}",
"for",
"i",
",",
"opt",
"in",
"enumerate",
"(",
"chunk_options",
")",
":",
"name",
",",
"value",
"=",
"opt",
"if",
"i",
"==",
"0",
"and",
"name",
"==",
"''",
":",
"metadata",
"[",
"'name'",
"]",
"=",
"value",
"continue",
"else",
":",
"if",
"update_metadata_from_rmd_options",
"(",
"name",
",",
"value",
",",
"metadata",
")",
":",
"continue",
"try",
":",
"metadata",
"[",
"name",
"]",
"=",
"_py_logical_values",
"(",
"value",
")",
"continue",
"except",
"RLogicalValueError",
":",
"metadata",
"[",
"name",
"]",
"=",
"value",
"for",
"name",
"in",
"metadata",
":",
"try_eval_metadata",
"(",
"metadata",
",",
"name",
")",
"if",
"(",
"'active'",
"in",
"metadata",
"or",
"metadata",
".",
"get",
"(",
"'run_control'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'frozen'",
")",
"is",
"True",
")",
"and",
"'eval'",
"in",
"metadata",
":",
"del",
"metadata",
"[",
"'eval'",
"]",
"return",
"metadata",
".",
"get",
"(",
"'language'",
")",
"or",
"language",
",",
"metadata"
]
| Parse rmd options and return a metadata dictionary
:param options:
:return: | [
"Parse",
"rmd",
"options",
"and",
"return",
"a",
"metadata",
"dictionary",
":",
"param",
"options",
":",
":",
"return",
":"
]
| python | train |
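Illustrative aside: the first parsing step above (splitting an Rmd chunk header into a language and an option string) can be tried in isolation; the option string below is made up.

import re

options = 'r setup, include=FALSE, fig.width=6'
language, others = re.split(r'\s|,', options, 1)
print(language)             # 'r'
print(others.lstrip(' ,'))  # 'setup, include=FALSE, fig.width=6'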
AdvancedClimateSystems/uModbus | umodbus/client/serial/redundancy_check.py | https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/client/serial/redundancy_check.py#L34-L56 | def get_crc(msg):
""" Return CRC of 2 byte for message.
>>> assert get_crc(b'\x02\x07') == struct.unpack('<H', b'\x41\x12')
:param msg: A byte array.
:return: Byte array of 2 bytes.
"""
register = 0xFFFF
for byte_ in msg:
try:
val = struct.unpack('<B', byte_)[0]
# Iterating over a bit-like objects in Python 3 gets you ints.
# Because fuck logic.
except TypeError:
val = byte_
register = \
(register >> 8) ^ look_up_table[(register ^ val) & 0xFF]
# CRC is little-endian!
return struct.pack('<H', register) | [
"def",
"get_crc",
"(",
"msg",
")",
":",
"register",
"=",
"0xFFFF",
"for",
"byte_",
"in",
"msg",
":",
"try",
":",
"val",
"=",
"struct",
".",
"unpack",
"(",
"'<B'",
",",
"byte_",
")",
"[",
"0",
"]",
"# Iterating over a bit-like objects in Python 3 gets you ints.",
"# Because fuck logic.",
"except",
"TypeError",
":",
"val",
"=",
"byte_",
"register",
"=",
"(",
"register",
">>",
"8",
")",
"^",
"look_up_table",
"[",
"(",
"register",
"^",
"val",
")",
"&",
"0xFF",
"]",
"# CRC is little-endian!",
"return",
"struct",
".",
"pack",
"(",
"'<H'",
",",
"register",
")"
]
| Return CRC of 2 byte for message.
>>> assert get_crc(b'\x02\x07') == struct.unpack('<H', b'\x41\x12')
:param msg: A byte array.
:return: Byte array of 2 bytes. | [
"Return",
"CRC",
"of",
"2",
"byte",
"for",
"message",
"."
]
| python | train |
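Illustrative aside: a table-free sketch of the same Modbus CRC-16 (reflected polynomial 0xA001, initial value 0xFFFF), packed little-endian just like the table-driven version above.

import struct

def crc16_modbus(msg: bytes) -> bytes:
    register = 0xFFFF
    for byte in msg:
        register ^= byte
        for _ in range(8):
            if register & 1:
                register = (register >> 1) ^ 0xA001   # reflected Modbus polynomial
            else:
                register >>= 1
    return struct.pack('<H', register)                # CRC is little-endian on the wire

print(crc16_modbus(b'\x02\x07').hex())  # '4112'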
watson-developer-cloud/python-sdk | ibm_watson/discovery_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L6739-L6747 | def _from_dict(cls, _dict):
"""Initialize a ListConfigurationsResponse object from a json dictionary."""
args = {}
if 'configurations' in _dict:
args['configurations'] = [
Configuration._from_dict(x)
for x in (_dict.get('configurations'))
]
return cls(**args) | [
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'configurations'",
"in",
"_dict",
":",
"args",
"[",
"'configurations'",
"]",
"=",
"[",
"Configuration",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'configurations'",
")",
")",
"]",
"return",
"cls",
"(",
"*",
"*",
"args",
")"
]
| Initialize a ListConfigurationsResponse object from a json dictionary. | [
"Initialize",
"a",
"ListConfigurationsResponse",
"object",
"from",
"a",
"json",
"dictionary",
"."
]
| python | train |
Robpol86/colorclass | colorclass/core.py | https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/core.py#L37-L48 | def decode(self, encoding='utf-8', errors='strict'):
"""Decode using the codec registered for encoding. Default encoding is 'utf-8'.
errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors
raise a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' as well as any other name
registered with codecs.register_error that is able to handle UnicodeDecodeErrors.
:param str encoding: Codec.
:param str errors: Error handling scheme.
"""
original_class = getattr(self, 'original_class')
return original_class(super(ColorBytes, self).decode(encoding, errors)) | [
"def",
"decode",
"(",
"self",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'strict'",
")",
":",
"original_class",
"=",
"getattr",
"(",
"self",
",",
"'original_class'",
")",
"return",
"original_class",
"(",
"super",
"(",
"ColorBytes",
",",
"self",
")",
".",
"decode",
"(",
"encoding",
",",
"errors",
")",
")"
]
| Decode using the codec registered for encoding. Default encoding is 'utf-8'.
errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors
raise a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' as well as any other name
registered with codecs.register_error that is able to handle UnicodeDecodeErrors.
:param str encoding: Codec.
:param str errors: Error handling scheme. | [
"Decode",
"using",
"the",
"codec",
"registered",
"for",
"encoding",
".",
"Default",
"encoding",
"is",
"utf",
"-",
"8",
"."
]
| python | train |
hubo1016/vlcp | vlcp/server/module.py | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/server/module.py#L781-L806 | async def batch_call_api(container, apis, timeout = 120.0):
"""
DEPRECATED - use execute_all instead
"""
apiHandles = [(object(), api) for api in apis]
apiEvents = [ModuleAPICall(handle, targetname, name, params = params)
for handle, (targetname, name, params) in apiHandles]
apiMatchers = tuple(ModuleAPIReply.createMatcher(handle) for handle, _ in apiHandles)
async def process():
for e in apiEvents:
await container.wait_for_send(e)
container.subroutine(process(), False)
eventdict = {}
async def process2():
ms = len(apiMatchers)
matchers = Diff_(apiMatchers)
while ms:
ev, m = await matchers
matchers = Diff_(matchers, remove=(m,))
eventdict[ev.handle] = ev
await container.execute_with_timeout(timeout, process2())
for e in apiEvents:
if e.handle not in eventdict:
e.canignore = True
container.scheduler.ignore(ModuleAPICall.createMatcher(e.handle))
return [eventdict.get(handle, None) for handle, _ in apiHandles] | [
"async",
"def",
"batch_call_api",
"(",
"container",
",",
"apis",
",",
"timeout",
"=",
"120.0",
")",
":",
"apiHandles",
"=",
"[",
"(",
"object",
"(",
")",
",",
"api",
")",
"for",
"api",
"in",
"apis",
"]",
"apiEvents",
"=",
"[",
"ModuleAPICall",
"(",
"handle",
",",
"targetname",
",",
"name",
",",
"params",
"=",
"params",
")",
"for",
"handle",
",",
"(",
"targetname",
",",
"name",
",",
"params",
")",
"in",
"apiHandles",
"]",
"apiMatchers",
"=",
"tuple",
"(",
"ModuleAPIReply",
".",
"createMatcher",
"(",
"handle",
")",
"for",
"handle",
",",
"_",
"in",
"apiHandles",
")",
"async",
"def",
"process",
"(",
")",
":",
"for",
"e",
"in",
"apiEvents",
":",
"await",
"container",
".",
"wait_for_send",
"(",
"e",
")",
"container",
".",
"subroutine",
"(",
"process",
"(",
")",
",",
"False",
")",
"eventdict",
"=",
"{",
"}",
"async",
"def",
"process2",
"(",
")",
":",
"ms",
"=",
"len",
"(",
"apiMatchers",
")",
"matchers",
"=",
"Diff_",
"(",
"apiMatchers",
")",
"while",
"ms",
":",
"ev",
",",
"m",
"=",
"await",
"matchers",
"matchers",
"=",
"Diff_",
"(",
"matchers",
",",
"remove",
"=",
"(",
"m",
",",
")",
")",
"eventdict",
"[",
"ev",
".",
"handle",
"]",
"=",
"ev",
"await",
"container",
".",
"execute_with_timeout",
"(",
"timeout",
",",
"process2",
"(",
")",
")",
"for",
"e",
"in",
"apiEvents",
":",
"if",
"e",
".",
"handle",
"not",
"in",
"eventdict",
":",
"e",
".",
"canignore",
"=",
"True",
"container",
".",
"scheduler",
".",
"ignore",
"(",
"ModuleAPICall",
".",
"createMatcher",
"(",
"e",
".",
"handle",
")",
")",
"return",
"[",
"eventdict",
".",
"get",
"(",
"handle",
",",
"None",
")",
"for",
"handle",
",",
"_",
"in",
"apiHandles",
"]"
]
| DEPRECATED - use execute_all instead | [
"DEPRECATED",
"-",
"use",
"execute_all",
"instead"
]
| python | train |
wummel/linkchecker | third_party/dnspython/dns/renderer.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/renderer.py#L138-L157 | def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
"""Add a question to the message.
@param qname: the question name
@type qname: dns.name.Name
@param rdtype: the question rdata type
@type rdtype: int
@param rdclass: the question rdata class
@type rdclass: int
"""
self._set_section(QUESTION)
before = self.output.tell()
qname.to_wire(self.output, self.compress, self.origin)
self.output.write(struct.pack("!HH", rdtype, rdclass))
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns.exception.TooBig
self.counts[QUESTION] += 1 | [
"def",
"add_question",
"(",
"self",
",",
"qname",
",",
"rdtype",
",",
"rdclass",
"=",
"dns",
".",
"rdataclass",
".",
"IN",
")",
":",
"self",
".",
"_set_section",
"(",
"QUESTION",
")",
"before",
"=",
"self",
".",
"output",
".",
"tell",
"(",
")",
"qname",
".",
"to_wire",
"(",
"self",
".",
"output",
",",
"self",
".",
"compress",
",",
"self",
".",
"origin",
")",
"self",
".",
"output",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"\"!HH\"",
",",
"rdtype",
",",
"rdclass",
")",
")",
"after",
"=",
"self",
".",
"output",
".",
"tell",
"(",
")",
"if",
"after",
">=",
"self",
".",
"max_size",
":",
"self",
".",
"_rollback",
"(",
"before",
")",
"raise",
"dns",
".",
"exception",
".",
"TooBig",
"self",
".",
"counts",
"[",
"QUESTION",
"]",
"+=",
"1"
]
| Add a question to the message.
@param qname: the question name
@type qname: dns.name.Name
@param rdtype: the question rdata type
@type rdtype: int
@param rdclass: the question rdata class
@type rdclass: int | [
"Add",
"a",
"question",
"to",
"the",
"message",
"."
]
| python | train |
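Illustrative aside: a self-contained sketch of what one question-section entry looks like on the wire, i.e. length-prefixed name labels followed by the 16-bit type and class written with struct.pack('!HH', ...) as above. Name compression and the size/rollback bookkeeping are deliberately omitted.

import struct

def encode_question(qname, rdtype=1, rdclass=1):      # 1/1 = type A, class IN
    wire = b''.join(struct.pack('!B', len(label)) + label.encode('ascii')
                    for label in qname.rstrip('.').split('.'))
    wire += b'\x00'                                    # root label ends the name
    return wire + struct.pack('!HH', rdtype, rdclass)

print(encode_question('example.com').hex())  # 076578616d706c6503636f6d0000010001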
sosy-lab/benchexec | benchexec/model.py | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/model.py#L672-L697 | def expand_filename_pattern(self, pattern, base_dir, sourcefile=None):
"""
The function expand_filename_pattern expands a filename pattern to a sorted list
of filenames. The pattern can contain variables and wildcards.
If base_dir is given and pattern is not absolute, base_dir and pattern are joined.
"""
# replace vars like ${benchmark_path},
# with converting to list and back, we can use the function 'substitute_vars()'
expandedPattern = substitute_vars([pattern], self, sourcefile)
assert len(expandedPattern) == 1
expandedPattern = expandedPattern[0]
if expandedPattern != pattern:
logging.debug("Expanded variables in expression %r to %r.",
pattern, expandedPattern)
fileList = util.expand_filename_pattern(expandedPattern, base_dir)
# sort alphabetical,
fileList.sort()
if not fileList:
logging.warning("No files found matching %r.", pattern)
return fileList | [
"def",
"expand_filename_pattern",
"(",
"self",
",",
"pattern",
",",
"base_dir",
",",
"sourcefile",
"=",
"None",
")",
":",
"# replace vars like ${benchmark_path},",
"# with converting to list and back, we can use the function 'substitute_vars()'",
"expandedPattern",
"=",
"substitute_vars",
"(",
"[",
"pattern",
"]",
",",
"self",
",",
"sourcefile",
")",
"assert",
"len",
"(",
"expandedPattern",
")",
"==",
"1",
"expandedPattern",
"=",
"expandedPattern",
"[",
"0",
"]",
"if",
"expandedPattern",
"!=",
"pattern",
":",
"logging",
".",
"debug",
"(",
"\"Expanded variables in expression %r to %r.\"",
",",
"pattern",
",",
"expandedPattern",
")",
"fileList",
"=",
"util",
".",
"expand_filename_pattern",
"(",
"expandedPattern",
",",
"base_dir",
")",
"# sort alphabetical,",
"fileList",
".",
"sort",
"(",
")",
"if",
"not",
"fileList",
":",
"logging",
".",
"warning",
"(",
"\"No files found matching %r.\"",
",",
"pattern",
")",
"return",
"fileList"
]
| The function expand_filename_pattern expands a filename pattern to a sorted list
of filenames. The pattern can contain variables and wildcards.
If base_dir is given and pattern is not absolute, base_dir and pattern are joined. | [
"The",
"function",
"expand_filename_pattern",
"expands",
"a",
"filename",
"pattern",
"to",
"a",
"sorted",
"list",
"of",
"filenames",
".",
"The",
"pattern",
"can",
"contain",
"variables",
"and",
"wildcards",
".",
"If",
"base_dir",
"is",
"given",
"and",
"pattern",
"is",
"not",
"absolute",
"base_dir",
"and",
"pattern",
"are",
"joined",
"."
]
| python | train |
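Illustrative aside: a rough standalone equivalent of the expansion step above using only the standard library; the variable substitution and logging are omitted and the pattern/base_dir values are placeholders.

import glob
import os

def expand_filename_pattern(pattern, base_dir):
    if not os.path.isabs(pattern):
        pattern = os.path.join(base_dir, pattern)
    return sorted(glob.glob(os.path.expanduser(pattern)))

print(expand_filename_pattern('*.py', '.'))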
orbingol/NURBS-Python | geomdl/operations.py | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L1054-L1078 | def length_curve(obj):
""" Computes the approximate length of the parametric curve.
Uses the following equation to compute the approximate length:
.. math::
\\sum_{i=0}^{n-1} \\sqrt{P_{i + 1}^2-P_{i}^2}
where :math:`n` is number of evaluated curve points and :math:`P` is the n-dimensional point.
:param obj: input curve
:type obj: abstract.Curve
:return: length
:rtype: float
"""
if not isinstance(obj, abstract.Curve):
raise GeomdlException("Input shape must be an instance of abstract.Curve class")
length = 0.0
evalpts = obj.evalpts
num_evalpts = len(obj.evalpts)
for idx in range(num_evalpts - 1):
length += linalg.point_distance(evalpts[idx], evalpts[idx + 1])
return length | [
"def",
"length_curve",
"(",
"obj",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"abstract",
".",
"Curve",
")",
":",
"raise",
"GeomdlException",
"(",
"\"Input shape must be an instance of abstract.Curve class\"",
")",
"length",
"=",
"0.0",
"evalpts",
"=",
"obj",
".",
"evalpts",
"num_evalpts",
"=",
"len",
"(",
"obj",
".",
"evalpts",
")",
"for",
"idx",
"in",
"range",
"(",
"num_evalpts",
"-",
"1",
")",
":",
"length",
"+=",
"linalg",
".",
"point_distance",
"(",
"evalpts",
"[",
"idx",
"]",
",",
"evalpts",
"[",
"idx",
"+",
"1",
"]",
")",
"return",
"length"
]
| Computes the approximate length of the parametric curve.
Uses the following equation to compute the approximate length:
.. math::
\\sum_{i=0}^{n-1} \\sqrt{P_{i + 1}^2-P_{i}^2}
where :math:`n` is number of evaluated curve points and :math:`P` is the n-dimensional point.
:param obj: input curve
:type obj: abstract.Curve
:return: length
:rtype: float | [
"Computes",
"the",
"approximate",
"length",
"of",
"the",
"parametric",
"curve",
"."
]
| python | train |
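Illustrative aside: the length formula above is simply the length of the polyline through the evaluated points, which can be checked with a self-contained sketch (the sampled quarter circle is a made-up test case).

import math

def polyline_length(points):
    return sum(math.dist(p, q) for p, q in zip(points, points[1:]))

# Quarter circle of radius 1; the exact arc length is pi/2 ~ 1.5708.
pts = [(math.cos(t * math.pi / 200), math.sin(t * math.pi / 200)) for t in range(101)]
print(polyline_length(pts))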
SuperCowPowers/workbench | workbench/server/workbench_server.py | https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/workbench_server.py#L844-L874 | def run():
""" Run the workbench server """
# Load the configuration file relative to this script location
config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')
workbench_conf = ConfigParser.ConfigParser()
config_ini = workbench_conf.read(config_path)
if not config_ini:
print 'Could not locate config.ini file, tried %s : exiting...' % config_path
exit(1)
# Pull configuration settings
datastore_uri = workbench_conf.get('workbench', 'datastore_uri')
database = workbench_conf.get('workbench', 'database')
worker_cap = workbench_conf.getint('workbench', 'worker_cap')
samples_cap = workbench_conf.getint('workbench', 'samples_cap')
# Spin up Workbench ZeroRPC
try:
store_args = {'uri': datastore_uri, 'database': database, 'worker_cap':worker_cap, 'samples_cap':samples_cap}
workbench = zerorpc.Server(WorkBench(store_args=store_args), name='workbench', heartbeat=60)
workbench.bind('tcp://0.0.0.0:4242')
print '\nWorkbench is ready and feeling super duper!'
gevent_signal(signal.SIGTERM, workbench.stop)
gevent_signal(signal.SIGINT, workbench.stop)
gevent_signal(signal.SIGKILL, workbench.stop)
workbench.run()
print '\nWorkbench Server Shutting Down... and dreaming of sheep...'
except zmq.error.ZMQError:
print '\nInfo: Could not start Workbench server (no worries, probably already running...)\n' | [
"def",
"run",
"(",
")",
":",
"# Load the configuration file relative to this script location",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
",",
"'config.ini'",
")",
"workbench_conf",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"config_ini",
"=",
"workbench_conf",
".",
"read",
"(",
"config_path",
")",
"if",
"not",
"config_ini",
":",
"print",
"'Could not locate config.ini file, tried %s : exiting...'",
"%",
"config_path",
"exit",
"(",
"1",
")",
"# Pull configuration settings",
"datastore_uri",
"=",
"workbench_conf",
".",
"get",
"(",
"'workbench'",
",",
"'datastore_uri'",
")",
"database",
"=",
"workbench_conf",
".",
"get",
"(",
"'workbench'",
",",
"'database'",
")",
"worker_cap",
"=",
"workbench_conf",
".",
"getint",
"(",
"'workbench'",
",",
"'worker_cap'",
")",
"samples_cap",
"=",
"workbench_conf",
".",
"getint",
"(",
"'workbench'",
",",
"'samples_cap'",
")",
"# Spin up Workbench ZeroRPC",
"try",
":",
"store_args",
"=",
"{",
"'uri'",
":",
"datastore_uri",
",",
"'database'",
":",
"database",
",",
"'worker_cap'",
":",
"worker_cap",
",",
"'samples_cap'",
":",
"samples_cap",
"}",
"workbench",
"=",
"zerorpc",
".",
"Server",
"(",
"WorkBench",
"(",
"store_args",
"=",
"store_args",
")",
",",
"name",
"=",
"'workbench'",
",",
"heartbeat",
"=",
"60",
")",
"workbench",
".",
"bind",
"(",
"'tcp://0.0.0.0:4242'",
")",
"print",
"'\\nWorkbench is ready and feeling super duper!'",
"gevent_signal",
"(",
"signal",
".",
"SIGTERM",
",",
"workbench",
".",
"stop",
")",
"gevent_signal",
"(",
"signal",
".",
"SIGINT",
",",
"workbench",
".",
"stop",
")",
"gevent_signal",
"(",
"signal",
".",
"SIGKILL",
",",
"workbench",
".",
"stop",
")",
"workbench",
".",
"run",
"(",
")",
"print",
"'\\nWorkbench Server Shutting Down... and dreaming of sheep...'",
"except",
"zmq",
".",
"error",
".",
"ZMQError",
":",
"print",
"'\\nInfo: Could not start Workbench server (no worries, probably already running...)\\n'"
]
| Run the workbench server | [
"Run",
"the",
"workbench",
"server"
]
| python | train |
JoeVirtual/KonFoo | konfoo/core.py | https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L1080-L1102 | def extend(self, iterable):
""" Extends the `Sequence` by appending items from the *iterable*.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the `Sequence`.
"""
# Sequence
if is_sequence(iterable):
self._data.extend(iterable)
# Structure
elif is_structure(iterable):
members = [item for item in iterable.values()]
self._data.extend(members)
# Field
elif is_field(iterable):
self._data.extend([iterable])
# Iterable
elif isinstance(iterable, (set, tuple, list)):
self._data.extend(Sequence(iterable))
else:
raise MemberTypeError(self, iterable, member=len(self)) | [
"def",
"extend",
"(",
"self",
",",
"iterable",
")",
":",
"# Sequence",
"if",
"is_sequence",
"(",
"iterable",
")",
":",
"self",
".",
"_data",
".",
"extend",
"(",
"iterable",
")",
"# Structure",
"elif",
"is_structure",
"(",
"iterable",
")",
":",
"members",
"=",
"[",
"item",
"for",
"item",
"in",
"iterable",
".",
"values",
"(",
")",
"]",
"self",
".",
"_data",
".",
"extend",
"(",
"members",
")",
"# Field",
"elif",
"is_field",
"(",
"iterable",
")",
":",
"self",
".",
"_data",
".",
"extend",
"(",
"[",
"iterable",
"]",
")",
"# Iterable",
"elif",
"isinstance",
"(",
"iterable",
",",
"(",
"set",
",",
"tuple",
",",
"list",
")",
")",
":",
"self",
".",
"_data",
".",
"extend",
"(",
"Sequence",
"(",
"iterable",
")",
")",
"else",
":",
"raise",
"MemberTypeError",
"(",
"self",
",",
"iterable",
",",
"member",
"=",
"len",
"(",
"self",
")",
")"
]
| Extends the `Sequence` by appending items from the *iterable*.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the `Sequence`. | [
"Extends",
"the",
"Sequence",
"by",
"appending",
"items",
"from",
"the",
"*",
"iterable",
"*",
"."
]
| python | train |
marshmallow-code/flask-marshmallow | src/flask_marshmallow/__init__.py | https://github.com/marshmallow-code/flask-marshmallow/blob/8483fa55cab47f0d0ed23e3fa876b22a1d8e7873/src/flask_marshmallow/__init__.py#L105-L116 | def init_app(self, app):
"""Initializes the application with the extension.
:param Flask app: The Flask application object.
"""
app.extensions = getattr(app, "extensions", {})
# If using Flask-SQLAlchemy, attach db.session to ModelSchema
if has_sqla and "sqlalchemy" in app.extensions:
db = app.extensions["sqlalchemy"].db
self.ModelSchema.OPTIONS_CLASS.session = db.session
app.extensions[EXTENSION_NAME] = self | [
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"app",
".",
"extensions",
"=",
"getattr",
"(",
"app",
",",
"\"extensions\"",
",",
"{",
"}",
")",
"# If using Flask-SQLAlchemy, attach db.session to ModelSchema",
"if",
"has_sqla",
"and",
"\"sqlalchemy\"",
"in",
"app",
".",
"extensions",
":",
"db",
"=",
"app",
".",
"extensions",
"[",
"\"sqlalchemy\"",
"]",
".",
"db",
"self",
".",
"ModelSchema",
".",
"OPTIONS_CLASS",
".",
"session",
"=",
"db",
".",
"session",
"app",
".",
"extensions",
"[",
"EXTENSION_NAME",
"]",
"=",
"self"
]
| Initializes the application with the extension.
:param Flask app: The Flask application object. | [
"Initializes",
"the",
"application",
"with",
"the",
"extension",
"."
]
| python | train |
michael-lazar/rtv | rtv/packages/praw/__init__.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1966-L1985 | def get_mod_log(self, subreddit, mod=None, action=None, *args, **kwargs):
"""Return a get_content generator for moderation log items.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the modlog for.
:param mod: If given, only return the actions made by this moderator.
Both a moderator name or Redditor object can be used here.
:param action: If given, only return entries for the specified action.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
params = kwargs.setdefault('params', {})
if mod is not None:
params['mod'] = six.text_type(mod)
if action is not None:
params['type'] = six.text_type(action)
url = self.config['modlog'].format(subreddit=six.text_type(subreddit))
return self.get_content(url, *args, **kwargs) | [
"def",
"get_mod_log",
"(",
"self",
",",
"subreddit",
",",
"mod",
"=",
"None",
",",
"action",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"kwargs",
".",
"setdefault",
"(",
"'params'",
",",
"{",
"}",
")",
"if",
"mod",
"is",
"not",
"None",
":",
"params",
"[",
"'mod'",
"]",
"=",
"six",
".",
"text_type",
"(",
"mod",
")",
"if",
"action",
"is",
"not",
"None",
":",
"params",
"[",
"'type'",
"]",
"=",
"six",
".",
"text_type",
"(",
"action",
")",
"url",
"=",
"self",
".",
"config",
"[",
"'modlog'",
"]",
".",
"format",
"(",
"subreddit",
"=",
"six",
".",
"text_type",
"(",
"subreddit",
")",
")",
"return",
"self",
".",
"get_content",
"(",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Return a get_content generator for moderation log items.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the modlog for.
:param mod: If given, only return the actions made by this moderator.
Both a moderator name or Redditor object can be used here.
:param action: If given, only return entries for the specified action.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered. | [
"Return",
"a",
"get_content",
"generator",
"for",
"moderation",
"log",
"items",
"."
]
| python | train |
openpermissions/chub | chub/api.py | https://github.com/openpermissions/chub/blob/00762aa17015f4b3010673d1570c708eab3c34ed/chub/api.py#L150-L158 | def login(self, email, password):
"""
login using email and password
:param email: email address
:param password: password
"""
rsp = self._request()
self.default_headers['Authorization'] = rsp.data['token']
return rsp | [
"def",
"login",
"(",
"self",
",",
"email",
",",
"password",
")",
":",
"rsp",
"=",
"self",
".",
"_request",
"(",
")",
"self",
".",
"default_headers",
"[",
"'Authorization'",
"]",
"=",
"rsp",
".",
"data",
"[",
"'token'",
"]",
"return",
"rsp"
]
| login using email and password
:param email: email address
:param password: password | [
"login",
"using",
"email",
"and",
"password",
":",
"param",
"email",
":",
"email",
"address",
":",
"param",
"password",
":",
"password"
]
| python | train |
OpenKMIP/PyKMIP | kmip/core/messages/payloads/create.py | https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/messages/payloads/create.py#L95-L161 | def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Create request payload and decode it into
its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the object type or template
attribute is missing from the encoded payload.
"""
super(CreateRequestPayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):
self._object_type = primitives.Enumeration(
enums.ObjectType,
tag=enums.Tags.OBJECT_TYPE
)
self._object_type.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding(
"The Create request payload encoding is missing the object "
"type."
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(enums.Tags.TEMPLATE_ATTRIBUTE, local_buffer):
self._template_attribute = objects.TemplateAttribute()
self._template_attribute.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The Create request payload encoding is missing the "
"template attribute."
)
else:
# NOTE (ph) For now, leave attributes natively in TemplateAttribute
# form and just convert to the KMIP 2.0 Attributes form as needed
# for encoding/decoding purposes. Changing the payload to require
# the new Attributes structure will trigger a bunch of second-order
# effects across the client and server codebases that is beyond
# the scope of updating the Create payloads to support KMIP 2.0.
if self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):
attributes = objects.Attributes()
attributes.read(local_buffer, kmip_version=kmip_version)
value = objects.convert_attributes_to_template_attribute(
attributes
)
self._template_attribute = value
else:
raise exceptions.InvalidKmipEncoding(
"The Create request payload encoding is missing the "
"attributes structure."
)
self.is_oversized(local_buffer) | [
"def",
"read",
"(",
"self",
",",
"input_buffer",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_0",
")",
":",
"super",
"(",
"CreateRequestPayload",
",",
"self",
")",
".",
"read",
"(",
"input_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_buffer",
"=",
"utils",
".",
"BytearrayStream",
"(",
"input_buffer",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"OBJECT_TYPE",
",",
"local_buffer",
")",
":",
"self",
".",
"_object_type",
"=",
"primitives",
".",
"Enumeration",
"(",
"enums",
".",
"ObjectType",
",",
"tag",
"=",
"enums",
".",
"Tags",
".",
"OBJECT_TYPE",
")",
"self",
".",
"_object_type",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"exceptions",
".",
"InvalidKmipEncoding",
"(",
"\"The Create request payload encoding is missing the object \"",
"\"type.\"",
")",
"if",
"kmip_version",
"<",
"enums",
".",
"KMIPVersion",
".",
"KMIP_2_0",
":",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"TEMPLATE_ATTRIBUTE",
",",
"local_buffer",
")",
":",
"self",
".",
"_template_attribute",
"=",
"objects",
".",
"TemplateAttribute",
"(",
")",
"self",
".",
"_template_attribute",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"exceptions",
".",
"InvalidKmipEncoding",
"(",
"\"The Create request payload encoding is missing the \"",
"\"template attribute.\"",
")",
"else",
":",
"# NOTE (ph) For now, leave attributes natively in TemplateAttribute",
"# form and just convert to the KMIP 2.0 Attributes form as needed",
"# for encoding/decoding purposes. Changing the payload to require",
"# the new Attributes structure will trigger a bunch of second-order",
"# effects across the client and server codebases that is beyond",
"# the scope of updating the Create payloads to support KMIP 2.0.",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"ATTRIBUTES",
",",
"local_buffer",
")",
":",
"attributes",
"=",
"objects",
".",
"Attributes",
"(",
")",
"attributes",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"value",
"=",
"objects",
".",
"convert_attributes_to_template_attribute",
"(",
"attributes",
")",
"self",
".",
"_template_attribute",
"=",
"value",
"else",
":",
"raise",
"exceptions",
".",
"InvalidKmipEncoding",
"(",
"\"The Create request payload encoding is missing the \"",
"\"attributes structure.\"",
")",
"self",
".",
"is_oversized",
"(",
"local_buffer",
")"
]
| Read the data encoding the Create request payload and decode it into
its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the object type or template
attribute is missing from the encoded payload. | [
"Read",
"the",
"data",
"encoding",
"the",
"Create",
"request",
"payload",
"and",
"decode",
"it",
"into",
"its",
"constituent",
"parts",
"."
]
| python | test |
rafaelsierra/django-json-mixin-form | src/sierra/dj/mixins/forms.py | https://github.com/rafaelsierra/django-json-mixin-form/blob/004149a1077eba8c072ebbfb6eb6b86a57564ecf/src/sierra/dj/mixins/forms.py#L52-L58 | def _get_field_error_dict(self, field):
'''Returns the dict containing the field errors information'''
return {
'name': field.html_name,
'id': 'id_{}'.format(field.html_name), # This may be a problem
'errors': field.errors,
} | [
"def",
"_get_field_error_dict",
"(",
"self",
",",
"field",
")",
":",
"return",
"{",
"'name'",
":",
"field",
".",
"html_name",
",",
"'id'",
":",
"'id_{}'",
".",
"format",
"(",
"field",
".",
"html_name",
")",
",",
"# This may be a problem",
"'errors'",
":",
"field",
".",
"errors",
",",
"}"
]
| Returns the dict containing the field errors information | [
"Returns",
"the",
"dict",
"containing",
"the",
"field",
"errors",
"information"
]
| python | train |
collectiveacuity/labPack | labpack/storage/aws/s3.py | https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L790-L867 | def delete_bucket(self, bucket_name):
'''
a method to delete a bucket in s3 and all its contents
:param bucket_name: string with name of bucket
:return: string with status of method
'''
title = '%s.delete_bucket' % self.__class__.__name__
# validate inputs
input_fields = {
'bucket_name': bucket_name
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# check for existence of bucket
if not bucket_name in self.bucket_list:
if not bucket_name in self.list_buckets():
status_msg = 'S3 bucket "%s" does not exist.' % bucket_name
self.iam.printer(status_msg)
return status_msg
# retrieve list of records in bucket
record_keys = []
record_list, next_key = self.list_versions(bucket_name)
for record in record_list:
details = {
'Key': record['key'],
'VersionId': record['version_id']
}
record_keys.append(details)
# delete records in bucket
kw_args = {
'Bucket': bucket_name,
'Delete': { 'Objects': record_keys }
}
if record_keys:
try:
response = self.connection.delete_objects(**kw_args)
except:
raise AWSConnectionError(title)
# continue deleting objects in bucket until empty
if next_key:
while next_key:
record_keys = []
record_list, next_key = self.list_versions(bucket_name, starting_key=next_key['key'], starting_version=next_key['version_id'])
for record in record_list:
details = {
'Key': record['key'],
'VersionId': record['version_id']
}
record_keys.append(details)
kw_args = {
'Bucket': bucket_name,
'Delete': { 'Objects': record_keys }
}
try:
response = self.connection.delete_objects(**kw_args)
except:
raise AWSConnectionError(title)
# send delete bucket request
try:
self.connection.delete_bucket( Bucket=bucket_name )
except:
raise AWSConnectionError(title)
# report result and return true
status_msg = 'S3 bucket "%s" deleted.' % bucket_name
self.iam.printer(status_msg)
return status_msg | [
"def",
"delete_bucket",
"(",
"self",
",",
"bucket_name",
")",
":",
"title",
"=",
"'%s.delete_bucket'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'bucket_name'",
":",
"bucket_name",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# check for existence of bucket",
"if",
"not",
"bucket_name",
"in",
"self",
".",
"bucket_list",
":",
"if",
"not",
"bucket_name",
"in",
"self",
".",
"list_buckets",
"(",
")",
":",
"status_msg",
"=",
"'S3 bucket \"%s\" does not exist.'",
"%",
"bucket_name",
"self",
".",
"iam",
".",
"printer",
"(",
"status_msg",
")",
"return",
"status_msg",
"# retrieve list of records in bucket",
"record_keys",
"=",
"[",
"]",
"record_list",
",",
"next_key",
"=",
"self",
".",
"list_versions",
"(",
"bucket_name",
")",
"for",
"record",
"in",
"record_list",
":",
"details",
"=",
"{",
"'Key'",
":",
"record",
"[",
"'key'",
"]",
",",
"'VersionId'",
":",
"record",
"[",
"'version_id'",
"]",
"}",
"record_keys",
".",
"append",
"(",
"details",
")",
"# delete records in bucket",
"kw_args",
"=",
"{",
"'Bucket'",
":",
"bucket_name",
",",
"'Delete'",
":",
"{",
"'Objects'",
":",
"record_keys",
"}",
"}",
"if",
"record_keys",
":",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"delete_objects",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# continue deleting objects in bucket until empty",
"if",
"next_key",
":",
"while",
"next_key",
":",
"record_keys",
"=",
"[",
"]",
"record_list",
",",
"next_key",
"=",
"self",
".",
"list_versions",
"(",
"bucket_name",
",",
"starting_key",
"=",
"next_key",
"[",
"'key'",
"]",
",",
"starting_version",
"=",
"next_key",
"[",
"'version_id'",
"]",
")",
"for",
"record",
"in",
"record_list",
":",
"details",
"=",
"{",
"'Key'",
":",
"record",
"[",
"'key'",
"]",
",",
"'VersionId'",
":",
"record",
"[",
"'version_id'",
"]",
"}",
"record_keys",
".",
"append",
"(",
"details",
")",
"kw_args",
"=",
"{",
"'Bucket'",
":",
"bucket_name",
",",
"'Delete'",
":",
"{",
"'Objects'",
":",
"record_keys",
"}",
"}",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"delete_objects",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# send delete bucket request",
"try",
":",
"self",
".",
"connection",
".",
"delete_bucket",
"(",
"Bucket",
"=",
"bucket_name",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# report result and return true",
"status_msg",
"=",
"'S3 bucket \"%s\" deleted.'",
"%",
"bucket_name",
"self",
".",
"iam",
".",
"printer",
"(",
"status_msg",
")",
"return",
"status_msg"
]
| a method to delete a bucket in s3 and all its contents
:param bucket_name: string with name of bucket
:return: string with status of method | [
"a",
"method",
"to",
"delete",
"a",
"bucket",
"in",
"s3",
"and",
"all",
"its",
"contents"
]
| python | train |
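Illustrative aside: a hedged sketch of the same empty-then-delete flow written directly against boto3 (the bucket name is a placeholder); the paginator replaces the manual next_key loop above, and delete markers are removed along with object versions.

import boto3

def delete_bucket(bucket_name):
    s3 = boto3.client('s3')
    paginator = s3.get_paginator('list_object_versions')
    for page in paginator.paginate(Bucket=bucket_name):
        versions = page.get('Versions', []) + page.get('DeleteMarkers', [])
        if versions:
            s3.delete_objects(
                Bucket=bucket_name,
                Delete={'Objects': [{'Key': v['Key'], 'VersionId': v['VersionId']}
                                    for v in versions]},
            )
    s3.delete_bucket(Bucket=bucket_name)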
Tanganelli/CoAPthon3 | coapthon/http_proxy/http_coap_proxy.py | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/http_proxy/http_coap_proxy.py#L148-L163 | def do_POST(self):
"""
Perform a POST request
"""
# Doesn't do anything with posted data
# print "uri: ", self.client_address, self.path
self.do_initial_operations()
payload = self.coap_uri.get_payload()
if payload is None:
logger.error("BAD POST REQUEST")
self.send_error(BAD_REQUEST)
return
coap_response = self.client.post(self.coap_uri.path, payload)
self.client.stop()
logger.info("Server response: %s", coap_response.pretty_print())
self.set_http_response(coap_response) | [
"def",
"do_POST",
"(",
"self",
")",
":",
"# Doesn't do anything with posted data",
"# print \"uri: \", self.client_address, self.path",
"self",
".",
"do_initial_operations",
"(",
")",
"payload",
"=",
"self",
".",
"coap_uri",
".",
"get_payload",
"(",
")",
"if",
"payload",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"BAD POST REQUEST\"",
")",
"self",
".",
"send_error",
"(",
"BAD_REQUEST",
")",
"return",
"coap_response",
"=",
"self",
".",
"client",
".",
"post",
"(",
"self",
".",
"coap_uri",
".",
"path",
",",
"payload",
")",
"self",
".",
"client",
".",
"stop",
"(",
")",
"logger",
".",
"info",
"(",
"\"Server response: %s\"",
",",
"coap_response",
".",
"pretty_print",
"(",
")",
")",
"self",
".",
"set_http_response",
"(",
"coap_response",
")"
]
| Perform a POST request | [
"Perform",
"a",
"POST",
"request"
]
| python | train |
metric-learn/metric-learn | metric_learn/mmc.py | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mmc.py#L88-L210 | def _fit_full(self, pairs, y):
"""Learn full metric using MMC.
Parameters
----------
X : (n x d) data matrix
each row corresponds to a single instance
constraints : 4-tuple of arrays
(a,b,c,d) indices into X, with (a,b) specifying similar and (c,d)
dissimilar pairs
"""
num_dim = pairs.shape[2]
error1 = error2 = 1e10
eps = 0.01 # error-bound of iterative projection on C1 and C2
A = self.A_
pos_pairs, neg_pairs = pairs[y == 1], pairs[y == -1]
# Create weight vector from similar samples
pos_diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
w = np.einsum('ij,ik->jk', pos_diff, pos_diff).ravel()
# `w` is the sum of all outer products of the rows in `pos_diff`.
# The above `einsum` is equivalent to the much more inefficient:
# w = np.apply_along_axis(
# lambda x: np.outer(x,x).ravel(),
# 1,
# X[a] - X[b]
# ).sum(axis = 0)
t = w.dot(A.ravel()) / 100.0
w_norm = np.linalg.norm(w)
w1 = w / w_norm # make `w` a unit vector
t1 = t / w_norm # distance from origin to `w^T*x=t` plane
cycle = 1
alpha = 0.1 # initial step size along gradient
grad1 = self._fS1(pos_pairs, A) # gradient of similarity
# constraint function
grad2 = self._fD1(neg_pairs, A) # gradient of dissimilarity
# constraint function
M = self._grad_projection(grad1, grad2) # gradient of fD1 orthogonal to fS1
A_old = A.copy()
for cycle in xrange(self.max_iter):
# projection of constraints C1 and C2
satisfy = False
for it in xrange(self.max_proj):
# First constraint:
# f(A) = \sum_{i,j \in S} d_ij' A d_ij <= t (1)
# (1) can be rewritten as a linear constraint: w^T x = t,
# where x is the unrolled matrix of A,
# w is also an unrolled matrix of W where
# W_{kl}= \sum_{i,j \in S}d_ij^k * d_ij^l
x0 = A.ravel()
if w.dot(x0) <= t:
x = x0
else:
x = x0 + (t1 - w1.dot(x0)) * w1
A[:] = x.reshape(num_dim, num_dim)
# Second constraint:
# PSD constraint A >= 0
# project A onto domain A>0
l, V = np.linalg.eigh((A + A.T) / 2)
A[:] = np.dot(V * np.maximum(0, l[None,:]), V.T)
fDC2 = w.dot(A.ravel())
error2 = (fDC2 - t) / t
if error2 < eps:
satisfy = True
break
# third constraint: gradient ascent
# max: g(A) >= 1
# here we suppose g(A) = fD(A) = \sum_{I,J \in D} sqrt(d_ij' A d_ij)
obj_previous = self._fD(neg_pairs, A_old) # g(A_old)
obj = self._fD(neg_pairs, A) # g(A)
if satisfy and (obj > obj_previous or cycle == 0):
# If projection of 1 and 2 is successful, and such projection
# improves objective function, slightly increase learning rate
# and update from the current A.
alpha *= 1.05
A_old[:] = A
grad2 = self._fS1(pos_pairs, A)
grad1 = self._fD1(neg_pairs, A)
M = self._grad_projection(grad1, grad2)
A += alpha * M
else:
# If projection of 1 and 2 failed, or obj <= obj_previous due
# to projection of 1 and 2, shrink learning rate and re-update
# from the previous A.
alpha /= 2
A[:] = A_old + alpha * M
delta = np.linalg.norm(alpha * M) / np.linalg.norm(A_old)
if delta < self.convergence_threshold:
break
if self.verbose:
print('mmc iter: %d, conv = %f, projections = %d' % (cycle, delta, it+1))
if delta > self.convergence_threshold:
self.converged_ = False
if self.verbose:
print('mmc did not converge, conv = %f' % (delta,))
else:
self.converged_ = True
if self.verbose:
print('mmc converged at iter %d, conv = %f' % (cycle, delta))
self.A_[:] = A_old
self.n_iter_ = cycle
self.transformer_ = transformer_from_metric(self.A_)
return self | [
"def",
"_fit_full",
"(",
"self",
",",
"pairs",
",",
"y",
")",
":",
"num_dim",
"=",
"pairs",
".",
"shape",
"[",
"2",
"]",
"error1",
"=",
"error2",
"=",
"1e10",
"eps",
"=",
"0.01",
"# error-bound of iterative projection on C1 and C2",
"A",
"=",
"self",
".",
"A_",
"pos_pairs",
",",
"neg_pairs",
"=",
"pairs",
"[",
"y",
"==",
"1",
"]",
",",
"pairs",
"[",
"y",
"==",
"-",
"1",
"]",
"# Create weight vector from similar samples",
"pos_diff",
"=",
"pos_pairs",
"[",
":",
",",
"0",
",",
":",
"]",
"-",
"pos_pairs",
"[",
":",
",",
"1",
",",
":",
"]",
"w",
"=",
"np",
".",
"einsum",
"(",
"'ij,ik->jk'",
",",
"pos_diff",
",",
"pos_diff",
")",
".",
"ravel",
"(",
")",
"# `w` is the sum of all outer products of the rows in `pos_diff`.",
"# The above `einsum` is equivalent to the much more inefficient:",
"# w = np.apply_along_axis(",
"# lambda x: np.outer(x,x).ravel(),",
"# 1,",
"# X[a] - X[b]",
"# ).sum(axis = 0)",
"t",
"=",
"w",
".",
"dot",
"(",
"A",
".",
"ravel",
"(",
")",
")",
"/",
"100.0",
"w_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"w",
")",
"w1",
"=",
"w",
"/",
"w_norm",
"# make `w` a unit vector",
"t1",
"=",
"t",
"/",
"w_norm",
"# distance from origin to `w^T*x=t` plane",
"cycle",
"=",
"1",
"alpha",
"=",
"0.1",
"# initial step size along gradient",
"grad1",
"=",
"self",
".",
"_fS1",
"(",
"pos_pairs",
",",
"A",
")",
"# gradient of similarity",
"# constraint function",
"grad2",
"=",
"self",
".",
"_fD1",
"(",
"neg_pairs",
",",
"A",
")",
"# gradient of dissimilarity",
"# constraint function",
"M",
"=",
"self",
".",
"_grad_projection",
"(",
"grad1",
",",
"grad2",
")",
"# gradient of fD1 orthogonal to fS1",
"A_old",
"=",
"A",
".",
"copy",
"(",
")",
"for",
"cycle",
"in",
"xrange",
"(",
"self",
".",
"max_iter",
")",
":",
"# projection of constraints C1 and C2",
"satisfy",
"=",
"False",
"for",
"it",
"in",
"xrange",
"(",
"self",
".",
"max_proj",
")",
":",
"# First constraint:",
"# f(A) = \\sum_{i,j \\in S} d_ij' A d_ij <= t (1)",
"# (1) can be rewritten as a linear constraint: w^T x = t,",
"# where x is the unrolled matrix of A,",
"# w is also an unrolled matrix of W where",
"# W_{kl}= \\sum_{i,j \\in S}d_ij^k * d_ij^l",
"x0",
"=",
"A",
".",
"ravel",
"(",
")",
"if",
"w",
".",
"dot",
"(",
"x0",
")",
"<=",
"t",
":",
"x",
"=",
"x0",
"else",
":",
"x",
"=",
"x0",
"+",
"(",
"t1",
"-",
"w1",
".",
"dot",
"(",
"x0",
")",
")",
"*",
"w1",
"A",
"[",
":",
"]",
"=",
"x",
".",
"reshape",
"(",
"num_dim",
",",
"num_dim",
")",
"# Second constraint:",
"# PSD constraint A >= 0",
"# project A onto domain A>0",
"l",
",",
"V",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"(",
"A",
"+",
"A",
".",
"T",
")",
"/",
"2",
")",
"A",
"[",
":",
"]",
"=",
"np",
".",
"dot",
"(",
"V",
"*",
"np",
".",
"maximum",
"(",
"0",
",",
"l",
"[",
"None",
",",
":",
"]",
")",
",",
"V",
".",
"T",
")",
"fDC2",
"=",
"w",
".",
"dot",
"(",
"A",
".",
"ravel",
"(",
")",
")",
"error2",
"=",
"(",
"fDC2",
"-",
"t",
")",
"/",
"t",
"if",
"error2",
"<",
"eps",
":",
"satisfy",
"=",
"True",
"break",
"# third constraint: gradient ascent",
"# max: g(A) >= 1",
"# here we suppose g(A) = fD(A) = \\sum_{I,J \\in D} sqrt(d_ij' A d_ij)",
"obj_previous",
"=",
"self",
".",
"_fD",
"(",
"neg_pairs",
",",
"A_old",
")",
"# g(A_old)",
"obj",
"=",
"self",
".",
"_fD",
"(",
"neg_pairs",
",",
"A",
")",
"# g(A)",
"if",
"satisfy",
"and",
"(",
"obj",
">",
"obj_previous",
"or",
"cycle",
"==",
"0",
")",
":",
"# If projection of 1 and 2 is successful, and such projection",
"# improves objective function, slightly increase learning rate",
"# and update from the current A.",
"alpha",
"*=",
"1.05",
"A_old",
"[",
":",
"]",
"=",
"A",
"grad2",
"=",
"self",
".",
"_fS1",
"(",
"pos_pairs",
",",
"A",
")",
"grad1",
"=",
"self",
".",
"_fD1",
"(",
"neg_pairs",
",",
"A",
")",
"M",
"=",
"self",
".",
"_grad_projection",
"(",
"grad1",
",",
"grad2",
")",
"A",
"+=",
"alpha",
"*",
"M",
"else",
":",
"# If projection of 1 and 2 failed, or obj <= obj_previous due",
"# to projection of 1 and 2, shrink learning rate and re-update",
"# from the previous A.",
"alpha",
"/=",
"2",
"A",
"[",
":",
"]",
"=",
"A_old",
"+",
"alpha",
"*",
"M",
"delta",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"alpha",
"*",
"M",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"A_old",
")",
"if",
"delta",
"<",
"self",
".",
"convergence_threshold",
":",
"break",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'mmc iter: %d, conv = %f, projections = %d'",
"%",
"(",
"cycle",
",",
"delta",
",",
"it",
"+",
"1",
")",
")",
"if",
"delta",
">",
"self",
".",
"convergence_threshold",
":",
"self",
".",
"converged_",
"=",
"False",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'mmc did not converge, conv = %f'",
"%",
"(",
"delta",
",",
")",
")",
"else",
":",
"self",
".",
"converged_",
"=",
"True",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'mmc converged at iter %d, conv = %f'",
"%",
"(",
"cycle",
",",
"delta",
")",
")",
"self",
".",
"A_",
"[",
":",
"]",
"=",
"A_old",
"self",
".",
"n_iter_",
"=",
"cycle",
"self",
".",
"transformer_",
"=",
"transformer_from_metric",
"(",
"self",
".",
"A_",
")",
"return",
"self"
]
| Learn full metric using MMC.
Parameters
----------
X : (n x d) data matrix
each row corresponds to a single instance
constraints : 4-tuple of arrays
(a,b,c,d) indices into X, with (a,b) specifying similar and (c,d)
dissimilar pairs | [
"Learn",
"full",
"metric",
"using",
"MMC",
"."
]
| python | train |
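The C1 projection used above (x = x0 + (t1 - w1·x0) * w1) is an orthogonal projection of the unrolled metric matrix onto the hyperplane w^T x = t. A minimal NumPy sketch of just that step, with made-up numbers and independent of the class:

import numpy as np

w = np.array([1.0, 2.0, 2.0])      # normal of the constraint hyperplane w.x = t
x0 = np.array([3.0, 0.0, 1.0])     # point to project (stand-in for A.ravel())
t = 2.0

w_norm = np.linalg.norm(w)
w1 = w / w_norm                    # unit normal
t1 = t / w_norm                    # distance from origin to the hyperplane
x = x0 + (t1 - w1.dot(x0)) * w1    # orthogonal projection of x0
assert abs(w.dot(x) - t) < 1e-12   # projected point satisfies the constraint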
saltstack/salt | salt/modules/virt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L2656-L2711 | def get_profiles(hypervisor=None, **kwargs):
'''
Return the virt profiles for hypervisor.
Currently there are profiles for:
- nic
- disk
:param hypervisor: override the default machine type.
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_profiles
salt '*' virt.get_profiles hypervisor=esxi
'''
ret = {}
caps = capabilities(**kwargs)
hypervisors = sorted({x for y in [guest['arch']['domains'].keys() for guest in caps['guests']] for x in y})
default_hypervisor = 'kvm' if 'kvm' in hypervisors else hypervisors[0]
if not hypervisor:
hypervisor = __salt__['config.get']('libvirt:hypervisor')
if hypervisor is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'libvirt:hypervisor\' configuration property has been deprecated. '
'Rather use the \'virt:connection:uri\' to properly define the libvirt '
'URI or alias of the host to connect to. \'libvirt:hypervisor\' will '
'stop being used in {version}.'
)
else:
# Use the machine types as possible values
# Prefer 'kvm' over the others if available
hypervisor = default_hypervisor
virtconf = __salt__['config.get']('virt', {})
for typ in ['disk', 'nic']:
_func = getattr(sys.modules[__name__], '_{0}_profile'.format(typ))
ret[typ] = {'default': _func('default', hypervisor)}
if typ in virtconf:
ret.setdefault(typ, {})
for prf in virtconf[typ]:
ret[typ][prf] = _func(prf, hypervisor)
return ret | [
"def",
"get_profiles",
"(",
"hypervisor",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"}",
"caps",
"=",
"capabilities",
"(",
"*",
"*",
"kwargs",
")",
"hypervisors",
"=",
"sorted",
"(",
"{",
"x",
"for",
"y",
"in",
"[",
"guest",
"[",
"'arch'",
"]",
"[",
"'domains'",
"]",
".",
"keys",
"(",
")",
"for",
"guest",
"in",
"caps",
"[",
"'guests'",
"]",
"]",
"for",
"x",
"in",
"y",
"}",
")",
"default_hypervisor",
"=",
"'kvm'",
"if",
"'kvm'",
"in",
"hypervisors",
"else",
"hypervisors",
"[",
"0",
"]",
"if",
"not",
"hypervisor",
":",
"hypervisor",
"=",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"'libvirt:hypervisor'",
")",
"if",
"hypervisor",
"is",
"not",
"None",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Sodium'",
",",
"'\\'libvirt:hypervisor\\' configuration property has been deprecated. '",
"'Rather use the \\'virt:connection:uri\\' to properly define the libvirt '",
"'URI or alias of the host to connect to. \\'libvirt:hypervisor\\' will '",
"'stop being used in {version}.'",
")",
"else",
":",
"# Use the machine types as possible values",
"# Prefer 'kvm' over the others if available",
"hypervisor",
"=",
"default_hypervisor",
"virtconf",
"=",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"'virt'",
",",
"{",
"}",
")",
"for",
"typ",
"in",
"[",
"'disk'",
",",
"'nic'",
"]",
":",
"_func",
"=",
"getattr",
"(",
"sys",
".",
"modules",
"[",
"__name__",
"]",
",",
"'_{0}_profile'",
".",
"format",
"(",
"typ",
")",
")",
"ret",
"[",
"typ",
"]",
"=",
"{",
"'default'",
":",
"_func",
"(",
"'default'",
",",
"hypervisor",
")",
"}",
"if",
"typ",
"in",
"virtconf",
":",
"ret",
".",
"setdefault",
"(",
"typ",
",",
"{",
"}",
")",
"for",
"prf",
"in",
"virtconf",
"[",
"typ",
"]",
":",
"ret",
"[",
"typ",
"]",
"[",
"prf",
"]",
"=",
"_func",
"(",
"prf",
",",
"hypervisor",
")",
"return",
"ret"
]
| Return the virt profiles for hypervisor.
Currently there are profiles for:
- nic
- disk
:param hypervisor: override the default machine type.
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_profiles
salt '*' virt.get_profiles hypervisor=esxi | [
"Return",
"the",
"virt",
"profiles",
"for",
"hypervisor",
"."
]
| python | train |
michael-lazar/rtv | rtv/packages/praw/objects.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/objects.py#L1432-L1448 | def sticky(self, bottom=True):
"""Sticky a post in its subreddit.
If there is already a stickied post in the designated slot it will be
unstickied.
:param bottom: Set this as the top or bottom sticky. If no top sticky
exists, this submission will become the top sticky regardless.
:returns: The json response from the server
"""
url = self.reddit_session.config['sticky_submission']
data = {'id': self.fullname, 'state': True}
if not bottom:
data['num'] = 1
return self.reddit_session.request_json(url, data=data) | [
"def",
"sticky",
"(",
"self",
",",
"bottom",
"=",
"True",
")",
":",
"url",
"=",
"self",
".",
"reddit_session",
".",
"config",
"[",
"'sticky_submission'",
"]",
"data",
"=",
"{",
"'id'",
":",
"self",
".",
"fullname",
",",
"'state'",
":",
"True",
"}",
"if",
"not",
"bottom",
":",
"data",
"[",
"'num'",
"]",
"=",
"1",
"return",
"self",
".",
"reddit_session",
".",
"request_json",
"(",
"url",
",",
"data",
"=",
"data",
")"
]
| Sticky a post in its subreddit.
If there is already a stickied post in the designated slot it will be
unstickied.
:param bottom: Set this as the top or bottom sticky. If no top sticky
exists, this submission will become the top sticky regardless.
:returns: The json response from the server | [
"Sticky",
"a",
"post",
"in",
"its",
"subreddit",
"."
]
| python | train |
tamasgal/km3pipe | km3pipe/db.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L645-L648 | def unit(self, parameter):
"Get the unit for given parameter"
parameter = self._get_parameter_name(parameter).lower()
return self._parameters[parameter]['Unit'] | [
"def",
"unit",
"(",
"self",
",",
"parameter",
")",
":",
"parameter",
"=",
"self",
".",
"_get_parameter_name",
"(",
"parameter",
")",
".",
"lower",
"(",
")",
"return",
"self",
".",
"_parameters",
"[",
"parameter",
"]",
"[",
"'Unit'",
"]"
]
| Get the unit for given parameter | [
"Get",
"the",
"unit",
"for",
"given",
"parameter"
]
| python | train |
blockchain/api-v1-client-python | blockchain/blockexplorer.py | https://github.com/blockchain/api-v1-client-python/blob/52ea562f824f04303e75239364e06722bec8620f/blockchain/blockexplorer.py#L228-L250 | def get_blocks(time=None, pool_name=None, api_code=None):
"""Get a list of blocks for a specific day or mining pool.
Both parameters are optional but at least one is required.
:param int time: time in milliseconds
:param str pool_name: name of the mining pool
:param str api_code: Blockchain.info API code (optional)
:return: an array of :class:`SimpleBlock` objects
"""
resource = 'blocks/{0}?format=json'
if api_code is not None:
resource += '&api_code=' + api_code
if time is not None:
resource = resource.format(time)
elif pool_name is not None:
resource = resource.format(pool_name)
else:
resource = resource.format('')
response = util.call_api(resource)
json_response = json.loads(response)
return [SimpleBlock(b) for b in json_response['blocks']] | [
"def",
"get_blocks",
"(",
"time",
"=",
"None",
",",
"pool_name",
"=",
"None",
",",
"api_code",
"=",
"None",
")",
":",
"resource",
"=",
"'blocks/{0}?format=json'",
"if",
"api_code",
"is",
"not",
"None",
":",
"resource",
"+=",
"'&api_code='",
"+",
"api_code",
"if",
"time",
"is",
"not",
"None",
":",
"resource",
"=",
"resource",
".",
"format",
"(",
"time",
")",
"elif",
"pool_name",
"is",
"not",
"None",
":",
"resource",
"=",
"resource",
".",
"format",
"(",
"pool_name",
")",
"else",
":",
"resource",
"=",
"resource",
".",
"format",
"(",
"''",
")",
"response",
"=",
"util",
".",
"call_api",
"(",
"resource",
")",
"json_response",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"return",
"[",
"SimpleBlock",
"(",
"b",
")",
"for",
"b",
"in",
"json_response",
"[",
"'blocks'",
"]",
"]"
]
| Get a list of blocks for a specific day or mining pool.
Both parameters are optional but at least one is required.
:param int time: time in milliseconds
:param str pool_name: name of the mining pool
:param str api_code: Blockchain.info API code (optional)
:return: an array of :class:`SimpleBlock` objects | [
"Get",
"a",
"list",
"of",
"blocks",
"for",
"a",
"specific",
"day",
"or",
"mining",
"pool",
".",
"Both",
"parameters",
"are",
"optional",
"but",
"at",
"least",
"one",
"is",
"required",
"."
]
| python | train |
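A hedged usage sketch of get_blocks; the import path and pool name are illustrative assumptions, not part of the record above.

from blockchain import blockexplorer    # assumed package layout of api-v1-client-python

blocks = blockexplorer.get_blocks(pool_name='AntPool')   # pool name is a made-up example
for block in blocks[:3]:
    print(block.height, block.time)      # SimpleBlock fields, assumed from the client docs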
riga/tfdeploy | tfdeploy.py | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2077-L2082 | def Softmax(a):
"""
Softmax op.
"""
e = np.exp(a)
return np.divide(e, np.sum(e, axis=-1, keepdims=True)), | [
"def",
"Softmax",
"(",
"a",
")",
":",
"e",
"=",
"np",
".",
"exp",
"(",
"a",
")",
"return",
"np",
".",
"divide",
"(",
"e",
",",
"np",
".",
"sum",
"(",
"e",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
")",
","
]
| Softmax op. | [
"Softmax",
"op",
"."
]
| python | train |
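The op above exponentiates `a` directly, which can overflow for large activations. A common, numerically safer variant (not part of tfdeploy) subtracts the row-wise maximum first; the result is mathematically identical:

import numpy as np

def stable_softmax(a):
    # shifting by the max leaves softmax unchanged but keeps np.exp in range
    shifted = a - np.max(a, axis=-1, keepdims=True)
    e = np.exp(shifted)
    return np.divide(e, np.sum(e, axis=-1, keepdims=True))

print(stable_softmax(np.array([[1000.0, 1001.0, 1002.0]])))  # ~[[0.090, 0.245, 0.665]]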
tensorflow/tensor2tensor | tensor2tensor/utils/t2t_model.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2214-L2224 | def set_custom_getter_compose(custom_getter):
"""Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
"""
tf.get_variable_scope().set_custom_getter(
_compose_custom_getters(tf.get_variable_scope().custom_getter,
custom_getter)) | [
"def",
"set_custom_getter_compose",
"(",
"custom_getter",
")",
":",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"set_custom_getter",
"(",
"_compose_custom_getters",
"(",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"custom_getter",
",",
"custom_getter",
")",
")"
]
| Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter. | [
"Set",
"a",
"custom",
"getter",
"in",
"the",
"current",
"variable",
"scope",
"."
]
| python | train |
katerina7479/pypdflite | pypdflite/pdfdocument.py | https://github.com/katerina7479/pypdflite/blob/ac2501f30d6619eae9dea5644717575ca9263d0a/pypdflite/pdfdocument.py#L71-L86 | def _set_color_scheme(self, draw_color=None, fill_color=None, text_color=None):
""" Default color object is black letters
& black lines."""
if draw_color is None:
draw_color = PDFColor()
draw_color._set_type('d')
if fill_color is None:
fill_color = PDFColor()
fill_color._set_type('f')
if text_color is None:
text_color = PDFColor()
text_color._set_type('t')
self.draw_color = draw_color
self.fill_color = fill_color
self.text_color = text_color | [
"def",
"_set_color_scheme",
"(",
"self",
",",
"draw_color",
"=",
"None",
",",
"fill_color",
"=",
"None",
",",
"text_color",
"=",
"None",
")",
":",
"if",
"draw_color",
"is",
"None",
":",
"draw_color",
"=",
"PDFColor",
"(",
")",
"draw_color",
".",
"_set_type",
"(",
"'d'",
")",
"if",
"fill_color",
"is",
"None",
":",
"fill_color",
"=",
"PDFColor",
"(",
")",
"fill_color",
".",
"_set_type",
"(",
"'f'",
")",
"if",
"text_color",
"is",
"None",
":",
"text_color",
"=",
"PDFColor",
"(",
")",
"text_color",
".",
"_set_type",
"(",
"'t'",
")",
"self",
".",
"draw_color",
"=",
"draw_color",
"self",
".",
"fill_color",
"=",
"fill_color",
"self",
".",
"text_color",
"=",
"text_color"
]
| Default color object is black letters
& black lines. | [
"Default",
"color",
"object",
"is",
"black",
"letters",
"&",
"black",
"lines",
"."
]
| python | test |
tanghaibao/goatools | goatools/base.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/base.py#L175-L187 | def http_get(url, fout=None):
"""Download a file from http. Save it in a file named by fout"""
print('requests.get({URL}, stream=True)'.format(URL=url))
rsp = requests.get(url, stream=True)
if rsp.status_code == 200 and fout is not None:
with open(fout, 'wb') as prt:
for chunk in rsp: # .iter_content(chunk_size=128):
prt.write(chunk)
print(' WROTE: {F}\n'.format(F=fout))
else:
print(rsp.status_code, rsp.reason, url)
print(rsp.content)
return rsp | [
"def",
"http_get",
"(",
"url",
",",
"fout",
"=",
"None",
")",
":",
"print",
"(",
"'requests.get({URL}, stream=True)'",
".",
"format",
"(",
"URL",
"=",
"url",
")",
")",
"rsp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"rsp",
".",
"status_code",
"==",
"200",
"and",
"fout",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"fout",
",",
"'wb'",
")",
"as",
"prt",
":",
"for",
"chunk",
"in",
"rsp",
":",
"# .iter_content(chunk_size=128):",
"prt",
".",
"write",
"(",
"chunk",
")",
"print",
"(",
"' WROTE: {F}\\n'",
".",
"format",
"(",
"F",
"=",
"fout",
")",
")",
"else",
":",
"print",
"(",
"rsp",
".",
"status_code",
",",
"rsp",
".",
"reason",
",",
"url",
")",
"print",
"(",
"rsp",
".",
"content",
")",
"return",
"rsp"
]
| Download a file from http. Save it in a file named by fout | [
"Download",
"a",
"file",
"from",
"http",
".",
"Save",
"it",
"in",
"a",
"file",
"named",
"by",
"fout"
]
| python | train |
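A hedged usage sketch of http_get; the URL and output filename are examples only, and network access is required.

rsp = http_get('http://purl.obolibrary.org/obo/go/go-basic.obo', fout='go-basic.obo')
if rsp.status_code == 200:
    print('saved go-basic.obo')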
projectatomic/atomic-reactor | atomic_reactor/plugins/pre_reactor_config.py | https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/pre_reactor_config.py#L26-L42 | def get_config(workflow):
"""
Obtain configuration object
Does not fail
:return: ReactorConfig instance
"""
try:
workspace = workflow.plugin_workspace[ReactorConfigPlugin.key]
return workspace[WORKSPACE_CONF_KEY]
except KeyError:
# The plugin did not run or was not successful: use defaults
conf = ReactorConfig()
workspace = workflow.plugin_workspace.get(ReactorConfigPlugin.key, {})
workspace[WORKSPACE_CONF_KEY] = conf
workflow.plugin_workspace[ReactorConfigPlugin.key] = workspace
return conf | [
"def",
"get_config",
"(",
"workflow",
")",
":",
"try",
":",
"workspace",
"=",
"workflow",
".",
"plugin_workspace",
"[",
"ReactorConfigPlugin",
".",
"key",
"]",
"return",
"workspace",
"[",
"WORKSPACE_CONF_KEY",
"]",
"except",
"KeyError",
":",
"# The plugin did not run or was not successful: use defaults",
"conf",
"=",
"ReactorConfig",
"(",
")",
"workspace",
"=",
"workflow",
".",
"plugin_workspace",
".",
"get",
"(",
"ReactorConfigPlugin",
".",
"key",
",",
"{",
"}",
")",
"workspace",
"[",
"WORKSPACE_CONF_KEY",
"]",
"=",
"conf",
"workflow",
".",
"plugin_workspace",
"[",
"ReactorConfigPlugin",
".",
"key",
"]",
"=",
"workspace",
"return",
"conf"
]
| Obtain configuration object
Does not fail
:return: ReactorConfig instance | [
"Obtain",
"configuration",
"object",
"Does",
"not",
"fail"
]
| python | train |
Datary/scrapbag | scrapbag/geo/__init__.py | https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/geo/__init__.py#L35-L46 | def get_location(address=""):
"""
Retrieve location coordinates from an address introduced.
"""
coordinates = None
try:
geolocator = Nominatim()
location = geolocator.geocode(address)
coordinates = (location.latitude, location.longitude)
except Exception as ex:
logger.error('Fail get location - {}'.format(ex))
return coordinates | [
"def",
"get_location",
"(",
"address",
"=",
"\"\"",
")",
":",
"coordinates",
"=",
"None",
"try",
":",
"geolocator",
"=",
"Nominatim",
"(",
")",
"location",
"=",
"geolocator",
".",
"geocode",
"(",
"address",
")",
"coordinates",
"=",
"(",
"location",
".",
"latitude",
",",
"location",
".",
"longitude",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"'Fail get location - {}'",
".",
"format",
"(",
"ex",
")",
")",
"return",
"coordinates"
]
| Retrieve location coordinates from an address introduced. | [
"Retrieve",
"location",
"coordinates",
"from",
"an",
"address",
"introduced",
"."
]
| python | train |
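A hedged usage sketch of get_location; the address is arbitrary, and on any geocoding failure the function logs the error and returns None.

coords = get_location('1600 Amphitheatre Parkway, Mountain View, CA')
if coords is not None:
    latitude, longitude = coords
    print(latitude, longitude)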
saltstack/salt | salt/cli/daemons.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/daemons.py#L512-L525 | def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
:param exitcode
:param exitmsg
'''
if hasattr(self, 'minion') and 'proxymodule' in self.minion.opts:
proxy_fn = self.minion.opts['proxymodule'].loaded_base_name + '.shutdown'
self.minion.opts['proxymodule'][proxy_fn](self.minion.opts)
self.action_log_info('Shutting down')
super(ProxyMinion, self).shutdown(
exitcode, ('The Salt {0} is shutdown. {1}'.format(
self.__class__.__name__, (exitmsg or '')).strip())) | [
"def",
"shutdown",
"(",
"self",
",",
"exitcode",
"=",
"0",
",",
"exitmsg",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'minion'",
")",
"and",
"'proxymodule'",
"in",
"self",
".",
"minion",
".",
"opts",
":",
"proxy_fn",
"=",
"self",
".",
"minion",
".",
"opts",
"[",
"'proxymodule'",
"]",
".",
"loaded_base_name",
"+",
"'.shutdown'",
"self",
".",
"minion",
".",
"opts",
"[",
"'proxymodule'",
"]",
"[",
"proxy_fn",
"]",
"(",
"self",
".",
"minion",
".",
"opts",
")",
"self",
".",
"action_log_info",
"(",
"'Shutting down'",
")",
"super",
"(",
"ProxyMinion",
",",
"self",
")",
".",
"shutdown",
"(",
"exitcode",
",",
"(",
"'The Salt {0} is shutdown. {1}'",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"(",
"exitmsg",
"or",
"''",
")",
")",
".",
"strip",
"(",
")",
")",
")"
]
| If sub-classed, run any shutdown operations on this method.
:param exitcode
:param exitmsg | [
"If",
"sub",
"-",
"classed",
"run",
"any",
"shutdown",
"operations",
"on",
"this",
"method",
"."
]
| python | train |
ccxt/ccxt | python/ccxt/async_support/base/exchange.py | https://github.com/ccxt/ccxt/blob/23062efd7a5892c79b370c9d951c03cf8c0ddf23/python/ccxt/async_support/base/exchange.py#L117-L168 | async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
encoded_body = body.encode() if body else None
session_method = getattr(self.session, method.lower())
response = None
http_response = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
print("\nResponse:", method, url, response.status, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status, headers, http_response)
except socket.gaierror as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except concurrent.futures._base.TimeoutError as e:
self.raise_error(RequestTimeout, method, url, e, None)
except aiohttp.client_exceptions.ClientConnectionError as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except aiohttp.client_exceptions.ClientError as e: # base exception class
self.raise_error(ExchangeError, url, method, e, None)
self.handle_errors(response.status, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_errors(None, response.status, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method, headers, body)
if json_response is not None:
return json_response
return http_response | [
"async",
"def",
"fetch",
"(",
"self",
",",
"url",
",",
"method",
"=",
"'GET'",
",",
"headers",
"=",
"None",
",",
"body",
"=",
"None",
")",
":",
"request_headers",
"=",
"self",
".",
"prepare_request_headers",
"(",
"headers",
")",
"url",
"=",
"self",
".",
"proxy",
"+",
"url",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"\\nRequest:\"",
",",
"method",
",",
"url",
",",
"headers",
",",
"body",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s %s, Request: %s %s\"",
",",
"method",
",",
"url",
",",
"headers",
",",
"body",
")",
"encoded_body",
"=",
"body",
".",
"encode",
"(",
")",
"if",
"body",
"else",
"None",
"session_method",
"=",
"getattr",
"(",
"self",
".",
"session",
",",
"method",
".",
"lower",
"(",
")",
")",
"response",
"=",
"None",
"http_response",
"=",
"None",
"json_response",
"=",
"None",
"try",
":",
"async",
"with",
"session_method",
"(",
"yarl",
".",
"URL",
"(",
"url",
",",
"encoded",
"=",
"True",
")",
",",
"data",
"=",
"encoded_body",
",",
"headers",
"=",
"request_headers",
",",
"timeout",
"=",
"(",
"self",
".",
"timeout",
"/",
"1000",
")",
",",
"proxy",
"=",
"self",
".",
"aiohttp_proxy",
")",
"as",
"response",
":",
"http_response",
"=",
"await",
"response",
".",
"text",
"(",
")",
"json_response",
"=",
"self",
".",
"parse_json",
"(",
"http_response",
")",
"if",
"self",
".",
"is_json_encoded_object",
"(",
"http_response",
")",
"else",
"None",
"headers",
"=",
"response",
".",
"headers",
"if",
"self",
".",
"enableLastHttpResponse",
":",
"self",
".",
"last_http_response",
"=",
"http_response",
"if",
"self",
".",
"enableLastResponseHeaders",
":",
"self",
".",
"last_response_headers",
"=",
"headers",
"if",
"self",
".",
"enableLastJsonResponse",
":",
"self",
".",
"last_json_response",
"=",
"json_response",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"\\nResponse:\"",
",",
"method",
",",
"url",
",",
"response",
".",
"status",
",",
"headers",
",",
"http_response",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s %s, Response: %s %s %s\"",
",",
"method",
",",
"url",
",",
"response",
".",
"status",
",",
"headers",
",",
"http_response",
")",
"except",
"socket",
".",
"gaierror",
"as",
"e",
":",
"self",
".",
"raise_error",
"(",
"ExchangeNotAvailable",
",",
"url",
",",
"method",
",",
"e",
",",
"None",
")",
"except",
"concurrent",
".",
"futures",
".",
"_base",
".",
"TimeoutError",
"as",
"e",
":",
"self",
".",
"raise_error",
"(",
"RequestTimeout",
",",
"method",
",",
"url",
",",
"e",
",",
"None",
")",
"except",
"aiohttp",
".",
"client_exceptions",
".",
"ClientConnectionError",
"as",
"e",
":",
"self",
".",
"raise_error",
"(",
"ExchangeNotAvailable",
",",
"url",
",",
"method",
",",
"e",
",",
"None",
")",
"except",
"aiohttp",
".",
"client_exceptions",
".",
"ClientError",
"as",
"e",
":",
"# base exception class",
"self",
".",
"raise_error",
"(",
"ExchangeError",
",",
"url",
",",
"method",
",",
"e",
",",
"None",
")",
"self",
".",
"handle_errors",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"url",
",",
"method",
",",
"headers",
",",
"http_response",
",",
"json_response",
")",
"self",
".",
"handle_rest_errors",
"(",
"None",
",",
"response",
".",
"status",
",",
"http_response",
",",
"url",
",",
"method",
")",
"self",
".",
"handle_rest_response",
"(",
"http_response",
",",
"json_response",
",",
"url",
",",
"method",
",",
"headers",
",",
"body",
")",
"if",
"json_response",
"is",
"not",
"None",
":",
"return",
"json_response",
"return",
"http_response"
]
| Perform a HTTP request and return decoded JSON data | [
"Perform",
"a",
"HTTP",
"request",
"and",
"return",
"decoded",
"JSON",
"data"
]
| python | train |
croscon/fleaker | fleaker/orm.py | https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/orm.py#L83-L116 | def _discover_ideal_backend(orm_backend):
"""Auto-discover the ideal backend based on what is installed.
Right now, handles discovery of:
* PeeWee
* SQLAlchemy
Args:
orm_backend (str): The ``orm_backend`` value that was passed to the
``create_app`` function. That is, the ORM Backend the User
indicated they wanted to use.
Returns:
str|fleaker.missing.MissingSentinel: Returns a string for the ideal
backend if it found one, or :obj:`fleaker.MISSING` if we couldn't
find one.
Raises:
RuntimeError: Raised if no user provided ORM Backend is given and BOTH
PeeWee and SQLAlchemy are installed.
"""
if orm_backend:
return orm_backend
if peewee is not MISSING and sqlalchemy is not MISSING:
raise RuntimeError('Both PeeWee and SQLAlchemy detected as installed, '
'but no explicit backend provided! Please specify '
'one!')
if peewee is not MISSING:
return _PEEWEE_BACKEND
elif sqlalchemy is not MISSING:
return _SQLALCHEMY_BACKEND
else:
return MISSING | [
"def",
"_discover_ideal_backend",
"(",
"orm_backend",
")",
":",
"if",
"orm_backend",
":",
"return",
"orm_backend",
"if",
"peewee",
"is",
"not",
"MISSING",
"and",
"sqlalchemy",
"is",
"not",
"MISSING",
":",
"raise",
"RuntimeError",
"(",
"'Both PeeWee and SQLAlchemy detected as installed, '",
"'but no explicit backend provided! Please specify '",
"'one!'",
")",
"if",
"peewee",
"is",
"not",
"MISSING",
":",
"return",
"_PEEWEE_BACKEND",
"elif",
"sqlalchemy",
"is",
"not",
"MISSING",
":",
"return",
"_SQLALCHEMY_BACKEND",
"else",
":",
"return",
"MISSING"
]
| Auto-discover the ideal backend based on what is installed.
Right now, handles discovery of:
* PeeWee
* SQLAlchemy
Args:
orm_backend (str): The ``orm_backend`` value that was passed to the
``create_app`` function. That is, the ORM Backend the User
indicated they wanted to use.
Returns:
str|fleaker.missing.MissingSentinel: Returns a string for the ideal
backend if it found one, or :obj:`fleaker.MISSING` if we couldn't
find one.
Raises:
RuntimeError: Raised if no user provided ORM Backend is given and BOTH
PeeWee and SQLAlchemy are installed. | [
"Auto",
"-",
"discover",
"the",
"ideal",
"backend",
"based",
"on",
"what",
"is",
"installed",
"."
]
| python | train |
kislyuk/ensure | ensure/main.py | https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L369-L378 | def is_none_or(self):
"""
Ensures :attr:`subject` is either ``None``, or satisfies subsequent (chained) conditions::
Ensure(None).is_none_or.is_an(int)
"""
if self._subject is None:
return NoOpInspector(subject=self._subject, error_factory=self._error_factory)
else:
return self | [
"def",
"is_none_or",
"(",
"self",
")",
":",
"if",
"self",
".",
"_subject",
"is",
"None",
":",
"return",
"NoOpInspector",
"(",
"subject",
"=",
"self",
".",
"_subject",
",",
"error_factory",
"=",
"self",
".",
"_error_factory",
")",
"else",
":",
"return",
"self"
]
| Ensures :attr:`subject` is either ``None``, or satisfies subsequent (chained) conditions::
Ensure(None).is_none_or.is_an(int) | [
"Ensures",
":",
"attr",
":",
"subject",
"is",
"either",
"None",
"or",
"satisfies",
"subsequent",
"(",
"chained",
")",
"conditions",
"::"
]
| python | train |
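The chained usage from the docstring, expanded into a small sketch; the exact exception type raised on failure depends on the configured error factory.

from ensure import Ensure

Ensure(None).is_none_or.is_an(int)   # passes: subject is None, the chain becomes a no-op
Ensure(5).is_none_or.is_an(int)      # passes: subject really is an int
try:
    Ensure('x').is_none_or.is_an(int)
except Exception as err:
    print('rejected:', err)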
sci-bots/svg-model | svg_model/merge.py | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/merge.py#L56-L97 | def merge_svg_layers(svg_sources, share_transform=True):
'''
Merge layers from input svg sources into a single XML document.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
share_transform (bool) : If exactly one layer has a transform, apply it
to *all* other layers as well.
Returns:
StringIO.StringIO : File-like object containing merge XML document.
'''
# Get list of XML layers.
(width, height), layers = get_svg_layers(svg_sources)
if share_transform:
transforms = [layer_i.attrib['transform'] for layer_i in layers
if 'transform' in layer_i.attrib]
if len(transforms) > 1:
raise ValueError('Transform can only be shared if *exactly one* '
'layer has a transform ({} layers have '
'`transform` attributes)'.format(len(transforms)))
elif transforms:
# Apply single common transform to all layers.
for layer_i in layers:
layer_i.attrib['transform'] = transforms[0]
# Create blank XML output document.
dwg = svgwrite.Drawing(profile='tiny', debug=False, size=(width, height))
# Add append layers to output XML root element.
output_svg_root = etree.fromstring(dwg.tostring())
output_svg_root.extend(layers)
# Write merged XML document to output file-like object.
output = StringIO.StringIO()
output.write(etree.tostring(output_svg_root))
output.seek(0)
return output | [
"def",
"merge_svg_layers",
"(",
"svg_sources",
",",
"share_transform",
"=",
"True",
")",
":",
"# Get list of XML layers.",
"(",
"width",
",",
"height",
")",
",",
"layers",
"=",
"get_svg_layers",
"(",
"svg_sources",
")",
"if",
"share_transform",
":",
"transforms",
"=",
"[",
"layer_i",
".",
"attrib",
"[",
"'transform'",
"]",
"for",
"layer_i",
"in",
"layers",
"if",
"'transform'",
"in",
"layer_i",
".",
"attrib",
"]",
"if",
"len",
"(",
"transforms",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Transform can only be shared if *exactly one* '",
"'layer has a transform ({} layers have '",
"'`transform` attributes)'",
".",
"format",
"(",
"len",
"(",
"transforms",
")",
")",
")",
"elif",
"transforms",
":",
"# Apply single common transform to all layers.",
"for",
"layer_i",
"in",
"layers",
":",
"layer_i",
".",
"attrib",
"[",
"'transform'",
"]",
"=",
"transforms",
"[",
"0",
"]",
"# Create blank XML output document.",
"dwg",
"=",
"svgwrite",
".",
"Drawing",
"(",
"profile",
"=",
"'tiny'",
",",
"debug",
"=",
"False",
",",
"size",
"=",
"(",
"width",
",",
"height",
")",
")",
"# Add append layers to output XML root element.",
"output_svg_root",
"=",
"etree",
".",
"fromstring",
"(",
"dwg",
".",
"tostring",
"(",
")",
")",
"output_svg_root",
".",
"extend",
"(",
"layers",
")",
"# Write merged XML document to output file-like object.",
"output",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"output",
".",
"write",
"(",
"etree",
".",
"tostring",
"(",
"output_svg_root",
")",
")",
"output",
".",
"seek",
"(",
"0",
")",
"return",
"output"
]
| Merge layers from input svg sources into a single XML document.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
share_transform (bool) : If exactly one layer has a transform, apply it
to *all* other layers as well.
Returns:
StringIO.StringIO : File-like object containing merge XML document. | [
"Merge",
"layers",
"from",
"input",
"svg",
"sources",
"into",
"a",
"single",
"XML",
"document",
"."
]
| python | train |
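A hedged usage sketch of merge_svg_layers; the filenames are placeholders, and the returned object is an in-memory file-like buffer holding the merged document.

with open('layer_a.svg') as svg_a, open('layer_b.svg') as svg_b:
    merged = merge_svg_layers([svg_a, svg_b])

with open('merged.svg', 'w') as out:
    out.write(merged.read())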
iotile/coretools | iotilecore/iotile/core/utilities/linebuffer_ui.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/linebuffer_ui.py#L62-L70 | def run(self, refresh_interval=0.05):
"""Set up the loop, check that the tool is installed"""
try:
from asciimatics.screen import Screen
except ImportError:
raise ExternalError("You must have asciimatics installed to use LinebufferUI",
suggestion="pip install iotilecore[ui]")
Screen.wrapper(self._run_loop, arguments=[refresh_interval]) | [
"def",
"run",
"(",
"self",
",",
"refresh_interval",
"=",
"0.05",
")",
":",
"try",
":",
"from",
"asciimatics",
".",
"screen",
"import",
"Screen",
"except",
"ImportError",
":",
"raise",
"ExternalError",
"(",
"\"You must have asciimatics installed to use LinebufferUI\"",
",",
"suggestion",
"=",
"\"pip install iotilecore[ui]\"",
")",
"Screen",
".",
"wrapper",
"(",
"self",
".",
"_run_loop",
",",
"arguments",
"=",
"[",
"refresh_interval",
"]",
")"
]
| Set up the loop, check that the tool is installed | [
"Set",
"up",
"the",
"loop",
"check",
"that",
"the",
"tool",
"is",
"installed"
]
| python | train |
tensorforce/tensorforce | tensorforce/agents/agent.py | https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/agents/agent.py#L166-L197 | def observe(self, terminal, reward, index=0):
"""
Observe experience from the environment to learn from. Optionally pre-processes rewards
Child classes should call super to get the processed reward
EX: terminal, reward = super()...
Args:
terminal (bool): boolean indicating if the episode terminated after the observation.
reward (float): scalar reward that resulted from executing the action.
"""
self.current_terminal = terminal
self.current_reward = reward
if self.batched_observe:
# Batched observe for better performance with Python.
self.observe_terminal[index].append(self.current_terminal)
self.observe_reward[index].append(self.current_reward)
if self.current_terminal or len(self.observe_terminal[index]) >= self.batching_capacity:
self.episode = self.model.observe(
terminal=self.observe_terminal[index],
reward=self.observe_reward[index],
index=index
)
self.observe_terminal[index] = list()
self.observe_reward[index] = list()
else:
self.episode = self.model.observe(
terminal=self.current_terminal,
reward=self.current_reward
) | [
"def",
"observe",
"(",
"self",
",",
"terminal",
",",
"reward",
",",
"index",
"=",
"0",
")",
":",
"self",
".",
"current_terminal",
"=",
"terminal",
"self",
".",
"current_reward",
"=",
"reward",
"if",
"self",
".",
"batched_observe",
":",
"# Batched observe for better performance with Python.",
"self",
".",
"observe_terminal",
"[",
"index",
"]",
".",
"append",
"(",
"self",
".",
"current_terminal",
")",
"self",
".",
"observe_reward",
"[",
"index",
"]",
".",
"append",
"(",
"self",
".",
"current_reward",
")",
"if",
"self",
".",
"current_terminal",
"or",
"len",
"(",
"self",
".",
"observe_terminal",
"[",
"index",
"]",
")",
">=",
"self",
".",
"batching_capacity",
":",
"self",
".",
"episode",
"=",
"self",
".",
"model",
".",
"observe",
"(",
"terminal",
"=",
"self",
".",
"observe_terminal",
"[",
"index",
"]",
",",
"reward",
"=",
"self",
".",
"observe_reward",
"[",
"index",
"]",
",",
"index",
"=",
"index",
")",
"self",
".",
"observe_terminal",
"[",
"index",
"]",
"=",
"list",
"(",
")",
"self",
".",
"observe_reward",
"[",
"index",
"]",
"=",
"list",
"(",
")",
"else",
":",
"self",
".",
"episode",
"=",
"self",
".",
"model",
".",
"observe",
"(",
"terminal",
"=",
"self",
".",
"current_terminal",
",",
"reward",
"=",
"self",
".",
"current_reward",
")"
]
| Observe experience from the environment to learn from. Optionally pre-processes rewards
Child classes should call super to get the processed reward
EX: terminal, reward = super()...
Args:
terminal (bool): boolean indicating if the episode terminated after the observation.
reward (float): scalar reward that resulted from executing the action. | [
"Observe",
"experience",
"from",
"the",
"environment",
"to",
"learn",
"from",
".",
"Optionally",
"pre",
"-",
"processes",
"rewards",
"Child",
"classes",
"should",
"call",
"super",
"to",
"get",
"the",
"processed",
"reward",
"EX",
":",
"terminal",
"reward",
"=",
"super",
"()",
"..."
]
| python | valid |
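A hedged sketch of the act/observe loop this method participates in; agent and environment construction are omitted, and the environment API shown is an assumption.

state = environment.reset()
terminal = False
while not terminal:
    action = agent.act(states=state)
    state, terminal, reward = environment.execute(action=action)   # assumed return order
    agent.observe(terminal=terminal, reward=reward)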
log2timeline/plaso | plaso/preprocessors/windows.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/preprocessors/windows.py#L226-L244 | def _ParseValueData(self, knowledge_base, value_data):
"""Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
if not isinstance(value_data, py2to3.UNICODE_TYPE):
raise errors.PreProcessFail(
'Unsupported Windows Registry value type: {0:s} for '
'artifact: {1:s}.'.format(
type(value_data), self.ARTIFACT_DEFINITION_NAME))
if not knowledge_base.GetHostname():
hostname_artifact = artifacts.HostnameArtifact(name=value_data)
knowledge_base.SetHostname(hostname_artifact) | [
"def",
"_ParseValueData",
"(",
"self",
",",
"knowledge_base",
",",
"value_data",
")",
":",
"if",
"not",
"isinstance",
"(",
"value_data",
",",
"py2to3",
".",
"UNICODE_TYPE",
")",
":",
"raise",
"errors",
".",
"PreProcessFail",
"(",
"'Unsupported Windows Registry value type: {0:s} for '",
"'artifact: {1:s}.'",
".",
"format",
"(",
"type",
"(",
"value_data",
")",
",",
"self",
".",
"ARTIFACT_DEFINITION_NAME",
")",
")",
"if",
"not",
"knowledge_base",
".",
"GetHostname",
"(",
")",
":",
"hostname_artifact",
"=",
"artifacts",
".",
"HostnameArtifact",
"(",
"name",
"=",
"value_data",
")",
"knowledge_base",
".",
"SetHostname",
"(",
"hostname_artifact",
")"
]
| Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails. | [
"Parses",
"Windows",
"Registry",
"value",
"data",
"for",
"a",
"preprocessing",
"attribute",
"."
]
| python | train |
seomoz/reppy | reppy/cache/__init__.py | https://github.com/seomoz/reppy/blob/4cfa55894859a2eb2e656f191aeda5981c4df550/reppy/cache/__init__.py#L81-L83 | def allowed(self, url, agent):
'''Return true if the provided URL is allowed to agent.'''
return self.get(url).allowed(url, agent) | [
"def",
"allowed",
"(",
"self",
",",
"url",
",",
"agent",
")",
":",
"return",
"self",
".",
"get",
"(",
"url",
")",
".",
"allowed",
"(",
"url",
",",
"agent",
")"
]
| Return true if the provided URL is allowed to agent. | [
"Return",
"true",
"if",
"the",
"provided",
"URL",
"is",
"allowed",
"to",
"agent",
"."
]
| python | train |
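A hedged usage sketch of the cache above; the import path follows the record's file path, while the constructor argument and user agent string are assumptions.

from reppy.cache import RobotsCache

robots = RobotsCache(capacity=100)        # capacity argument is an assumption
if robots.allowed('http://example.com/page', 'my-crawler'):
    print('fetch allowed')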
NiklasRosenstein-Python/nr-deprecated | nr/tundras/field.py | https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/tundras/field.py#L106-L115 | def check_type(self, value):
"""
Raises a #TypeError if *value* is not an instance of the field's #type.
"""
if self.null and value is None:
return
if self.type is not None and not isinstance(value, self.type):
msg = '{0!r} expected type {1}'
raise TypeError(msg.format(self.full_name, self.type.__name__)) | [
"def",
"check_type",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"null",
"and",
"value",
"is",
"None",
":",
"return",
"if",
"self",
".",
"type",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"self",
".",
"type",
")",
":",
"msg",
"=",
"'{0!r} expected type {1}'",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"self",
".",
"full_name",
",",
"self",
".",
"type",
".",
"__name__",
")",
")"
]
| Raises a #TypeError if *value* is not an instance of the field's #type. | [
"Raises",
"a",
"#TypeError",
"if",
"*",
"value",
"*",
"is",
"not",
"an",
"instance",
"of",
"the",
"field",
"s",
"#type",
"."
]
| python | train |
swimlane/swimlane-python | swimlane/core/fields/reference.py | https://github.com/swimlane/swimlane-python/blob/588fc503a76799bcdb5aecdf2f64a6ee05e3922d/swimlane/core/fields/reference.py#L101-L117 | def set_swimlane(self, value):
"""Store record ids in separate location for later use, but ignore initial value"""
# Move single record into list to be handled the same by cursor class
if not self.multiselect:
if value and not isinstance(value, list):
value = [value]
# Values come in as a list of record ids or None
value = value or []
records = SortedDict()
for record_id in value:
records[record_id] = self._unset
return super(ReferenceField, self).set_swimlane(records) | [
"def",
"set_swimlane",
"(",
"self",
",",
"value",
")",
":",
"# Move single record into list to be handled the same by cursor class",
"if",
"not",
"self",
".",
"multiselect",
":",
"if",
"value",
"and",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"[",
"value",
"]",
"# Values come in as a list of record ids or None",
"value",
"=",
"value",
"or",
"[",
"]",
"records",
"=",
"SortedDict",
"(",
")",
"for",
"record_id",
"in",
"value",
":",
"records",
"[",
"record_id",
"]",
"=",
"self",
".",
"_unset",
"return",
"super",
"(",
"ReferenceField",
",",
"self",
")",
".",
"set_swimlane",
"(",
"records",
")"
]
| Store record ids in separate location for later use, but ignore initial value | [
"Store",
"record",
"ids",
"in",
"separate",
"location",
"for",
"later",
"use",
"but",
"ignore",
"initial",
"value"
]
| python | train |
Zaeb0s/max-threads | maxthreads/maxthreads.py | https://github.com/Zaeb0s/max-threads/blob/dce4ae784aa1c07fdb910359c0099907047403f9/maxthreads/maxthreads.py#L107-L141 | def add_task(self, target, args=(), kwargs=None, priority=None):
"""
Args:
target: A callable object to be invoked
args: Arguments sent to the callable object upon invocation
kwargs: Keyword arguments sent to the callable object upon invocation
priority: Determines where to put the callable object in the list of tasks, Can be any type of object that
is comparable using comparison operators (lower = higher priority)
Returns:
If a new thread was started returns the threading object otherwise returns None
Raises:
RuntimeError: If trying to add new task after closing object
"""
if self._stop:
raise RuntimeError("Can't add new task, the MaxThreads object is in closing/closed state")
new_thread = None
if (self.threads_active() < self._max_threads or not self._limit) \
and (self._threads_waiting == 0 and self._queue.qsize() > 0):
# The number of active threads is less than maximum number of threads
# OR there is no limit on the maximum number of threads
# AND there are no threads in waiting state
# i.e. start a new thread
new_thread = self._start_loop_thread()
self._queue.put(
SetPrio(target=target,
args=args,
kwargs=kwargs or {},
priority=priority or 0)
)
return new_thread | [
"def",
"add_task",
"(",
"self",
",",
"target",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"None",
",",
"priority",
"=",
"None",
")",
":",
"if",
"self",
".",
"_stop",
":",
"raise",
"RuntimeError",
"(",
"\"Can't add new task, the MaxThreads object is in closing/closed state\"",
")",
"new_thread",
"=",
"None",
"if",
"(",
"self",
".",
"threads_active",
"(",
")",
"<",
"self",
".",
"_max_threads",
"or",
"not",
"self",
".",
"_limit",
")",
"and",
"(",
"self",
".",
"_threads_waiting",
"==",
"0",
"and",
"self",
".",
"_queue",
".",
"qsize",
"(",
")",
">",
"0",
")",
":",
"# The number of active threads is less than maximum number of threads",
"# OR there is no limit on the maximum number of threads",
"# AND there are no threads in waiting state",
"# i.e. start a new thread",
"new_thread",
"=",
"self",
".",
"_start_loop_thread",
"(",
")",
"self",
".",
"_queue",
".",
"put",
"(",
"SetPrio",
"(",
"target",
"=",
"target",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
"or",
"{",
"}",
",",
"priority",
"=",
"priority",
"or",
"0",
")",
")",
"return",
"new_thread"
]
| Args:
target: A callable object to be invoked
args: Arguments sent to the callable object upon invocation
kwargs: Keyword arguments sent to the callable object upon invocation
priority: Determines where to put the callable object in the list of tasks, Can be any type of object that
is comparable using comparison operators (lower = higher priority)
Returns:
If a new thread was started returns the threading object otherwise returns None
Raises:
RuntimeError: If trying to add new task after closing object | [
"Args",
":",
"target",
":",
"A",
"callable",
"object",
"to",
"be",
"invoked",
"args",
":",
"Arguments",
"sent",
"to",
"the",
"callable",
"object",
"upon",
"invocation",
"kwargs",
":",
"Keyword",
"arguments",
"sent",
"to",
"the",
"callable",
"object",
"upon",
"invocation",
"priority",
":",
"Determines",
"where",
"to",
"put",
"the",
"callable",
"object",
"in",
"the",
"list",
"of",
"tasks",
"Can",
"be",
"any",
"type",
"of",
"object",
"that",
"is",
"comparable",
"using",
"comparison",
"operators",
"(",
"lower",
"=",
"higher",
"priority",
")",
"Returns",
":",
"If",
"a",
"new",
"thread",
"was",
"started",
"returns",
"the",
"threading",
"object",
"otherwise",
"returns",
"None",
"Raises",
":",
"RuntimeError",
":",
"If",
"trying",
"to",
"add",
"new",
"task",
"after",
"closing",
"object"
]
| python | train |
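A hedged usage sketch of add_task; only the method signature comes from the code above, the pool constructor is an assumption.

import time

pool = MaxThreads(2)                      # assumed constructor: cap at two worker threads

def work(name, delay=0.1):
    time.sleep(delay)
    print('done:', name)

pool.add_task(work, args=('slow',), kwargs={'delay': 0.2}, priority=1)
pool.add_task(work, args=('urgent',), priority=0)   # lower value means higher priority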
mozilla/amo-validator | validator/errorbundler.py | https://github.com/mozilla/amo-validator/blob/0251bfbd7d93106e01ecdb6de5fcd1dc1a180664/validator/errorbundler.py#L161-L174 | def drop_message(self, message):
"""Drop the given message object from the appropriate message list.
Returns True if the message was found, otherwise False."""
for type_ in 'errors', 'warnings', 'notices':
list_ = getattr(self, type_)
if message in list_:
list_.remove(message)
if 'signing_severity' in message:
self.signing_summary[message['signing_severity']] -= 1
return True
return False | [
"def",
"drop_message",
"(",
"self",
",",
"message",
")",
":",
"for",
"type_",
"in",
"'errors'",
",",
"'warnings'",
",",
"'notices'",
":",
"list_",
"=",
"getattr",
"(",
"self",
",",
"type_",
")",
"if",
"message",
"in",
"list_",
":",
"list_",
".",
"remove",
"(",
"message",
")",
"if",
"'signing_severity'",
"in",
"message",
":",
"self",
".",
"signing_summary",
"[",
"message",
"[",
"'signing_severity'",
"]",
"]",
"-=",
"1",
"return",
"True",
"return",
"False"
]
| Drop the given message object from the appropriate message list.
Returns True if the message was found, otherwise False. | [
"Drop",
"the",
"given",
"message",
"object",
"from",
"the",
"appropriate",
"message",
"list",
"."
]
| python | train |
rstoneback/pysat | pysat/utils.py | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/utils.py#L78-L362 | def load_netcdf4(fnames=None, strict_meta=False, file_format=None, epoch_name='Epoch',
units_label='units', name_label='long_name',
notes_label='notes', desc_label='desc',
plot_label='label', axis_label='axis',
scale_label='scale',
min_label='value_min', max_label='value_max',
fill_label='fill'):
# unix_time=False, **kwargs):
"""Load netCDF-3/4 file produced by pysat.
Parameters
----------
fnames : string or array_like of strings
filenames to load
strict_meta : boolean
check if metadata across fnames is the same
file_format : string
file_format keyword passed to netCDF4 routine
NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4
Returns
--------
out : pandas.core.frame.DataFrame
DataFrame output
mdata : pysat._meta.Meta
Meta data
"""
import netCDF4
import string
import pysat
if fnames is None:
raise ValueError("Must supply a filename/list of filenames")
if isinstance(fnames, basestring):
fnames = [fnames]
if file_format is None:
file_format = 'NETCDF4'
else:
file_format = file_format.upper()
saved_mdata = None
running_idx = 0
running_store=[]
two_d_keys = []; two_d_dims = []; three_d_keys = []; three_d_dims = [];
for fname in fnames:
with netCDF4.Dataset(fname, mode='r', format=file_format) as data:
# build up dictionary with all global ncattrs
# and add those attributes to a pysat meta object
ncattrsList = data.ncattrs()
mdata = pysat.Meta(units_label=units_label, name_label=name_label,
notes_label=notes_label, desc_label=desc_label,
plot_label=plot_label, axis_label=axis_label,
scale_label=scale_label,
min_label=min_label, max_label=max_label,
fill_label=fill_label)
for d in ncattrsList:
if hasattr(mdata, d):
mdata.__setattr__(d+'_', data.getncattr(d))
else:
mdata.__setattr__(d, data.getncattr(d))
            # load up all of the variables in the netCDF
loadedVars = {}
for key in data.variables.keys():
# load up metadata. From here group unique
# dimensions and act accordingly, 1D, 2D, 3D
if len(data.variables[key].dimensions) == 1:
# load 1D data variable
# assuming basic time dimension
loadedVars[key] = data.variables[key][:]
# if key != epoch_name:
# load up metadata
meta_dict = {}
for nc_key in data.variables[key].ncattrs():
meta_dict[nc_key] = data.variables[key].getncattr(nc_key)
mdata[key] = meta_dict
if len(data.variables[key].dimensions) == 2:
# part of dataframe within dataframe
two_d_keys.append(key)
two_d_dims.append(data.variables[key].dimensions)
if len(data.variables[key].dimensions) == 3:
# part of full/dedicated dataframe within dataframe
three_d_keys.append(key)
three_d_dims.append(data.variables[key].dimensions)
# we now have a list of keys that need to go into a dataframe,
# could be more than one, collect unique dimensions for 2D keys
for dim in set(two_d_dims):
# first dimension should be epoch
# second dimension name used as variable name
obj_key_name = dim[1]
# collect variable names associated with dimension
idx_bool = [dim == i for i in two_d_dims]
idx, = np.where(np.array(idx_bool))
obj_var_keys = []
clean_var_keys = []
for i in idx:
obj_var_keys.append(two_d_keys[i])
clean_var_keys.append(two_d_keys[i].split(obj_key_name+'_')[-1])
# figure out how to index this data, it could provide its own
# index - or we may have to create simple integer based DataFrame access
# if the dimension is stored as its own variable then use that info for index
if obj_key_name in obj_var_keys:
                # string used to identify dimension also in data.variables
# will be used as an index
index_key_name = obj_key_name
# if the object index uses UNIX time, process into datetime index
if data.variables[obj_key_name].getncattr(name_label) == epoch_name:
# name to be used in DataFrame index
index_name = epoch_name
time_index_flag = True
else:
time_index_flag = False
# label to be used in DataFrame index
index_name = data.variables[obj_key_name].getncattr(name_label)
else:
# dimension is not itself a variable
index_key_name = None
# iterate over the variables and grab metadata
dim_meta_data = pysat.Meta(units_label=units_label, name_label=name_label,
notes_label=notes_label, desc_label=desc_label,
plot_label=plot_label, axis_label=axis_label,
scale_label=scale_label,
min_label=min_label, max_label=max_label,
fill_label=fill_label)
for key, clean_key in zip(obj_var_keys, clean_var_keys):
                # store attributes in metadata, except for dim name
meta_dict = {}
for nc_key in data.variables[key].ncattrs():
meta_dict[nc_key] = data.variables[key].getncattr(nc_key)
dim_meta_data[clean_key] = meta_dict
# print (dim_meta_data)
dim_meta_dict = {'meta':dim_meta_data}
if index_key_name is not None:
# add top level meta
for nc_key in data.variables[obj_key_name].ncattrs():
dim_meta_dict[nc_key] = data.variables[obj_key_name].getncattr(nc_key)
mdata[obj_key_name] = dim_meta_dict
# iterate over all variables with this dimension and store data
# data storage, whole shebang
loop_dict = {}
# list holds a series of slices, parsed from dict above
loop_list = []
for key, clean_key in zip(obj_var_keys, clean_var_keys):
# data
loop_dict[clean_key] = data.variables[key][:,:].flatten(order='C')
# number of values in time
loop_lim = data.variables[obj_var_keys[0]].shape[0]
# number of values per time
step_size = len(data.variables[obj_var_keys[0]][0,:])
# check if there is an index we should use
if not (index_key_name is None):
# an index was found
time_var = loop_dict.pop(index_key_name)
if time_index_flag:
# create datetime index from data
if file_format == 'NETCDF4':
time_var = pds.to_datetime(1E6*time_var)
else:
time_var = pds.to_datetime(1E6*time_var)
new_index = time_var
new_index_name = index_name
else:
# using integer indexing
new_index = np.arange(loop_lim*step_size, dtype=int) % step_size
new_index_name = 'index'
# load all data into frame
if len(loop_dict.keys()) > 1:
loop_frame = pds.DataFrame(loop_dict, columns=clean_var_keys)
if obj_key_name in loop_frame:
del loop_frame[obj_key_name]
# break massive frame into bunch of smaller frames
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1),:])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
else:
loop_frame = pds.Series(loop_dict[clean_var_keys[0]], name=obj_var_keys[0])
# break massive series into bunch of smaller series
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1)])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
# print (loop_frame.columns)
# add 2D object data, all based on a unique dimension within
# netCDF, to loaded data dictionary
loadedVars[obj_key_name] = loop_list
del loop_list
# we now have a list of keys that need to go into a dataframe,
            # could be more than one, collect unique dimensions for 3D keys
for dim in set(three_d_dims):
# collect variable names associated with dimension
idx_bool = [dim == i for i in three_d_dims]
idx, = np.where(np.array(idx_bool))
obj_var_keys = []
for i in idx:
obj_var_keys.append(three_d_keys[i])
for obj_key_name in obj_var_keys:
# store attributes in metadata
meta_dict = {}
for nc_key in data.variables[obj_key_name].ncattrs():
meta_dict[nc_key] = data.variables[obj_key_name].getncattr(nc_key)
mdata[obj_key_name] = meta_dict
# iterate over all variables with this dimension and store data
# data storage, whole shebang
loop_dict = {}
# list holds a series of slices, parsed from dict above
loop_list = []
loop_dict[obj_key_name] = data.variables[obj_key_name][:,:,:]
# number of values in time
loop_lim = data.variables[obj_key_name].shape[0]
# number of values per time
step_size_x = len(data.variables[obj_key_name][0, :, 0])
step_size_y = len(data.variables[obj_key_name][0, 0, :])
step_size = step_size_x
loop_dict[obj_key_name] = loop_dict[obj_key_name].reshape((loop_lim*step_size_x, step_size_y))
# check if there is an index we should use
if not (index_key_name is None):
# an index was found
time_var = loop_dict.pop(index_key_name)
if time_index_flag:
# create datetime index from data
if file_format == 'NETCDF4':
time_var = pds.to_datetime(1E6*time_var)
else:
time_var = pds.to_datetime(1E6*time_var)
new_index = time_var
new_index_name = index_name
else:
# using integer indexing
new_index = np.arange(loop_lim*step_size, dtype=int) % step_size
new_index_name = 'index'
# load all data into frame
loop_frame = pds.DataFrame(loop_dict[obj_key_name])
# del loop_frame['dimension_1']
# break massive frame into bunch of smaller frames
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1),:])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
# add 2D object data, all based on a unique dimension within netCDF,
# to loaded data dictionary
loadedVars[obj_key_name] = loop_list
del loop_list
# prepare dataframe index for this netcdf file
time_var = loadedVars.pop(epoch_name)
# convert from GPS seconds to seconds used in pandas (unix time,
# no leap)
#time_var = convert_gps_to_unix_seconds(time_var)
if file_format == 'NETCDF4':
loadedVars[epoch_name] = pds.to_datetime((1E6 *
time_var).astype(int))
else:
loadedVars[epoch_name] = pds.to_datetime((time_var *
1E6).astype(int))
#loadedVars[epoch_name] = pds.to_datetime((time_var*1E6).astype(int))
running_store.append(loadedVars)
running_idx += len(loadedVars[epoch_name])
if strict_meta:
if saved_mdata is None:
saved_mdata = copy.deepcopy(mdata)
elif (mdata != saved_mdata):
raise ValueError('Metadata across filenames is not the ' +
'same.')
# combine all of the data loaded across files together
out = []
for item in running_store:
out.append(pds.DataFrame.from_records(item, index=epoch_name))
out = pds.concat(out, axis=0)
return out, mdata | [
"def",
"load_netcdf4",
"(",
"fnames",
"=",
"None",
",",
"strict_meta",
"=",
"False",
",",
"file_format",
"=",
"None",
",",
"epoch_name",
"=",
"'Epoch'",
",",
"units_label",
"=",
"'units'",
",",
"name_label",
"=",
"'long_name'",
",",
"notes_label",
"=",
"'notes'",
",",
"desc_label",
"=",
"'desc'",
",",
"plot_label",
"=",
"'label'",
",",
"axis_label",
"=",
"'axis'",
",",
"scale_label",
"=",
"'scale'",
",",
"min_label",
"=",
"'value_min'",
",",
"max_label",
"=",
"'value_max'",
",",
"fill_label",
"=",
"'fill'",
")",
":",
"# unix_time=False, **kwargs):",
"import",
"netCDF4",
"import",
"string",
"import",
"pysat",
"if",
"fnames",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must supply a filename/list of filenames\"",
")",
"if",
"isinstance",
"(",
"fnames",
",",
"basestring",
")",
":",
"fnames",
"=",
"[",
"fnames",
"]",
"if",
"file_format",
"is",
"None",
":",
"file_format",
"=",
"'NETCDF4'",
"else",
":",
"file_format",
"=",
"file_format",
".",
"upper",
"(",
")",
"saved_mdata",
"=",
"None",
"running_idx",
"=",
"0",
"running_store",
"=",
"[",
"]",
"two_d_keys",
"=",
"[",
"]",
"two_d_dims",
"=",
"[",
"]",
"three_d_keys",
"=",
"[",
"]",
"three_d_dims",
"=",
"[",
"]",
"for",
"fname",
"in",
"fnames",
":",
"with",
"netCDF4",
".",
"Dataset",
"(",
"fname",
",",
"mode",
"=",
"'r'",
",",
"format",
"=",
"file_format",
")",
"as",
"data",
":",
"# build up dictionary with all global ncattrs",
"# and add those attributes to a pysat meta object",
"ncattrsList",
"=",
"data",
".",
"ncattrs",
"(",
")",
"mdata",
"=",
"pysat",
".",
"Meta",
"(",
"units_label",
"=",
"units_label",
",",
"name_label",
"=",
"name_label",
",",
"notes_label",
"=",
"notes_label",
",",
"desc_label",
"=",
"desc_label",
",",
"plot_label",
"=",
"plot_label",
",",
"axis_label",
"=",
"axis_label",
",",
"scale_label",
"=",
"scale_label",
",",
"min_label",
"=",
"min_label",
",",
"max_label",
"=",
"max_label",
",",
"fill_label",
"=",
"fill_label",
")",
"for",
"d",
"in",
"ncattrsList",
":",
"if",
"hasattr",
"(",
"mdata",
",",
"d",
")",
":",
"mdata",
".",
"__setattr__",
"(",
"d",
"+",
"'_'",
",",
"data",
".",
"getncattr",
"(",
"d",
")",
")",
"else",
":",
"mdata",
".",
"__setattr__",
"(",
"d",
",",
"data",
".",
"getncattr",
"(",
"d",
")",
")",
"# loadup all of the variables in the netCDF",
"loadedVars",
"=",
"{",
"}",
"for",
"key",
"in",
"data",
".",
"variables",
".",
"keys",
"(",
")",
":",
"# load up metadata. From here group unique ",
"# dimensions and act accordingly, 1D, 2D, 3D ",
"if",
"len",
"(",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"dimensions",
")",
"==",
"1",
":",
"# load 1D data variable",
"# assuming basic time dimension",
"loadedVars",
"[",
"key",
"]",
"=",
"data",
".",
"variables",
"[",
"key",
"]",
"[",
":",
"]",
"# if key != epoch_name:",
"# load up metadata",
"meta_dict",
"=",
"{",
"}",
"for",
"nc_key",
"in",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"ncattrs",
"(",
")",
":",
"meta_dict",
"[",
"nc_key",
"]",
"=",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"getncattr",
"(",
"nc_key",
")",
"mdata",
"[",
"key",
"]",
"=",
"meta_dict",
"if",
"len",
"(",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"dimensions",
")",
"==",
"2",
":",
"# part of dataframe within dataframe",
"two_d_keys",
".",
"append",
"(",
"key",
")",
"two_d_dims",
".",
"append",
"(",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"dimensions",
")",
"if",
"len",
"(",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"dimensions",
")",
"==",
"3",
":",
"# part of full/dedicated dataframe within dataframe",
"three_d_keys",
".",
"append",
"(",
"key",
")",
"three_d_dims",
".",
"append",
"(",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"dimensions",
")",
"# we now have a list of keys that need to go into a dataframe,",
"# could be more than one, collect unique dimensions for 2D keys",
"for",
"dim",
"in",
"set",
"(",
"two_d_dims",
")",
":",
"# first dimension should be epoch",
"# second dimension name used as variable name",
"obj_key_name",
"=",
"dim",
"[",
"1",
"]",
"# collect variable names associated with dimension",
"idx_bool",
"=",
"[",
"dim",
"==",
"i",
"for",
"i",
"in",
"two_d_dims",
"]",
"idx",
",",
"=",
"np",
".",
"where",
"(",
"np",
".",
"array",
"(",
"idx_bool",
")",
")",
"obj_var_keys",
"=",
"[",
"]",
"clean_var_keys",
"=",
"[",
"]",
"for",
"i",
"in",
"idx",
":",
"obj_var_keys",
".",
"append",
"(",
"two_d_keys",
"[",
"i",
"]",
")",
"clean_var_keys",
".",
"append",
"(",
"two_d_keys",
"[",
"i",
"]",
".",
"split",
"(",
"obj_key_name",
"+",
"'_'",
")",
"[",
"-",
"1",
"]",
")",
"# figure out how to index this data, it could provide its own",
"# index - or we may have to create simple integer based DataFrame access",
"# if the dimension is stored as its own variable then use that info for index",
"if",
"obj_key_name",
"in",
"obj_var_keys",
":",
"# string used to indentify dimension also in data.variables ",
"# will be used as an index ",
"index_key_name",
"=",
"obj_key_name",
"# if the object index uses UNIX time, process into datetime index ",
"if",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
".",
"getncattr",
"(",
"name_label",
")",
"==",
"epoch_name",
":",
"# name to be used in DataFrame index",
"index_name",
"=",
"epoch_name",
"time_index_flag",
"=",
"True",
"else",
":",
"time_index_flag",
"=",
"False",
"# label to be used in DataFrame index",
"index_name",
"=",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
".",
"getncattr",
"(",
"name_label",
")",
"else",
":",
"# dimension is not itself a variable",
"index_key_name",
"=",
"None",
"# iterate over the variables and grab metadata",
"dim_meta_data",
"=",
"pysat",
".",
"Meta",
"(",
"units_label",
"=",
"units_label",
",",
"name_label",
"=",
"name_label",
",",
"notes_label",
"=",
"notes_label",
",",
"desc_label",
"=",
"desc_label",
",",
"plot_label",
"=",
"plot_label",
",",
"axis_label",
"=",
"axis_label",
",",
"scale_label",
"=",
"scale_label",
",",
"min_label",
"=",
"min_label",
",",
"max_label",
"=",
"max_label",
",",
"fill_label",
"=",
"fill_label",
")",
"for",
"key",
",",
"clean_key",
"in",
"zip",
"(",
"obj_var_keys",
",",
"clean_var_keys",
")",
":",
"# store attributes in metadata, exept for dim name",
"meta_dict",
"=",
"{",
"}",
"for",
"nc_key",
"in",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"ncattrs",
"(",
")",
":",
"meta_dict",
"[",
"nc_key",
"]",
"=",
"data",
".",
"variables",
"[",
"key",
"]",
".",
"getncattr",
"(",
"nc_key",
")",
"dim_meta_data",
"[",
"clean_key",
"]",
"=",
"meta_dict",
"# print (dim_meta_data)",
"dim_meta_dict",
"=",
"{",
"'meta'",
":",
"dim_meta_data",
"}",
"if",
"index_key_name",
"is",
"not",
"None",
":",
"# add top level meta",
"for",
"nc_key",
"in",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
".",
"ncattrs",
"(",
")",
":",
"dim_meta_dict",
"[",
"nc_key",
"]",
"=",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
".",
"getncattr",
"(",
"nc_key",
")",
"mdata",
"[",
"obj_key_name",
"]",
"=",
"dim_meta_dict",
"# iterate over all variables with this dimension and store data",
"# data storage, whole shebang",
"loop_dict",
"=",
"{",
"}",
"# list holds a series of slices, parsed from dict above",
"loop_list",
"=",
"[",
"]",
"for",
"key",
",",
"clean_key",
"in",
"zip",
"(",
"obj_var_keys",
",",
"clean_var_keys",
")",
":",
"# data",
"loop_dict",
"[",
"clean_key",
"]",
"=",
"data",
".",
"variables",
"[",
"key",
"]",
"[",
":",
",",
":",
"]",
".",
"flatten",
"(",
"order",
"=",
"'C'",
")",
"# number of values in time",
"loop_lim",
"=",
"data",
".",
"variables",
"[",
"obj_var_keys",
"[",
"0",
"]",
"]",
".",
"shape",
"[",
"0",
"]",
"# number of values per time",
"step_size",
"=",
"len",
"(",
"data",
".",
"variables",
"[",
"obj_var_keys",
"[",
"0",
"]",
"]",
"[",
"0",
",",
":",
"]",
")",
"# check if there is an index we should use",
"if",
"not",
"(",
"index_key_name",
"is",
"None",
")",
":",
"# an index was found",
"time_var",
"=",
"loop_dict",
".",
"pop",
"(",
"index_key_name",
")",
"if",
"time_index_flag",
":",
"# create datetime index from data",
"if",
"file_format",
"==",
"'NETCDF4'",
":",
"time_var",
"=",
"pds",
".",
"to_datetime",
"(",
"1E6",
"*",
"time_var",
")",
"else",
":",
"time_var",
"=",
"pds",
".",
"to_datetime",
"(",
"1E6",
"*",
"time_var",
")",
"new_index",
"=",
"time_var",
"new_index_name",
"=",
"index_name",
"else",
":",
"# using integer indexing",
"new_index",
"=",
"np",
".",
"arange",
"(",
"loop_lim",
"*",
"step_size",
",",
"dtype",
"=",
"int",
")",
"%",
"step_size",
"new_index_name",
"=",
"'index'",
"# load all data into frame",
"if",
"len",
"(",
"loop_dict",
".",
"keys",
"(",
")",
")",
">",
"1",
":",
"loop_frame",
"=",
"pds",
".",
"DataFrame",
"(",
"loop_dict",
",",
"columns",
"=",
"clean_var_keys",
")",
"if",
"obj_key_name",
"in",
"loop_frame",
":",
"del",
"loop_frame",
"[",
"obj_key_name",
"]",
"# break massive frame into bunch of smaller frames",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"loop_lim",
",",
"dtype",
"=",
"int",
")",
":",
"loop_list",
".",
"append",
"(",
"loop_frame",
".",
"iloc",
"[",
"step_size",
"*",
"i",
":",
"step_size",
"*",
"(",
"i",
"+",
"1",
")",
",",
":",
"]",
")",
"loop_list",
"[",
"-",
"1",
"]",
".",
"index",
"=",
"new_index",
"[",
"step_size",
"*",
"i",
":",
"step_size",
"*",
"(",
"i",
"+",
"1",
")",
"]",
"loop_list",
"[",
"-",
"1",
"]",
".",
"index",
".",
"name",
"=",
"new_index_name",
"else",
":",
"loop_frame",
"=",
"pds",
".",
"Series",
"(",
"loop_dict",
"[",
"clean_var_keys",
"[",
"0",
"]",
"]",
",",
"name",
"=",
"obj_var_keys",
"[",
"0",
"]",
")",
"# break massive series into bunch of smaller series",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"loop_lim",
",",
"dtype",
"=",
"int",
")",
":",
"loop_list",
".",
"append",
"(",
"loop_frame",
".",
"iloc",
"[",
"step_size",
"*",
"i",
":",
"step_size",
"*",
"(",
"i",
"+",
"1",
")",
"]",
")",
"loop_list",
"[",
"-",
"1",
"]",
".",
"index",
"=",
"new_index",
"[",
"step_size",
"*",
"i",
":",
"step_size",
"*",
"(",
"i",
"+",
"1",
")",
"]",
"loop_list",
"[",
"-",
"1",
"]",
".",
"index",
".",
"name",
"=",
"new_index_name",
"# print (loop_frame.columns)",
"# add 2D object data, all based on a unique dimension within",
"# netCDF, to loaded data dictionary",
"loadedVars",
"[",
"obj_key_name",
"]",
"=",
"loop_list",
"del",
"loop_list",
"# we now have a list of keys that need to go into a dataframe,",
"# could be more than one, collect unique dimensions for 2D keys",
"for",
"dim",
"in",
"set",
"(",
"three_d_dims",
")",
":",
"# collect variable names associated with dimension",
"idx_bool",
"=",
"[",
"dim",
"==",
"i",
"for",
"i",
"in",
"three_d_dims",
"]",
"idx",
",",
"=",
"np",
".",
"where",
"(",
"np",
".",
"array",
"(",
"idx_bool",
")",
")",
"obj_var_keys",
"=",
"[",
"]",
"for",
"i",
"in",
"idx",
":",
"obj_var_keys",
".",
"append",
"(",
"three_d_keys",
"[",
"i",
"]",
")",
"for",
"obj_key_name",
"in",
"obj_var_keys",
":",
"# store attributes in metadata",
"meta_dict",
"=",
"{",
"}",
"for",
"nc_key",
"in",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
".",
"ncattrs",
"(",
")",
":",
"meta_dict",
"[",
"nc_key",
"]",
"=",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
".",
"getncattr",
"(",
"nc_key",
")",
"mdata",
"[",
"obj_key_name",
"]",
"=",
"meta_dict",
"# iterate over all variables with this dimension and store data",
"# data storage, whole shebang",
"loop_dict",
"=",
"{",
"}",
"# list holds a series of slices, parsed from dict above",
"loop_list",
"=",
"[",
"]",
"loop_dict",
"[",
"obj_key_name",
"]",
"=",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
"[",
":",
",",
":",
",",
":",
"]",
"# number of values in time",
"loop_lim",
"=",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
".",
"shape",
"[",
"0",
"]",
"# number of values per time",
"step_size_x",
"=",
"len",
"(",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
"[",
"0",
",",
":",
",",
"0",
"]",
")",
"step_size_y",
"=",
"len",
"(",
"data",
".",
"variables",
"[",
"obj_key_name",
"]",
"[",
"0",
",",
"0",
",",
":",
"]",
")",
"step_size",
"=",
"step_size_x",
"loop_dict",
"[",
"obj_key_name",
"]",
"=",
"loop_dict",
"[",
"obj_key_name",
"]",
".",
"reshape",
"(",
"(",
"loop_lim",
"*",
"step_size_x",
",",
"step_size_y",
")",
")",
"# check if there is an index we should use",
"if",
"not",
"(",
"index_key_name",
"is",
"None",
")",
":",
"# an index was found",
"time_var",
"=",
"loop_dict",
".",
"pop",
"(",
"index_key_name",
")",
"if",
"time_index_flag",
":",
"# create datetime index from data",
"if",
"file_format",
"==",
"'NETCDF4'",
":",
"time_var",
"=",
"pds",
".",
"to_datetime",
"(",
"1E6",
"*",
"time_var",
")",
"else",
":",
"time_var",
"=",
"pds",
".",
"to_datetime",
"(",
"1E6",
"*",
"time_var",
")",
"new_index",
"=",
"time_var",
"new_index_name",
"=",
"index_name",
"else",
":",
"# using integer indexing",
"new_index",
"=",
"np",
".",
"arange",
"(",
"loop_lim",
"*",
"step_size",
",",
"dtype",
"=",
"int",
")",
"%",
"step_size",
"new_index_name",
"=",
"'index'",
"# load all data into frame",
"loop_frame",
"=",
"pds",
".",
"DataFrame",
"(",
"loop_dict",
"[",
"obj_key_name",
"]",
")",
"# del loop_frame['dimension_1']",
"# break massive frame into bunch of smaller frames",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"loop_lim",
",",
"dtype",
"=",
"int",
")",
":",
"loop_list",
".",
"append",
"(",
"loop_frame",
".",
"iloc",
"[",
"step_size",
"*",
"i",
":",
"step_size",
"*",
"(",
"i",
"+",
"1",
")",
",",
":",
"]",
")",
"loop_list",
"[",
"-",
"1",
"]",
".",
"index",
"=",
"new_index",
"[",
"step_size",
"*",
"i",
":",
"step_size",
"*",
"(",
"i",
"+",
"1",
")",
"]",
"loop_list",
"[",
"-",
"1",
"]",
".",
"index",
".",
"name",
"=",
"new_index_name",
"# add 2D object data, all based on a unique dimension within netCDF,",
"# to loaded data dictionary",
"loadedVars",
"[",
"obj_key_name",
"]",
"=",
"loop_list",
"del",
"loop_list",
"# prepare dataframe index for this netcdf file",
"time_var",
"=",
"loadedVars",
".",
"pop",
"(",
"epoch_name",
")",
"# convert from GPS seconds to seconds used in pandas (unix time,",
"# no leap)",
"#time_var = convert_gps_to_unix_seconds(time_var)",
"if",
"file_format",
"==",
"'NETCDF4'",
":",
"loadedVars",
"[",
"epoch_name",
"]",
"=",
"pds",
".",
"to_datetime",
"(",
"(",
"1E6",
"*",
"time_var",
")",
".",
"astype",
"(",
"int",
")",
")",
"else",
":",
"loadedVars",
"[",
"epoch_name",
"]",
"=",
"pds",
".",
"to_datetime",
"(",
"(",
"time_var",
"*",
"1E6",
")",
".",
"astype",
"(",
"int",
")",
")",
"#loadedVars[epoch_name] = pds.to_datetime((time_var*1E6).astype(int))",
"running_store",
".",
"append",
"(",
"loadedVars",
")",
"running_idx",
"+=",
"len",
"(",
"loadedVars",
"[",
"epoch_name",
"]",
")",
"if",
"strict_meta",
":",
"if",
"saved_mdata",
"is",
"None",
":",
"saved_mdata",
"=",
"copy",
".",
"deepcopy",
"(",
"mdata",
")",
"elif",
"(",
"mdata",
"!=",
"saved_mdata",
")",
":",
"raise",
"ValueError",
"(",
"'Metadata across filenames is not the '",
"+",
"'same.'",
")",
"# combine all of the data loaded across files together",
"out",
"=",
"[",
"]",
"for",
"item",
"in",
"running_store",
":",
"out",
".",
"append",
"(",
"pds",
".",
"DataFrame",
".",
"from_records",
"(",
"item",
",",
"index",
"=",
"epoch_name",
")",
")",
"out",
"=",
"pds",
".",
"concat",
"(",
"out",
",",
"axis",
"=",
"0",
")",
"return",
"out",
",",
"mdata"
]
| Load netCDF-3/4 file produced by pysat.
Parameters
----------
fnames : string or array_like of strings
filenames to load
strict_meta : boolean
check if metadata across fnames is the same
file_format : string
file_format keyword passed to netCDF4 routine
NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4
Returns
--------
out : pandas.core.frame.DataFrame
DataFrame output
mdata : pysat._meta.Meta
Meta data | [
"Load",
"netCDF",
"-",
"3",
"/",
"4",
"file",
"produced",
"by",
"pysat",
"."
]
| python | train |
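Most of the loader above is devoted to turning a 2D netCDF variable (time by sample) into a list of small per-time-step DataFrames. A minimal sketch of that reshaping on synthetic data, using only numpy and pandas; the variable names here are illustrative and not part of pysat's API:

import numpy as np
import pandas as pds

n_times, n_per_time = 4, 3
values = np.arange(n_times * n_per_time, dtype=float).reshape(n_times, n_per_time)

# flatten row-wise, as data.variables[key][:, :].flatten(order='C') does above
frame = pds.DataFrame({'density': values.flatten(order='C')})

# cut the long frame back into one small frame per time step
per_time = []
for i in range(n_times):
    piece = frame.iloc[i * n_per_time:(i + 1) * n_per_time, :]
    piece.index = np.arange(n_per_time)
    piece.index.name = 'index'
    per_time.append(piece)

print(len(per_time), per_time[0].shape)  # 4 (3, 1)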
gboeing/osmnx | osmnx/plot.py | https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/plot.py#L100-L115 | def rgb_color_list_to_hex(color_list):
"""
Convert a list of RGBa colors to a list of hexadecimal color codes.
Parameters
----------
color_list : list
the list of RGBa colors
Returns
-------
color_list_hex : list
"""
color_list_rgb = [[int(x*255) for x in c[0:3]] for c in color_list]
color_list_hex = ['#{:02X}{:02X}{:02X}'.format(rgb[0], rgb[1], rgb[2]) for rgb in color_list_rgb]
return color_list_hex | [
"def",
"rgb_color_list_to_hex",
"(",
"color_list",
")",
":",
"color_list_rgb",
"=",
"[",
"[",
"int",
"(",
"x",
"*",
"255",
")",
"for",
"x",
"in",
"c",
"[",
"0",
":",
"3",
"]",
"]",
"for",
"c",
"in",
"color_list",
"]",
"color_list_hex",
"=",
"[",
"'#{:02X}{:02X}{:02X}'",
".",
"format",
"(",
"rgb",
"[",
"0",
"]",
",",
"rgb",
"[",
"1",
"]",
",",
"rgb",
"[",
"2",
"]",
")",
"for",
"rgb",
"in",
"color_list_rgb",
"]",
"return",
"color_list_hex"
]
| Convert a list of RGBa colors to a list of hexadecimal color codes.
Parameters
----------
color_list : list
the list of RGBa colors
Returns
-------
color_list_hex : list | [
"Convert",
"a",
"list",
"of",
"RGBa",
"colors",
"to",
"a",
"list",
"of",
"hexadecimal",
"color",
"codes",
"."
]
| python | train |
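A quick check of the conversion above, with the function defined in this record. Note that int() truncates rather than rounds, so channel values that are not exactly representable in binary can land one step low:

color_list = [(0.0, 0.5, 1.0, 1.0), (1.0, 1.0, 1.0, 0.3)]  # RGBa tuples, alpha is ignored
print(rgb_color_list_to_hex(color_list))  # ['#007FFF', '#FFFFFF']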
jobovy/galpy | galpy/potential/SpiralArmsPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SpiralArmsPotential.py#L570-L574 | def _B(self, R):
"""Return numpy array from B1 up to and including Bn. (eqn. 6)"""
HNn_R = self._HNn / R
return HNn_R / self._sin_alpha * (0.4 * HNn_R / self._sin_alpha + 1) | [
"def",
"_B",
"(",
"self",
",",
"R",
")",
":",
"HNn_R",
"=",
"self",
".",
"_HNn",
"/",
"R",
"return",
"HNn_R",
"/",
"self",
".",
"_sin_alpha",
"*",
"(",
"0.4",
"*",
"HNn_R",
"/",
"self",
".",
"_sin_alpha",
"+",
"1",
")"
]
| Return numpy array from B1 up to and including Bn. (eqn. 6) | [
"Return",
"numpy",
"array",
"from",
"B1",
"up",
"to",
"and",
"including",
"Bn",
".",
"(",
"eqn",
".",
"6",
")"
]
| python | train |
tgsmith61591/pmdarima | pmdarima/arima/arima.py | https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L515-L584 | def predict(self, n_periods=10, exogenous=None,
return_conf_int=False, alpha=0.05):
"""Forecast future values
Generate predictions (forecasts) ``n_periods`` in the future.
        Note that if ``exogenous`` variables were used in the model fit, they
        will be expected for the predict procedure, which will fail otherwise.
Parameters
----------
n_periods : int, optional (default=10)
The number of periods in the future to forecast.
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
return_conf_int : bool, optional (default=False)
Whether to get the confidence intervals of the forecasts.
alpha : float, optional (default=0.05)
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecasts : array-like, shape=(n_periods,)
            The array of forecasted values.
conf_int : array-like, shape=(n_periods, 2), optional
The confidence intervals for the forecasts. Only returned if
``return_conf_int`` is True.
"""
check_is_fitted(self, 'arima_res_')
if not isinstance(n_periods, (int, long)):
raise TypeError("n_periods must be an int or a long")
# if we fit with exog, make sure one was passed:
exogenous = self._check_exog(exogenous) # type: np.ndarray
if exogenous is not None and exogenous.shape[0] != n_periods:
raise ValueError('Exogenous array dims (n_rows) != n_periods')
# ARIMA/ARMA predict differently...
if not self._is_seasonal():
# use the results wrapper to predict so it injects its own params
# (also if I was 0, ARMA will not have a forecast method natively)
f, _, conf_int = self.arima_res_.forecast(
steps=n_periods, exog=exogenous, alpha=alpha)
else: # SARIMAX
# Unfortunately, SARIMAX does not really provide a nice way to get
# the confidence intervals out of the box, so we have to perform
# the get_prediction code here and unpack the confidence intervals
# manually.
# f = self.arima_res_.forecast(steps=n_periods, exog=exogenous)
arima = self.arima_res_
end = arima.nobs + n_periods - 1
results = arima.get_prediction(start=arima.nobs,
end=end,
exog=exogenous)
f = results.predicted_mean
conf_int = results.conf_int(alpha=alpha)
if return_conf_int:
# The confidence intervals may be a Pandas frame if it comes from
# SARIMAX & we want Numpy. We will to duck type it so we don't add
# new explicit requirements for the package
return f, check_array(conf_int, force_all_finite=False)
return f | [
"def",
"predict",
"(",
"self",
",",
"n_periods",
"=",
"10",
",",
"exogenous",
"=",
"None",
",",
"return_conf_int",
"=",
"False",
",",
"alpha",
"=",
"0.05",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"'arima_res_'",
")",
"if",
"not",
"isinstance",
"(",
"n_periods",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"n_periods must be an int or a long\"",
")",
"# if we fit with exog, make sure one was passed:",
"exogenous",
"=",
"self",
".",
"_check_exog",
"(",
"exogenous",
")",
"# type: np.ndarray",
"if",
"exogenous",
"is",
"not",
"None",
"and",
"exogenous",
".",
"shape",
"[",
"0",
"]",
"!=",
"n_periods",
":",
"raise",
"ValueError",
"(",
"'Exogenous array dims (n_rows) != n_periods'",
")",
"# ARIMA/ARMA predict differently...",
"if",
"not",
"self",
".",
"_is_seasonal",
"(",
")",
":",
"# use the results wrapper to predict so it injects its own params",
"# (also if I was 0, ARMA will not have a forecast method natively)",
"f",
",",
"_",
",",
"conf_int",
"=",
"self",
".",
"arima_res_",
".",
"forecast",
"(",
"steps",
"=",
"n_periods",
",",
"exog",
"=",
"exogenous",
",",
"alpha",
"=",
"alpha",
")",
"else",
":",
"# SARIMAX",
"# Unfortunately, SARIMAX does not really provide a nice way to get",
"# the confidence intervals out of the box, so we have to perform",
"# the get_prediction code here and unpack the confidence intervals",
"# manually.",
"# f = self.arima_res_.forecast(steps=n_periods, exog=exogenous)",
"arima",
"=",
"self",
".",
"arima_res_",
"end",
"=",
"arima",
".",
"nobs",
"+",
"n_periods",
"-",
"1",
"results",
"=",
"arima",
".",
"get_prediction",
"(",
"start",
"=",
"arima",
".",
"nobs",
",",
"end",
"=",
"end",
",",
"exog",
"=",
"exogenous",
")",
"f",
"=",
"results",
".",
"predicted_mean",
"conf_int",
"=",
"results",
".",
"conf_int",
"(",
"alpha",
"=",
"alpha",
")",
"if",
"return_conf_int",
":",
"# The confidence intervals may be a Pandas frame if it comes from",
"# SARIMAX & we want Numpy. We will to duck type it so we don't add",
"# new explicit requirements for the package",
"return",
"f",
",",
"check_array",
"(",
"conf_int",
",",
"force_all_finite",
"=",
"False",
")",
"return",
"f"
]
| Forecast future values
Generate predictions (forecasts) ``n_periods`` in the future.
        Note that if ``exogenous`` variables were used in the model fit, they
        will be expected for the predict procedure, which will fail otherwise.
Parameters
----------
n_periods : int, optional (default=10)
The number of periods in the future to forecast.
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
return_conf_int : bool, optional (default=False)
Whether to get the confidence intervals of the forecasts.
alpha : float, optional (default=0.05)
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecasts : array-like, shape=(n_periods,)
            The array of forecasted values.
conf_int : array-like, shape=(n_periods, 2), optional
The confidence intervals for the forecasts. Only returned if
``return_conf_int`` is True. | [
"Forecast",
"future",
"values"
]
| python | train |
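A hedged usage sketch of the method above. The top-level pmdarima import path and the ARIMA constructor arguments shown here are assumptions about the package layout rather than something taken from this record:

import numpy as np
import pmdarima as pm  # assumed import path

y = np.sin(np.linspace(0, 20, 120)) + np.random.normal(scale=0.1, size=120)
model = pm.ARIMA(order=(2, 0, 1)).fit(y)

forecasts, conf_int = model.predict(n_periods=12, return_conf_int=True, alpha=0.05)
# forecasts: shape (12,); conf_int: shape (12, 2) with lower/upper bounds per step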
woolfson-group/isambard | isambard/ampal/analyse_protein.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/analyse_protein.py#L674-L724 | def make_primitive_extrapolate_ends(cas_coords, smoothing_level=2):
"""Generates smoothed helix primitives and extrapolates lost ends.
Notes
-----
From an input list of CA coordinates, the running average is
calculated to form a primitive. The smoothing_level dictates how
many times to calculate the running average. A higher
smoothing_level generates a 'smoother' primitive - i.e. the
points on the primitive more closely fit a smooth curve in R^3.
Each time the smoothing level is increased by 1, a point is lost
from either end of the primitive. To correct for this, the primitive
is extrapolated at the ends to approximate the lost values. There
is a trade-off then between the smoothness of the primitive and
its accuracy at the ends.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
smoothing_level : int
Number of times to run the averaging.
Returns
-------
final_primitive : list(numpy.array)
Each array has length 3.
"""
try:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level)
except ValueError:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level - 1)
# if returned smoothed primitive is too short, lower the smoothing
# level and try again.
if len(smoothed_primitive) < 3:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level - 1)
final_primitive = []
for ca in cas_coords:
prim_dists = [distance(ca, p) for p in smoothed_primitive]
closest_indices = sorted([x[0] for x in sorted(
enumerate(prim_dists), key=lambda k: k[1])[:3]])
a, b, c = [smoothed_primitive[x] for x in closest_indices]
ab_foot = find_foot(a, b, ca)
bc_foot = find_foot(b, c, ca)
ca_foot = (ab_foot + bc_foot) / 2
final_primitive.append(ca_foot)
return final_primitive | [
"def",
"make_primitive_extrapolate_ends",
"(",
"cas_coords",
",",
"smoothing_level",
"=",
"2",
")",
":",
"try",
":",
"smoothed_primitive",
"=",
"make_primitive_smoothed",
"(",
"cas_coords",
",",
"smoothing_level",
"=",
"smoothing_level",
")",
"except",
"ValueError",
":",
"smoothed_primitive",
"=",
"make_primitive_smoothed",
"(",
"cas_coords",
",",
"smoothing_level",
"=",
"smoothing_level",
"-",
"1",
")",
"# if returned smoothed primitive is too short, lower the smoothing",
"# level and try again.",
"if",
"len",
"(",
"smoothed_primitive",
")",
"<",
"3",
":",
"smoothed_primitive",
"=",
"make_primitive_smoothed",
"(",
"cas_coords",
",",
"smoothing_level",
"=",
"smoothing_level",
"-",
"1",
")",
"final_primitive",
"=",
"[",
"]",
"for",
"ca",
"in",
"cas_coords",
":",
"prim_dists",
"=",
"[",
"distance",
"(",
"ca",
",",
"p",
")",
"for",
"p",
"in",
"smoothed_primitive",
"]",
"closest_indices",
"=",
"sorted",
"(",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"sorted",
"(",
"enumerate",
"(",
"prim_dists",
")",
",",
"key",
"=",
"lambda",
"k",
":",
"k",
"[",
"1",
"]",
")",
"[",
":",
"3",
"]",
"]",
")",
"a",
",",
"b",
",",
"c",
"=",
"[",
"smoothed_primitive",
"[",
"x",
"]",
"for",
"x",
"in",
"closest_indices",
"]",
"ab_foot",
"=",
"find_foot",
"(",
"a",
",",
"b",
",",
"ca",
")",
"bc_foot",
"=",
"find_foot",
"(",
"b",
",",
"c",
",",
"ca",
")",
"ca_foot",
"=",
"(",
"ab_foot",
"+",
"bc_foot",
")",
"/",
"2",
"final_primitive",
".",
"append",
"(",
"ca_foot",
")",
"return",
"final_primitive"
]
| Generates smoothed helix primitives and extrapolates lost ends.
Notes
-----
From an input list of CA coordinates, the running average is
calculated to form a primitive. The smoothing_level dictates how
many times to calculate the running average. A higher
smoothing_level generates a 'smoother' primitive - i.e. the
points on the primitive more closely fit a smooth curve in R^3.
Each time the smoothing level is increased by 1, a point is lost
from either end of the primitive. To correct for this, the primitive
is extrapolated at the ends to approximate the lost values. There
is a trade-off then between the smoothness of the primitive and
its accuracy at the ends.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
smoothing_level : int
Number of times to run the averaging.
Returns
-------
final_primitive : list(numpy.array)
Each array has length 3. | [
"Generates",
"smoothed",
"helix",
"primitives",
"and",
"extrapolates",
"lost",
"ends",
"."
]
| python | train |
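A small numpy illustration, not isambard code, of the trade-off the docstring describes: each running-average pass smooths the trace but loses one point from either end, which is why the ends are extrapolated afterwards:

import numpy as np

cas = np.array([[0.0, 0.0, 0.0],
                [1.0, 1.0, 0.0],
                [2.0, 0.0, 0.0],
                [3.0, 1.0, 0.0],
                [4.0, 0.0, 0.0]])

def smooth_once(points):
    # three-point running average; the first and last points are lost
    return (points[:-2] + points[1:-1] + points[2:]) / 3.0

level1 = smooth_once(cas)
level2 = smooth_once(level1)
print(len(cas), len(level1), len(level2))  # 5 3 1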
senaite/senaite.core | bika/lims/browser/widgets/serviceswidget.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/serviceswidget.py#L129-L190 | def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
# ensure we have an object and not a brain
obj = api.get_object(obj)
uid = api.get_uid(obj)
url = api.get_url(obj)
title = api.get_title(obj)
# get the category
if self.show_categories_enabled():
category = obj.getCategoryTitle()
if category not in self.categories:
self.categories.append(category)
item["category"] = category
item["replace"]["Title"] = get_link(url, value=title)
item["selected"] = False
item["selected"] = uid in self.selected_services_uids
# Add methods
methods = obj.getMethods()
if methods:
links = map(
lambda m: get_link(
m.absolute_url(), value=m.Title(), css_class="link"),
methods)
item["replace"]["Methods"] = ", ".join(links)
else:
item["methods"] = ""
calculation = obj.getCalculation()
if calculation:
title = calculation.Title()
url = calculation.absolute_url()
item["Calculation"] = title
item["replace"]["Calculation"] = get_link(url, value=title)
else:
item["Calculation"] = ""
# Icons
after_icons = ""
if obj.getAccredited():
after_icons += get_image(
"accredited.png", title=_("Accredited"))
if obj.getAttachmentOption() == "r":
after_icons += get_image(
"attach_reqd.png", title=_("Attachment required"))
if obj.getAttachmentOption() == "n":
after_icons += get_image(
"attach_no.png", title=_("Attachment not permitted"))
if after_icons:
item["after"]["Title"] = after_icons
return item | [
"def",
"folderitem",
"(",
"self",
",",
"obj",
",",
"item",
",",
"index",
")",
":",
"# ensure we have an object and not a brain",
"obj",
"=",
"api",
".",
"get_object",
"(",
"obj",
")",
"uid",
"=",
"api",
".",
"get_uid",
"(",
"obj",
")",
"url",
"=",
"api",
".",
"get_url",
"(",
"obj",
")",
"title",
"=",
"api",
".",
"get_title",
"(",
"obj",
")",
"# get the category",
"if",
"self",
".",
"show_categories_enabled",
"(",
")",
":",
"category",
"=",
"obj",
".",
"getCategoryTitle",
"(",
")",
"if",
"category",
"not",
"in",
"self",
".",
"categories",
":",
"self",
".",
"categories",
".",
"append",
"(",
"category",
")",
"item",
"[",
"\"category\"",
"]",
"=",
"category",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Title\"",
"]",
"=",
"get_link",
"(",
"url",
",",
"value",
"=",
"title",
")",
"item",
"[",
"\"selected\"",
"]",
"=",
"False",
"item",
"[",
"\"selected\"",
"]",
"=",
"uid",
"in",
"self",
".",
"selected_services_uids",
"# Add methods",
"methods",
"=",
"obj",
".",
"getMethods",
"(",
")",
"if",
"methods",
":",
"links",
"=",
"map",
"(",
"lambda",
"m",
":",
"get_link",
"(",
"m",
".",
"absolute_url",
"(",
")",
",",
"value",
"=",
"m",
".",
"Title",
"(",
")",
",",
"css_class",
"=",
"\"link\"",
")",
",",
"methods",
")",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Methods\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"links",
")",
"else",
":",
"item",
"[",
"\"methods\"",
"]",
"=",
"\"\"",
"calculation",
"=",
"obj",
".",
"getCalculation",
"(",
")",
"if",
"calculation",
":",
"title",
"=",
"calculation",
".",
"Title",
"(",
")",
"url",
"=",
"calculation",
".",
"absolute_url",
"(",
")",
"item",
"[",
"\"Calculation\"",
"]",
"=",
"title",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Calculation\"",
"]",
"=",
"get_link",
"(",
"url",
",",
"value",
"=",
"title",
")",
"else",
":",
"item",
"[",
"\"Calculation\"",
"]",
"=",
"\"\"",
"# Icons",
"after_icons",
"=",
"\"\"",
"if",
"obj",
".",
"getAccredited",
"(",
")",
":",
"after_icons",
"+=",
"get_image",
"(",
"\"accredited.png\"",
",",
"title",
"=",
"_",
"(",
"\"Accredited\"",
")",
")",
"if",
"obj",
".",
"getAttachmentOption",
"(",
")",
"==",
"\"r\"",
":",
"after_icons",
"+=",
"get_image",
"(",
"\"attach_reqd.png\"",
",",
"title",
"=",
"_",
"(",
"\"Attachment required\"",
")",
")",
"if",
"obj",
".",
"getAttachmentOption",
"(",
")",
"==",
"\"n\"",
":",
"after_icons",
"+=",
"get_image",
"(",
"\"attach_no.png\"",
",",
"title",
"=",
"_",
"(",
"\"Attachment not permitted\"",
")",
")",
"if",
"after_icons",
":",
"item",
"[",
"\"after\"",
"]",
"[",
"\"Title\"",
"]",
"=",
"after_icons",
"return",
"item"
]
| Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item | [
"Service",
"triggered",
"each",
"time",
"an",
"item",
"is",
"iterated",
"in",
"folderitems",
"."
]
| python | train |
zyga/python-glibc | pyglibc/_signalfd.py | https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/pyglibc/_signalfd.py#L103-L116 | def close(self):
"""
Close the internal signalfd file descriptor if it isn't closed
:raises OSError:
If the underlying ``close(2)`` fails. The error message matches
those found in the manual page.
"""
with self._close_lock:
sfd = self._sfd
if sfd >= 0:
self._sfd = -1
self._signals = frozenset()
close(sfd) | [
"def",
"close",
"(",
"self",
")",
":",
"with",
"self",
".",
"_close_lock",
":",
"sfd",
"=",
"self",
".",
"_sfd",
"if",
"sfd",
">=",
"0",
":",
"self",
".",
"_sfd",
"=",
"-",
"1",
"self",
".",
"_signals",
"=",
"frozenset",
"(",
")",
"close",
"(",
"sfd",
")"
]
| Close the internal signalfd file descriptor if it isn't closed
:raises OSError:
If the underlying ``close(2)`` fails. The error message matches
those found in the manual page. | [
"Close",
"the",
"internal",
"signalfd",
"file",
"descriptor",
"if",
"it",
"isn",
"t",
"closed"
]
| python | train |
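The close() above follows a close-once pattern: read and reset the descriptor under a lock, and only call close(2) if it was still open. A generic sketch of the same pattern, not pyglibc itself:

import os
import threading

class FDHolder:
    def __init__(self, fd):
        self._fd = fd
        self._close_lock = threading.Lock()

    def close(self):
        with self._close_lock:
            fd = self._fd
            if fd >= 0:
                self._fd = -1
                os.close(fd)

holder = FDHolder(os.open(os.devnull, os.O_RDONLY))
holder.close()
holder.close()  # second call is a no-op instead of raising EBADF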
oasis-open/cti-stix-validator | stix2validator/v21/shoulds.py | https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v21/shoulds.py#L43-L52 | def custom_prefix_lax(instance):
"""Ensure custom content follows lenient naming style conventions
for forward-compatibility.
"""
for error in chain(custom_object_prefix_lax(instance),
custom_property_prefix_lax(instance),
custom_observable_object_prefix_lax(instance),
custom_object_extension_prefix_lax(instance),
custom_observable_properties_prefix_lax(instance)):
yield error | [
"def",
"custom_prefix_lax",
"(",
"instance",
")",
":",
"for",
"error",
"in",
"chain",
"(",
"custom_object_prefix_lax",
"(",
"instance",
")",
",",
"custom_property_prefix_lax",
"(",
"instance",
")",
",",
"custom_observable_object_prefix_lax",
"(",
"instance",
")",
",",
"custom_object_extension_prefix_lax",
"(",
"instance",
")",
",",
"custom_observable_properties_prefix_lax",
"(",
"instance",
")",
")",
":",
"yield",
"error"
]
| Ensure custom content follows lenient naming style conventions
for forward-compatibility. | [
"Ensure",
"custom",
"content",
"follows",
"lenient",
"naming",
"style",
"conventions",
"for",
"forward",
"-",
"compatibility",
"."
]
| python | train |
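The checker above simply chains several generator-based checks into one lazy error stream. The same pattern in isolation, with hypothetical checker names:

from itertools import chain

def check_a(instance):
    if 'a' not in instance:
        yield "missing a"

def check_b(instance):
    if 'b' not in instance:
        yield "missing b"

def combined(instance):
    # errors are produced lazily, in the order the checkers are chained
    for error in chain(check_a(instance), check_b(instance)):
        yield error

print(list(combined({'a': 1})))  # ['missing b']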
jeroyang/txttk | txttk/nlptools.py | https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L130-L154 | def count_start(tokenizer):
"""
    A decorator which wraps the given tokenizer to yield (token, start).
    Notice! the decorated tokenizer must take an int argument standing for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
"""
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper | [
"def",
"count_start",
"(",
"tokenizer",
")",
":",
"def",
"wrapper",
"(",
"context",
",",
"base",
")",
":",
"tokens",
"=",
"list",
"(",
"tokenizer",
"(",
"context",
")",
")",
"flag",
"=",
"0",
"for",
"token",
"in",
"tokens",
":",
"start",
"=",
"context",
".",
"index",
"(",
"token",
",",
"flag",
")",
"flag",
"=",
"start",
"+",
"len",
"(",
"token",
")",
"yield",
"(",
"token",
",",
"base",
"+",
"start",
")",
"return",
"wrapper"
]
| A decorator which wraps the given tokenizer to yield (token, start).
    Notice! the decorated tokenizer must take an int argument standing for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
... | [
"A",
"decorator",
"which",
"wrap",
"the",
"given",
"tokenizer",
"to",
"yield",
"(",
"token",
"start",
")",
".",
"Notice!",
"the",
"decorated",
"tokenizer",
"must",
"take",
"a",
"int",
"arguments",
"stands",
"for",
"the",
"start",
"position",
"of",
"the",
"input",
"context",
"/",
"sentence"
]
| python | train |
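A runnable version of the doctest above, with count_start defined as in this record; materialising the generator makes the (token, start) pairs visible:

tokenize = count_start(lambda sentence: sentence.split(' '))
print(list(tokenize('The quick brown fox', 0)))
# [('The', 0), ('quick', 4), ('brown', 10), ('fox', 16)]
print(list(tokenize('quick brown', 100)))
# [('quick', 100), ('brown', 106)]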
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L367-L388 | def plot(
self,
best=False,
ax=None,
title=None,
figsize=None,
temp_range=None,
alpha=None,
**kwargs
):
""" Plot
"""
return plot_caltrack_candidate(
self,
best=best,
ax=ax,
title=title,
figsize=figsize,
temp_range=temp_range,
alpha=alpha,
**kwargs
) | [
"def",
"plot",
"(",
"self",
",",
"best",
"=",
"False",
",",
"ax",
"=",
"None",
",",
"title",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"temp_range",
"=",
"None",
",",
"alpha",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"plot_caltrack_candidate",
"(",
"self",
",",
"best",
"=",
"best",
",",
"ax",
"=",
"ax",
",",
"title",
"=",
"title",
",",
"figsize",
"=",
"figsize",
",",
"temp_range",
"=",
"temp_range",
",",
"alpha",
"=",
"alpha",
",",
"*",
"*",
"kwargs",
")"
]
| Plot | [
"Plot"
]
| python | train |
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L498-L512 | def __update_molar_mass(self, compound='', element=''):
"""Re-calculate the molar mass of the element given due to stoichiometric changes
Parameters:
==========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_molar_mass_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_mass = self.stack[compound][element]['isotopes']['mass']['value']
ratio_mass = zip(list_ratio, list_mass)
for _ratio, _mass in ratio_mass:
_molar_mass_element += np.float(_ratio) * np.float(_mass)
self.stack[compound][element]['molar_mass']['value'] = _molar_mass_element | [
"def",
"__update_molar_mass",
"(",
"self",
",",
"compound",
"=",
"''",
",",
"element",
"=",
"''",
")",
":",
"_molar_mass_element",
"=",
"0",
"list_ratio",
"=",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'isotopic_ratio'",
"]",
"list_mass",
"=",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'mass'",
"]",
"[",
"'value'",
"]",
"ratio_mass",
"=",
"zip",
"(",
"list_ratio",
",",
"list_mass",
")",
"for",
"_ratio",
",",
"_mass",
"in",
"ratio_mass",
":",
"_molar_mass_element",
"+=",
"np",
".",
"float",
"(",
"_ratio",
")",
"*",
"np",
".",
"float",
"(",
"_mass",
")",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'molar_mass'",
"]",
"[",
"'value'",
"]",
"=",
"_molar_mass_element"
]
| Re-calculate the molar mass of the element given due to stoichiometric changes
Parameters:
==========
compound: string (default is '') name of compound
element: string (default is '') name of element | [
"Re",
"-",
"calculate",
"the",
"molar",
"mass",
"of",
"the",
"element",
"given",
"due",
"to",
"stoichiometric",
"changes"
]
| python | train |
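The method above computes a plain abundance-weighted sum over the isotopes of one element. A worked example with illustrative numbers, not real data from the package:

isotopic_ratio = [0.0011, 0.9989]     # isotopic abundances, summing to 1
isotopic_mass = [13.00335, 12.00000]  # corresponding isotope masses

molar_mass = sum(ratio * mass for ratio, mass in zip(isotopic_ratio, isotopic_mass))
print(round(molar_mass, 5))  # 12.0011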
radjkarl/fancyTools | fancytools/geometry/polygon.py | https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/geometry/polygon.py#L43-L74 | def pointInsidePolygon(x, y, poly):
"""
Determine if a point is inside a given polygon or not
Polygon is a list of (x,y) pairs.
[code taken from: http://www.ariel.com.au/a/python-point-int-poly.html]
let's make an easy square:
>>> poly = [ (0,0),\
(1,0),\
(1,1),\
(0,1) ]
>>> pointInsidePolygon(0.5,0.5, poly)
True
>>> pointInsidePolygon(1.5,1.5, poly)
False
"""
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside | [
"def",
"pointInsidePolygon",
"(",
"x",
",",
"y",
",",
"poly",
")",
":",
"n",
"=",
"len",
"(",
"poly",
")",
"inside",
"=",
"False",
"p1x",
",",
"p1y",
"=",
"poly",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"n",
"+",
"1",
")",
":",
"p2x",
",",
"p2y",
"=",
"poly",
"[",
"i",
"%",
"n",
"]",
"if",
"y",
">",
"min",
"(",
"p1y",
",",
"p2y",
")",
":",
"if",
"y",
"<=",
"max",
"(",
"p1y",
",",
"p2y",
")",
":",
"if",
"x",
"<=",
"max",
"(",
"p1x",
",",
"p2x",
")",
":",
"if",
"p1y",
"!=",
"p2y",
":",
"xinters",
"=",
"(",
"y",
"-",
"p1y",
")",
"*",
"(",
"p2x",
"-",
"p1x",
")",
"/",
"(",
"p2y",
"-",
"p1y",
")",
"+",
"p1x",
"if",
"p1x",
"==",
"p2x",
"or",
"x",
"<=",
"xinters",
":",
"inside",
"=",
"not",
"inside",
"p1x",
",",
"p1y",
"=",
"p2x",
",",
"p2y",
"return",
"inside"
]
| Determine if a point is inside a given polygon or not
Polygon is a list of (x,y) pairs.
[code taken from: http://www.ariel.com.au/a/python-point-int-poly.html]
let's make an easy square:
>>> poly = [ (0,0),\
(1,0),\
(1,1),\
(0,1) ]
>>> pointInsidePolygon(0.5,0.5, poly)
True
>>> pointInsidePolygon(1.5,1.5, poly)
False | [
"Determine",
"if",
"a",
"point",
"is",
"inside",
"a",
"given",
"polygon",
"or",
"not",
"Polygon",
"is",
"a",
"list",
"of",
"(",
"x",
"y",
")",
"pairs",
"."
]
| python | train |
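The even-odd (ray-casting) test above also handles concave shapes. Using the function as defined in this record with an L-shaped polygon:

l_shape = [(0, 0), (2, 0), (2, 1), (1, 1), (1, 2), (0, 2)]
print(pointInsidePolygon(0.5, 1.5, l_shape))  # True  (inside the vertical arm)
print(pointInsidePolygon(1.5, 1.5, l_shape))  # False (inside the cut-out corner)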
SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/model_booster/queryable_mixin.py | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/model_booster/queryable_mixin.py#L389-L413 | def add(cls, model, commit=True):
"""Adds a model instance to session and commits the
transaction.
Args:
model: The instance to add.
Examples:
>>> customer = Customer.new(name="hari", email="[email protected]")
>>> Customer.add(customer)
[email protected]
"""
if not isinstance(model, cls):
raise ValueError('%s is not of type %s' % (model, cls))
cls.session.add(model)
try:
if commit:
cls.session.commit()
return model
except:
cls.session.rollback()
raise | [
"def",
"add",
"(",
"cls",
",",
"model",
",",
"commit",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"model",
",",
"cls",
")",
":",
"raise",
"ValueError",
"(",
"'%s is not of type %s'",
"%",
"(",
"model",
",",
"cls",
")",
")",
"cls",
".",
"session",
".",
"add",
"(",
"model",
")",
"try",
":",
"if",
"commit",
":",
"cls",
".",
"session",
".",
"commit",
"(",
")",
"return",
"model",
"except",
":",
"cls",
".",
"session",
".",
"rollback",
"(",
")",
"raise"
]
| Adds a model instance to session and commits the
transaction.
Args:
model: The instance to add.
Examples:
>>> customer = Customer.new(name="hari", email="[email protected]")
>>> Customer.add(customer)
[email protected] | [
"Adds",
"a",
"model",
"instance",
"to",
"session",
"and",
"commits",
"the",
"transaction",
"."
]
| python | train |
hhatto/autopep8 | autopep8.py | https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1206-L1218 | def fix_w391(self, _):
"""Remove trailing blank lines."""
blank_count = 0
for line in reversed(self.source):
line = line.rstrip()
if line:
break
else:
blank_count += 1
original_length = len(self.source)
self.source = self.source[:original_length - blank_count]
return range(1, 1 + original_length) | [
"def",
"fix_w391",
"(",
"self",
",",
"_",
")",
":",
"blank_count",
"=",
"0",
"for",
"line",
"in",
"reversed",
"(",
"self",
".",
"source",
")",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"if",
"line",
":",
"break",
"else",
":",
"blank_count",
"+=",
"1",
"original_length",
"=",
"len",
"(",
"self",
".",
"source",
")",
"self",
".",
"source",
"=",
"self",
".",
"source",
"[",
":",
"original_length",
"-",
"blank_count",
"]",
"return",
"range",
"(",
"1",
",",
"1",
"+",
"original_length",
")"
]
| Remove trailing blank lines. | [
"Remove",
"trailing",
"blank",
"lines",
"."
]
| python | train |
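The same trailing-blank-line trim as a standalone snippet over a list of source lines; this is an illustration, not autopep8's public API:

source = ['import os\n', 'print(os.name)\n', '\n', '   \n', '\n']

blank_count = 0
for line in reversed(source):
    if line.rstrip():
        break
    blank_count += 1

trimmed = source[:len(source) - blank_count]
print(trimmed)  # ['import os\n', 'print(os.name)\n']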
5monkeys/django-bananas | bananas/admin/api/schemas/yasg.py | https://github.com/5monkeys/django-bananas/blob/cfd318c737f6c4580036c13d2acf32bca96654bf/bananas/admin/api/schemas/yasg.py#L45-L76 | def get_summary(self):
"""
Compat: drf-yasg 1.11
"""
title = None
method_name = getattr(self.view, "action", self.method.lower())
action = getattr(self.view, method_name, None)
action_kwargs = getattr(action, "kwargs", None)
if action_kwargs:
title = action_kwargs.get("name")
if not title and is_custom_action(self.view.action):
title = _(self.view.action.replace("_", " ")).capitalize()
if not title:
meta = self.view.get_admin_meta()
if self.view.action in ["retrieve", "update", "partial_update"]:
title = str(meta.get("verbose_name") or meta.name)
elif self.view.action == "create":
title = meta.get("verbose_name")
if title:
title = str(_("Add")) + " " + str(title).lower()
else:
title = meta.name
elif self.view.action == "list":
title = str(meta.get("verbose_name_plural") or meta.name)
else:
title = str(meta.name)
return title | [
"def",
"get_summary",
"(",
"self",
")",
":",
"title",
"=",
"None",
"method_name",
"=",
"getattr",
"(",
"self",
".",
"view",
",",
"\"action\"",
",",
"self",
".",
"method",
".",
"lower",
"(",
")",
")",
"action",
"=",
"getattr",
"(",
"self",
".",
"view",
",",
"method_name",
",",
"None",
")",
"action_kwargs",
"=",
"getattr",
"(",
"action",
",",
"\"kwargs\"",
",",
"None",
")",
"if",
"action_kwargs",
":",
"title",
"=",
"action_kwargs",
".",
"get",
"(",
"\"name\"",
")",
"if",
"not",
"title",
"and",
"is_custom_action",
"(",
"self",
".",
"view",
".",
"action",
")",
":",
"title",
"=",
"_",
"(",
"self",
".",
"view",
".",
"action",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
")",
")",
".",
"capitalize",
"(",
")",
"if",
"not",
"title",
":",
"meta",
"=",
"self",
".",
"view",
".",
"get_admin_meta",
"(",
")",
"if",
"self",
".",
"view",
".",
"action",
"in",
"[",
"\"retrieve\"",
",",
"\"update\"",
",",
"\"partial_update\"",
"]",
":",
"title",
"=",
"str",
"(",
"meta",
".",
"get",
"(",
"\"verbose_name\"",
")",
"or",
"meta",
".",
"name",
")",
"elif",
"self",
".",
"view",
".",
"action",
"==",
"\"create\"",
":",
"title",
"=",
"meta",
".",
"get",
"(",
"\"verbose_name\"",
")",
"if",
"title",
":",
"title",
"=",
"str",
"(",
"_",
"(",
"\"Add\"",
")",
")",
"+",
"\" \"",
"+",
"str",
"(",
"title",
")",
".",
"lower",
"(",
")",
"else",
":",
"title",
"=",
"meta",
".",
"name",
"elif",
"self",
".",
"view",
".",
"action",
"==",
"\"list\"",
":",
"title",
"=",
"str",
"(",
"meta",
".",
"get",
"(",
"\"verbose_name_plural\"",
")",
"or",
"meta",
".",
"name",
")",
"else",
":",
"title",
"=",
"str",
"(",
"meta",
".",
"name",
")",
"return",
"title"
]
| Compat: drf-yasg 1.11 | [
"Compat",
":",
"drf",
"-",
"yasg",
"1",
".",
"11"
]
| python | test |
tcalmant/ipopo | pelix/services/configadmin.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/services/configadmin.py#L266-L296 | def update(self, properties=None):
# pylint: disable=W0212
"""
If called without properties, only notifies listeners
Update the properties of this Configuration object.
Stores the properties in persistent storage after adding or overwriting
the following properties:
* "service.pid" : is set to be the PID of this configuration.
* "service.factoryPid" : if this is a factory configuration it is set
to the factory PID else it is not set.
These system properties are all of type String.
If the corresponding Managed Service/Managed Service Factory is
registered, its updated method must be called asynchronously.
Else, this callback is delayed until aforementioned registration
occurs.
Also initiates an asynchronous call to all ConfigurationListeners with
a ConfigurationEvent.CM_UPDATED event.
:param properties: the new set of properties for this configuration
:raise IOError: Error storing the configuration
"""
with self.__lock:
# Update properties
if self.__properties_update(properties):
# Update configurations, if something changed
self.__config_admin._update(self) | [
"def",
"update",
"(",
"self",
",",
"properties",
"=",
"None",
")",
":",
"# pylint: disable=W0212",
"with",
"self",
".",
"__lock",
":",
"# Update properties",
"if",
"self",
".",
"__properties_update",
"(",
"properties",
")",
":",
"# Update configurations, if something changed",
"self",
".",
"__config_admin",
".",
"_update",
"(",
"self",
")"
]
| If called without properties, only notifies listeners
Update the properties of this Configuration object.
Stores the properties in persistent storage after adding or overwriting
the following properties:
* "service.pid" : is set to be the PID of this configuration.
* "service.factoryPid" : if this is a factory configuration it is set
to the factory PID else it is not set.
These system properties are all of type String.
If the corresponding Managed Service/Managed Service Factory is
registered, its updated method must be called asynchronously.
Else, this callback is delayed until aforementioned registration
occurs.
Also initiates an asynchronous call to all ConfigurationListeners with
a ConfigurationEvent.CM_UPDATED event.
:param properties: the new set of properties for this configuration
:raise IOError: Error storing the configuration | [
"If",
"called",
"without",
"properties",
"only",
"notifies",
"listeners"
]
| python | train |
edoburu/django-any-urlfield | any_urlfield/registry.py | https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/registry.py#L57-L72 | def get_widget(self):
"""
Create the widget for the URL type.
"""
form_field = self.get_form_field()
widget = form_field.widget
if isinstance(widget, type):
widget = widget()
# Widget instantiation needs to happen manually.
# Auto skip if choices is not an existing attribute.
form_field_choices = getattr(form_field, 'choices', None)
if form_field_choices is not None:
if hasattr(widget, 'choices'):
widget.choices = form_field_choices
return widget | [
"def",
"get_widget",
"(",
"self",
")",
":",
"form_field",
"=",
"self",
".",
"get_form_field",
"(",
")",
"widget",
"=",
"form_field",
".",
"widget",
"if",
"isinstance",
"(",
"widget",
",",
"type",
")",
":",
"widget",
"=",
"widget",
"(",
")",
"# Widget instantiation needs to happen manually.",
"# Auto skip if choices is not an existing attribute.",
"form_field_choices",
"=",
"getattr",
"(",
"form_field",
",",
"'choices'",
",",
"None",
")",
"if",
"form_field_choices",
"is",
"not",
"None",
":",
"if",
"hasattr",
"(",
"widget",
",",
"'choices'",
")",
":",
"widget",
".",
"choices",
"=",
"form_field_choices",
"return",
"widget"
]
| Create the widget for the URL type. | [
"Create",
"the",
"widget",
"for",
"the",
"URL",
"type",
"."
]
| python | train |
rodluger/everest | everest/user.py | https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L850-L925 | def plot_pipeline(self, pipeline, *args, **kwargs):
'''
Plots the light curve for the target de-trended with a given pipeline.
:param str pipeline: The name of the pipeline (lowercase). Options \
are 'everest2', 'everest1', and other mission-specific \
pipelines. For `K2`, the available pipelines are 'k2sff' \
and 'k2sc'.
Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.plot` function of the mission.
'''
if pipeline != 'everest2':
return getattr(missions, self.mission).pipelines.plot(self.ID,
pipeline,
*args,
**kwargs)
else:
# We're going to plot the everest 2 light curve like we plot
# the other pipelines for easy comparison
plot_raw = kwargs.get('plot_raw', False)
plot_cbv = kwargs.get('plot_cbv', True)
show = kwargs.get('show', True)
if plot_raw:
y = self.fraw
ylabel = 'Raw Flux'
elif plot_cbv:
y = self.fcor
ylabel = "EVEREST2 Flux"
else:
y = self.flux
ylabel = "EVEREST2 Flux"
# Remove nans
bnmask = np.concatenate([self.nanmask, self.badmask])
time = np.delete(self.time, bnmask)
flux = np.delete(y, bnmask)
# Plot it
fig, ax = pl.subplots(1, figsize=(10, 4))
fig.subplots_adjust(bottom=0.15)
ax.plot(time, flux, "k.", markersize=3, alpha=0.5)
# Axis limits
N = int(0.995 * len(flux))
hi, lo = flux[np.argsort(flux)][[N, -N]]
pad = (hi - lo) * 0.1
ylim = (lo - pad, hi + pad)
ax.set_ylim(ylim)
# Plot bad data points
ax.plot(self.time[self.badmask], y[self.badmask],
"r.", markersize=3, alpha=0.2)
# Show the CDPP
ax.annotate('%.2f ppm' % self._mission.CDPP(flux),
xy=(0.98, 0.975), xycoords='axes fraction',
ha='right', va='top', fontsize=12, color='r',
zorder=99)
# Appearance
ax.margins(0, None)
ax.set_xlabel("Time (%s)" % self._mission.TIMEUNITS, fontsize=16)
ax.set_ylabel(ylabel, fontsize=16)
fig.canvas.set_window_title("EVEREST2: EPIC %d" % (self.ID))
if show:
pl.show()
pl.close()
else:
return fig, ax | [
"def",
"plot_pipeline",
"(",
"self",
",",
"pipeline",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pipeline",
"!=",
"'everest2'",
":",
"return",
"getattr",
"(",
"missions",
",",
"self",
".",
"mission",
")",
".",
"pipelines",
".",
"plot",
"(",
"self",
".",
"ID",
",",
"pipeline",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"# We're going to plot the everest 2 light curve like we plot",
"# the other pipelines for easy comparison",
"plot_raw",
"=",
"kwargs",
".",
"get",
"(",
"'plot_raw'",
",",
"False",
")",
"plot_cbv",
"=",
"kwargs",
".",
"get",
"(",
"'plot_cbv'",
",",
"True",
")",
"show",
"=",
"kwargs",
".",
"get",
"(",
"'show'",
",",
"True",
")",
"if",
"plot_raw",
":",
"y",
"=",
"self",
".",
"fraw",
"ylabel",
"=",
"'Raw Flux'",
"elif",
"plot_cbv",
":",
"y",
"=",
"self",
".",
"fcor",
"ylabel",
"=",
"\"EVEREST2 Flux\"",
"else",
":",
"y",
"=",
"self",
".",
"flux",
"ylabel",
"=",
"\"EVEREST2 Flux\"",
"# Remove nans",
"bnmask",
"=",
"np",
".",
"concatenate",
"(",
"[",
"self",
".",
"nanmask",
",",
"self",
".",
"badmask",
"]",
")",
"time",
"=",
"np",
".",
"delete",
"(",
"self",
".",
"time",
",",
"bnmask",
")",
"flux",
"=",
"np",
".",
"delete",
"(",
"y",
",",
"bnmask",
")",
"# Plot it",
"fig",
",",
"ax",
"=",
"pl",
".",
"subplots",
"(",
"1",
",",
"figsize",
"=",
"(",
"10",
",",
"4",
")",
")",
"fig",
".",
"subplots_adjust",
"(",
"bottom",
"=",
"0.15",
")",
"ax",
".",
"plot",
"(",
"time",
",",
"flux",
",",
"\"k.\"",
",",
"markersize",
"=",
"3",
",",
"alpha",
"=",
"0.5",
")",
"# Axis limits",
"N",
"=",
"int",
"(",
"0.995",
"*",
"len",
"(",
"flux",
")",
")",
"hi",
",",
"lo",
"=",
"flux",
"[",
"np",
".",
"argsort",
"(",
"flux",
")",
"]",
"[",
"[",
"N",
",",
"-",
"N",
"]",
"]",
"pad",
"=",
"(",
"hi",
"-",
"lo",
")",
"*",
"0.1",
"ylim",
"=",
"(",
"lo",
"-",
"pad",
",",
"hi",
"+",
"pad",
")",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"# Plot bad data points",
"ax",
".",
"plot",
"(",
"self",
".",
"time",
"[",
"self",
".",
"badmask",
"]",
",",
"y",
"[",
"self",
".",
"badmask",
"]",
",",
"\"r.\"",
",",
"markersize",
"=",
"3",
",",
"alpha",
"=",
"0.2",
")",
"# Show the CDPP",
"ax",
".",
"annotate",
"(",
"'%.2f ppm'",
"%",
"self",
".",
"_mission",
".",
"CDPP",
"(",
"flux",
")",
",",
"xy",
"=",
"(",
"0.98",
",",
"0.975",
")",
",",
"xycoords",
"=",
"'axes fraction'",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'top'",
",",
"fontsize",
"=",
"12",
",",
"color",
"=",
"'r'",
",",
"zorder",
"=",
"99",
")",
"# Appearance",
"ax",
".",
"margins",
"(",
"0",
",",
"None",
")",
"ax",
".",
"set_xlabel",
"(",
"\"Time (%s)\"",
"%",
"self",
".",
"_mission",
".",
"TIMEUNITS",
",",
"fontsize",
"=",
"16",
")",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
",",
"fontsize",
"=",
"16",
")",
"fig",
".",
"canvas",
".",
"set_window_title",
"(",
"\"EVEREST2: EPIC %d\"",
"%",
"(",
"self",
".",
"ID",
")",
")",
"if",
"show",
":",
"pl",
".",
"show",
"(",
")",
"pl",
".",
"close",
"(",
")",
"else",
":",
"return",
"fig",
",",
"ax"
]
| Plots the light curve for the target de-trended with a given pipeline.
:param str pipeline: The name of the pipeline (lowercase). Options \
are 'everest2', 'everest1', and other mission-specific \
pipelines. For `K2`, the available pipelines are 'k2sff' \
and 'k2sc'.
Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.plot` function of the mission. | [
"Plots",
"the",
"light",
"curve",
"for",
"the",
"target",
"de",
"-",
"trended",
"with",
"a",
"given",
"pipeline",
"."
]
| python | train |
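A hedged usage sketch for the method above; the EPIC target ID is made up and the calls assume the EVEREST K2 data products can be fetched:

import everest

star = everest.Everest(201367065)              # load an EVEREST light curve
star.plot_pipeline('everest2', plot_cbv=True)  # takes the EVEREST2 branch shown above
star.plot_pipeline('k2sff')                    # delegates to missions.<mission>.pipelines.plot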
saltstack/salt | salt/modules/flatpak.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/flatpak.py#L82-L109 | def uninstall(pkg):
'''
Uninstall the specified package.
Args:
pkg (str): The package name.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.uninstall org.gimp.GIMP
'''
ret = {'result': None, 'output': ''}
out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' uninstall ' + pkg)
if out['retcode'] and out['stderr']:
ret['stderr'] = out['stderr'].strip()
ret['result'] = False
else:
ret['stdout'] = out['stdout'].strip()
ret['result'] = True
return ret | [
"def",
"uninstall",
"(",
"pkg",
")",
":",
"ret",
"=",
"{",
"'result'",
":",
"None",
",",
"'output'",
":",
"''",
"}",
"out",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"FLATPAK_BINARY_NAME",
"+",
"' uninstall '",
"+",
"pkg",
")",
"if",
"out",
"[",
"'retcode'",
"]",
"and",
"out",
"[",
"'stderr'",
"]",
":",
"ret",
"[",
"'stderr'",
"]",
"=",
"out",
"[",
"'stderr'",
"]",
".",
"strip",
"(",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"else",
":",
"ret",
"[",
"'stdout'",
"]",
"=",
"out",
"[",
"'stdout'",
"]",
".",
"strip",
"(",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"return",
"ret"
]
| Uninstall the specified package.
Args:
pkg (str): The package name.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.uninstall org.gimp.GIMP | [
"Uninstall",
"the",
"specified",
"package",
"."
]
| python | train |
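Besides the documented CLI path (salt '*' flatpak.uninstall org.gimp.GIMP), a direct call would look roughly like this; it assumes the module has been loaded by Salt so that __salt__ is populated:

ret = uninstall('org.gimp.GIMP')
if ret['result']:
    print(ret['stdout'])   # flatpak output on success
else:
    print(ret['stderr'])   # error text when the command failed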
shexSpec/grammar | parsers/python/pyshexc/parser_impl/parser_context.py | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/parser_context.py#L86-L91 | def prefixedname_to_iriref(self, prefix: ShExDocParser.PrefixedNameContext) -> ShExJ.IRIREF:
""" prefixedName: PNAME_LN | PNAME_NS
PNAME_NS: PN_PREFIX? ':' ;
PNAME_LN: PNAME_NS PN_LOCAL ;
"""
return ShExJ.IRIREF(self.prefixedname_to_str(prefix)) | [
"def",
"prefixedname_to_iriref",
"(",
"self",
",",
"prefix",
":",
"ShExDocParser",
".",
"PrefixedNameContext",
")",
"->",
"ShExJ",
".",
"IRIREF",
":",
"return",
"ShExJ",
".",
"IRIREF",
"(",
"self",
".",
"prefixedname_to_str",
"(",
"prefix",
")",
")"
]
| prefixedName: PNAME_LN | PNAME_NS
PNAME_NS: PN_PREFIX? ':' ;
PNAME_LN: PNAME_NS PN_LOCAL ; | [
"prefixedName",
":",
"PNAME_LN",
"|",
"PNAME_NS",
"PNAME_NS",
":",
"PN_PREFIX?",
":",
";",
"PNAME_LN",
":",
"PNAME_NS",
"PN_LOCAL",
";"
]
| python | train |
aegirhall/console-menu | consolemenu/menu_formatter.py | https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/menu_formatter.py#L107-L114 | def set_top_margin(self, top_margin):
"""
Set the top margin of the menu. This will determine the number of console lines between the top edge
of the screen and the top menu border.
:param top_margin: an integer value
"""
self.__header.style.margins.top = top_margin
return self | [
"def",
"set_top_margin",
"(",
"self",
",",
"top_margin",
")",
":",
"self",
".",
"__header",
".",
"style",
".",
"margins",
".",
"top",
"=",
"top_margin",
"return",
"self"
]
| Set the top margin of the menu. This will determine the number of console lines between the top edge
of the screen and the top menu border.
:param top_margin: an integer value | [
"Set",
"the",
"top",
"margin",
"of",
"the",
"menu",
".",
"This",
"will",
"determine",
"the",
"number",
"of",
"console",
"lines",
"between",
"the",
"top",
"edge",
"of",
"the",
"screen",
"and",
"the",
"top",
"menu",
"border",
".",
":",
"param",
"top_margin",
":",
"an",
"integer",
"value"
]
| python | train |
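A hedged builder-style usage sketch; the MenuFormatBuilder class name is assumed from console-menu's formatting API and is not taken from the record above:

from consolemenu import MenuFormatBuilder

fmt = MenuFormatBuilder().set_top_margin(2)   # two blank console lines above the top border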
jciskey/pygraph | pygraph/functions/planarity/kocay_algorithm.py | https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/functions/planarity/kocay_algorithm.py#L490-L510 | def merge_Fm(dfs_data):
"""Merges Fm-1 and Fm, as defined on page 19 of the paper."""
FG = dfs_data['FG']
m = FG['m']
FGm = FG[m]
FGm1 = FG[m-1]
if FGm[0]['u'] < FGm1[0]['u']:
FGm1[0]['u'] = FGm[0]['u']
if FGm[0]['v'] > FGm1[0]['v']:
FGm1[0]['v'] = FGm[0]['v']
if FGm[1]['x'] < FGm1[1]['x']:
FGm1[1]['x'] = FGm[1]['x']
if FGm[1]['y'] > FGm1[1]['y']:
FGm1[1]['y'] = FGm[1]['y']
del FG[m]
FG['m'] -= 1 | [
"def",
"merge_Fm",
"(",
"dfs_data",
")",
":",
"FG",
"=",
"dfs_data",
"[",
"'FG'",
"]",
"m",
"=",
"FG",
"[",
"'m'",
"]",
"FGm",
"=",
"FG",
"[",
"m",
"]",
"FGm1",
"=",
"FG",
"[",
"m",
"-",
"1",
"]",
"if",
"FGm",
"[",
"0",
"]",
"[",
"'u'",
"]",
"<",
"FGm1",
"[",
"0",
"]",
"[",
"'u'",
"]",
":",
"FGm1",
"[",
"0",
"]",
"[",
"'u'",
"]",
"=",
"FGm",
"[",
"0",
"]",
"[",
"'u'",
"]",
"if",
"FGm",
"[",
"0",
"]",
"[",
"'v'",
"]",
">",
"FGm1",
"[",
"0",
"]",
"[",
"'v'",
"]",
":",
"FGm1",
"[",
"0",
"]",
"[",
"'v'",
"]",
"=",
"FGm",
"[",
"0",
"]",
"[",
"'v'",
"]",
"if",
"FGm",
"[",
"1",
"]",
"[",
"'x'",
"]",
"<",
"FGm1",
"[",
"1",
"]",
"[",
"'x'",
"]",
":",
"FGm1",
"[",
"1",
"]",
"[",
"'x'",
"]",
"=",
"FGm",
"[",
"1",
"]",
"[",
"'x'",
"]",
"if",
"FGm",
"[",
"1",
"]",
"[",
"'y'",
"]",
">",
"FGm1",
"[",
"1",
"]",
"[",
"'y'",
"]",
":",
"FGm1",
"[",
"1",
"]",
"[",
"'y'",
"]",
"=",
"FGm",
"[",
"1",
"]",
"[",
"'y'",
"]",
"del",
"FG",
"[",
"m",
"]",
"FG",
"[",
"'m'",
"]",
"-=",
"1"
]
| Merges Fm-1 and Fm, as defined on page 19 of the paper. | [
"Merges",
"Fm",
"-",
"1",
"and",
"Fm",
"as",
"defined",
"on",
"page",
"19",
"of",
"the",
"paper",
"."
]
| python | train |
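A self-contained sketch of the FG bookkeeping this function mutates; the frame values are invented for illustration:

dfs_data = {'FG': {
    'm': 2,
    1: [{'u': 3, 'v': 7}, {'x': 2, 'y': 9}],
    2: [{'u': 1, 'v': 8}, {'x': 4, 'y': 12}],
}}
merge_Fm(dfs_data)
# FG[1] now spans both frames (u=1, v=8, x=2, y=12); FG[2] is deleted and FG['m'] == 1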
DistrictDataLabs/yellowbrick | yellowbrick/utils/nan_warnings.py | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/nan_warnings.py#L10-L44 | def filter_missing(X, y=None):
"""
Removes rows that contain np.nan values in data. If y is given,
X and y will be filtered together so that their shape remains identical.
For example, rows in X with nans will also remove rows in y, or rows in y
with np.nans will also remove corresponding rows in X.
Parameters
------------
X : array-like
Data in shape (m, n) that possibly contains np.nan values
y : array-like, optional
Data in shape (m, 1) that possibly contains np.nan values
Returns
--------
X' : np.array
Possibly transformed X with any row containing np.nan removed
y' : np.array
If y is given, will also return possibly transformed y to match the
shape of X'.
Notes
------
This function will return either a np.array if only X is passed or a tuple
if both X and y is passed. Because all return values are indexable, it is
important to recognize what is being passed to the function to determine
its output.
"""
if y is not None:
return filter_missing_X_and_y(X, y)
else:
return X[~np.isnan(X).any(axis=1)] | [
"def",
"filter_missing",
"(",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"y",
"is",
"not",
"None",
":",
"return",
"filter_missing_X_and_y",
"(",
"X",
",",
"y",
")",
"else",
":",
"return",
"X",
"[",
"~",
"np",
".",
"isnan",
"(",
"X",
")",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"]"
]
| Removes rows that contain np.nan values in data. If y is given,
X and y will be filtered together so that their shape remains identical.
For example, rows in X with nans will also remove rows in y, or rows in y
with np.nans will also remove corresponding rows in X.
Parameters
------------
X : array-like
Data in shape (m, n) that possibly contains np.nan values
y : array-like, optional
Data in shape (m, 1) that possibly contains np.nan values
Returns
--------
X' : np.array
Possibly transformed X with any row containing np.nan removed
y' : np.array
If y is given, will also return possibly transformed y to match the
shape of X'.
Notes
------
This function will return either a np.array if only X is passed or a tuple
if both X and y is passed. Because all return values are indexable, it is
important to recognize what is being passed to the function to determine
its output. | [
"Removes",
"rows",
"that",
"contain",
"np",
".",
"nan",
"values",
"in",
"data",
".",
"If",
"y",
"is",
"given",
"X",
"and",
"y",
"will",
"be",
"filtered",
"together",
"so",
"that",
"their",
"shape",
"remains",
"identical",
".",
"For",
"example",
"rows",
"in",
"X",
"with",
"nans",
"will",
"also",
"remove",
"rows",
"in",
"y",
"or",
"rows",
"in",
"y",
"with",
"np",
".",
"nans",
"will",
"also",
"remove",
"corresponding",
"rows",
"in",
"X",
"."
]
| python | train |
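A quick illustration of the X-only branch (when y is given, the work is deferred to filter_missing_X_and_y):

import numpy as np

X = np.array([[1.0, 2.0], [np.nan, 3.0], [4.0, 5.0]])
clean = filter_missing(X)   # keeps rows 0 and 2; the row containing np.nan is dropped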
ronaldguillen/wave | wave/request.py | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/request.py#L308-L326 | def _authenticate(self):
"""
Attempt to authenticate the request using each authentication instance
in turn.
Returns a three-tuple of (authenticator, user, authtoken).
"""
for authenticator in self.authenticators:
try:
user_auth_tuple = authenticator.authenticate(self)
except exceptions.APIException:
self._not_authenticated()
raise
if user_auth_tuple is not None:
self._authenticator = authenticator
self.user, self.auth = user_auth_tuple
return
self._not_authenticated() | [
"def",
"_authenticate",
"(",
"self",
")",
":",
"for",
"authenticator",
"in",
"self",
".",
"authenticators",
":",
"try",
":",
"user_auth_tuple",
"=",
"authenticator",
".",
"authenticate",
"(",
"self",
")",
"except",
"exceptions",
".",
"APIException",
":",
"self",
".",
"_not_authenticated",
"(",
")",
"raise",
"if",
"user_auth_tuple",
"is",
"not",
"None",
":",
"self",
".",
"_authenticator",
"=",
"authenticator",
"self",
".",
"user",
",",
"self",
".",
"auth",
"=",
"user_auth_tuple",
"return",
"self",
".",
"_not_authenticated",
"(",
")"
]
| Attempt to authenticate the request using each authentication instance
in turn.
Returns a three-tuple of (authenticator, user, authtoken). | [
"Attempt",
"to",
"authenticate",
"the",
"request",
"using",
"each",
"authentication",
"instance",
"in",
"turn",
".",
"Returns",
"a",
"three",
"-",
"tuple",
"of",
"(",
"authenticator",
"user",
"authtoken",
")",
"."
]
| python | train |
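A hypothetical authenticator showing the contract _authenticate() relies on: return None to fall through to the next authenticator, or a (user, auth) tuple that becomes request.user / request.auth. The token handling below is invented for illustration:

class TokenAuthenticator:
    def authenticate(self, request):
        token = request.META.get('HTTP_AUTHORIZATION')
        if not token:
            return None                        # let the next authenticator try
        return ("user-for-" + token, token)    # stored as request.user, request.auth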
fractalego/parvusdb | parvusdb/utils/code_container.py | https://github.com/fractalego/parvusdb/blob/d5e818d3f3c3decfd4835ef2133aa956b6d87b1d/parvusdb/utils/code_container.py#L68-L90 | def substitute_namespace_into_graph(self, graph):
"""
Creates a graph from the local namespace of the code (to be used after the execution of the code)
:param graph: The graph to use as a recipient of the namespace
:return: the updated graph
"""
for key, value in self.namespace.items():
try:
nodes = graph.vs.select(name=key)
for node in nodes:
for k, v in value.items():
node[k] = v
except:
pass
try:
nodes = graph.es.select(name=key)
for node in nodes:
for k, v in value.items():
node[k] = v
except:
pass
return graph | [
"def",
"substitute_namespace_into_graph",
"(",
"self",
",",
"graph",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"namespace",
".",
"items",
"(",
")",
":",
"try",
":",
"nodes",
"=",
"graph",
".",
"vs",
".",
"select",
"(",
"name",
"=",
"key",
")",
"for",
"node",
"in",
"nodes",
":",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"node",
"[",
"k",
"]",
"=",
"v",
"except",
":",
"pass",
"try",
":",
"nodes",
"=",
"graph",
".",
"es",
".",
"select",
"(",
"name",
"=",
"key",
")",
"for",
"node",
"in",
"nodes",
":",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"node",
"[",
"k",
"]",
"=",
"v",
"except",
":",
"pass",
"return",
"graph"
]
| Creates a graph from the local namespace of the code (to be used after the execution of the code)
:param graph: The graph to use as a recipient of the namespace
:return: the updated graph | [
"Creates",
"a",
"graph",
"from",
"the",
"local",
"namespace",
"of",
"the",
"code",
"(",
"to",
"be",
"used",
"after",
"the",
"execution",
"of",
"the",
"code",
")"
]
| python | train |
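A minimal sketch with python-igraph, which the .vs/.es selects above imply; the namespace contents are invented:

import igraph

g = igraph.Graph()
g.add_vertices(2)
g.vs["name"] = ["a", "b"]
# If the executed code left {"a": {"color": "red"}} in the container's namespace,
# substitute_namespace_into_graph(g) would copy color="red" onto the vertex named "a".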
vmonaco/pohmm | pohmm/utils.py | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/utils.py#L106-L119 | def gen_stochastic_matrix(size, random_state=None):
"""
Generate a uniformly-random stochastic array or matrix
"""
if not type(size) is tuple:
size = (1, size)
assert len(size) == 2
n = random_state.uniform(size=(size[0], size[1] - 1))
n = np.concatenate([np.zeros((size[0], 1)), n, np.ones((size[0], 1))], axis=1)
A = np.diff(np.sort(n))
return A.squeeze() | [
"def",
"gen_stochastic_matrix",
"(",
"size",
",",
"random_state",
"=",
"None",
")",
":",
"if",
"not",
"type",
"(",
"size",
")",
"is",
"tuple",
":",
"size",
"=",
"(",
"1",
",",
"size",
")",
"assert",
"len",
"(",
"size",
")",
"==",
"2",
"n",
"=",
"random_state",
".",
"uniform",
"(",
"size",
"=",
"(",
"size",
"[",
"0",
"]",
",",
"size",
"[",
"1",
"]",
"-",
"1",
")",
")",
"n",
"=",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"zeros",
"(",
"(",
"size",
"[",
"0",
"]",
",",
"1",
")",
")",
",",
"n",
",",
"np",
".",
"ones",
"(",
"(",
"size",
"[",
"0",
"]",
",",
"1",
")",
")",
"]",
",",
"axis",
"=",
"1",
")",
"A",
"=",
"np",
".",
"diff",
"(",
"np",
".",
"sort",
"(",
"n",
")",
")",
"return",
"A",
".",
"squeeze",
"(",
")"
]
| Generate a uniformly-random stochastic array or matrix | [
"Generate",
"a",
"unfiformly",
"-",
"random",
"stochastic",
"array",
"or",
"matrix"
]
| python | train |
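Each generated row sums to 1; a quick check, assuming a NumPy RandomState is passed in for random_state:

import numpy as np

rs = np.random.RandomState(0)
A = gen_stochastic_matrix((3, 4), random_state=rs)
assert A.shape == (3, 4) and np.allclose(A.sum(axis=1), 1.0)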
keenlabs/KeenClient-Python | keen/saved_queries.py | https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/saved_queries.py#L48-L58 | def results(self, query_name):
"""
Gets a single saved query with a 'result' object for a project from the
Keen IO API given a query name.
Read or Master key must be set.
"""
url = "{0}/{1}/result".format(self.saved_query_url, query_name)
response = self._get_json(HTTPMethods.GET, url, self._get_read_key())
return response | [
"def",
"results",
"(",
"self",
",",
"query_name",
")",
":",
"url",
"=",
"\"{0}/{1}/result\"",
".",
"format",
"(",
"self",
".",
"saved_query_url",
",",
"query_name",
")",
"response",
"=",
"self",
".",
"_get_json",
"(",
"HTTPMethods",
".",
"GET",
",",
"url",
",",
"self",
".",
"_get_read_key",
"(",
")",
")",
"return",
"response"
]
| Gets a single saved query with a 'result' object for a project from the
Keen IO API given a query name.
Read or Master key must be set. | [
"Gets",
"a",
"single",
"saved",
"query",
"with",
"a",
"result",
"object",
"for",
"a",
"project",
"from",
"the",
"Keen",
"IO",
"API",
"given",
"a",
"query",
"name",
".",
"Read",
"or",
"Master",
"key",
"must",
"be",
"set",
"."
]
| python | train |
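A hedged usage sketch through the Keen client; the project/key values and the saved query name are placeholders, and the module-level saved_queries proxy is assumed from the client API:

import keen

keen.project_id = "PROJECT_ID"
keen.read_key = "READ_KEY"
saved_query = keen.saved_queries.results("total-signups")   # dict including a 'result' entry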
HPENetworking/PYHPEIMC | archived/pyhpimc.py | https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/archived/pyhpimc.py#L543-L566 | def create_dev_vlan(devid, vlanid, vlan_name):
"""
function takes devid and vlanid vlan_name of specific device and 802.1q VLAN tag and issues a RESTFUL call to add the
specified VLAN from the target device. VLAN Name MUST be valid on target device.
:param devid: int or str value of the target device
:param vlanid:int or str value of target 802.1q VLAN
:param vlan_name: str value of the target 802.1q VLAN name. MUST be valid name on target device.
:return:HTTP Status code of 201 with no values.
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
create_dev_vlan_url = "/imcrs/vlan?devId=" + str(devid)
f_url = url + create_dev_vlan_url
payload = '''{ "vlanId": "''' + str(vlanid) + '''", "vlanName" : "''' + str(vlan_name) + '''"}'''
r = requests.post(f_url, data=payload, auth=auth,
headers=headers) # creates the URL using the payload variable as the contents
print (r.status_code)
if r.status_code == 201:
print ('Vlan Created')
return r.status_code
elif r.status_code == 409:
return '''Unable to create VLAN.\nVLAN Already Exists\nDevice does not support VLAN function'''
else:
print("An Error has occured") | [
"def",
"create_dev_vlan",
"(",
"devid",
",",
"vlanid",
",",
"vlan_name",
")",
":",
"if",
"auth",
"is",
"None",
"or",
"url",
"is",
"None",
":",
"# checks to see if the imc credentials are already available",
"set_imc_creds",
"(",
")",
"create_dev_vlan_url",
"=",
"\"/imcrs/vlan?devId=\"",
"+",
"str",
"(",
"devid",
")",
"f_url",
"=",
"url",
"+",
"create_dev_vlan_url",
"payload",
"=",
"'''{ \"vlanId\": \"'''",
"+",
"str",
"(",
"vlanid",
")",
"+",
"'''\", \"vlanName\" : \"'''",
"+",
"str",
"(",
"vlan_name",
")",
"+",
"'''\"}'''",
"r",
"=",
"requests",
".",
"post",
"(",
"f_url",
",",
"data",
"=",
"payload",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"headers",
")",
"# creates the URL using the payload variable as the contents",
"print",
"(",
"r",
".",
"status_code",
")",
"if",
"r",
".",
"status_code",
"==",
"201",
":",
"print",
"(",
"'Vlan Created'",
")",
"return",
"r",
".",
"status_code",
"elif",
"r",
".",
"status_code",
"==",
"409",
":",
"return",
"'''Unable to create VLAN.\\nVLAN Already Exists\\nDevice does not support VLAN function'''",
"else",
":",
"print",
"(",
"\"An Error has occured\"",
")"
]
| function takes devid and vlanid vlan_name of specific device and 802.1q VLAN tag and issues a RESTFUL call to add the
specified VLAN from the target device. VLAN Name MUST be valid on target device.
:param devid: int or str value of the target device
:param vlanid:int or str value of target 802.1q VLAN
:param vlan_name: str value of the target 802.1q VLAN name. MUST be valid name on target device.
:return:HTTP Status code of 201 with no values. | [
"function",
"takes",
"devid",
"and",
"vlanid",
"vlan_name",
"of",
"specific",
"device",
"and",
"802",
".",
"1q",
"VLAN",
"tag",
"and",
"issues",
"a",
"RESTFUL",
"call",
"to",
"add",
"the",
"specified",
"VLAN",
"from",
"the",
"target",
"device",
".",
"VLAN",
"Name",
"MUST",
"be",
"valid",
"on",
"target",
"device",
".",
":",
"param",
"devid",
":",
"int",
"or",
"str",
"value",
"of",
"the",
"target",
"device",
":",
"param",
"vlanid",
":",
"int",
"or",
"str",
"value",
"of",
"target",
"802",
".",
"1q",
"VLAN",
":",
"param",
"vlan_name",
":",
"str",
"value",
"of",
"the",
"target",
"802",
".",
"1q",
"VLAN",
"name",
".",
"MUST",
"be",
"valid",
"name",
"on",
"target",
"device",
".",
":",
"return",
":",
"HTTP",
"Status",
"code",
"of",
"201",
"with",
"no",
"values",
"."
]
| python | train |
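A hypothetical call; the device ID, VLAN ID and VLAN name are made up, and the module prompts for IMC credentials via set_imc_creds() if none are cached:

status = create_dev_vlan('312', '500', 'management')
if status == 201:
    print('VLAN pushed to the target device')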
ebu/PlugIt | plugit_proxy/views.py | https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/views.py#L495-L530 | def build_final_response(request, meta, result, menu, hproject, proxyMode, context):
"""Build the final response to send back to the browser"""
if 'no_template' in meta and meta['no_template']: # Just send the json back
return HttpResponse(result)
# TODO this breaks pages not using new template
# Add sidebar toggler if plugit did not add by itself
# if not "sidebar-toggler" in result:
# result = "<div class=\"menubar\"><div class=\"sidebar-toggler visible-xs\"><i class=\"ion-navicon\"></i></div></div>" + result
# render the template into the whole page
if not settings.PIAPI_STANDALONE:
return render_to_response('plugIt/' + hproject.get_plugItTemplate_display(),
{"project": hproject,
"plugit_content": result,
"plugit_menu": menu,
'context': context},
context_instance=RequestContext(request))
if proxyMode: # Force inclusion inside template
return render_to_response('plugIt/base.html',
{'plugit_content': result,
"plugit_menu": menu,
'context': context},
context_instance=RequestContext(request))
renderPlugItTemplate = 'plugItBase.html'
if settings.PIAPI_PLUGITTEMPLATE:
renderPlugItTemplate = settings.PIAPI_PLUGITTEMPLATE
return render_to_response('plugIt/' + renderPlugItTemplate,
{"plugit_content": result,
"plugit_menu": menu,
'context': context},
context_instance=RequestContext(request)) | [
"def",
"build_final_response",
"(",
"request",
",",
"meta",
",",
"result",
",",
"menu",
",",
"hproject",
",",
"proxyMode",
",",
"context",
")",
":",
"if",
"'no_template'",
"in",
"meta",
"and",
"meta",
"[",
"'no_template'",
"]",
":",
"# Just send the json back",
"return",
"HttpResponse",
"(",
"result",
")",
"# TODO this breaks pages not using new template",
"# Add sidebar toggler if plugit did not add by itself",
"# if not \"sidebar-toggler\" in result:",
"# result = \"<div class=\\\"menubar\\\"><div class=\\\"sidebar-toggler visible-xs\\\"><i class=\\\"ion-navicon\\\"></i></div></div>\" + result",
"# render the template into the whole page",
"if",
"not",
"settings",
".",
"PIAPI_STANDALONE",
":",
"return",
"render_to_response",
"(",
"'plugIt/'",
"+",
"hproject",
".",
"get_plugItTemplate_display",
"(",
")",
",",
"{",
"\"project\"",
":",
"hproject",
",",
"\"plugit_content\"",
":",
"result",
",",
"\"plugit_menu\"",
":",
"menu",
",",
"'context'",
":",
"context",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")",
"if",
"proxyMode",
":",
"# Force inclusion inside template",
"return",
"render_to_response",
"(",
"'plugIt/base.html'",
",",
"{",
"'plugit_content'",
":",
"result",
",",
"\"plugit_menu\"",
":",
"menu",
",",
"'context'",
":",
"context",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")",
"renderPlugItTemplate",
"=",
"'plugItBase.html'",
"if",
"settings",
".",
"PIAPI_PLUGITTEMPLATE",
":",
"renderPlugItTemplate",
"=",
"settings",
".",
"PIAPI_PLUGITTEMPLATE",
"return",
"render_to_response",
"(",
"'plugIt/'",
"+",
"renderPlugItTemplate",
",",
"{",
"\"plugit_content\"",
":",
"result",
",",
"\"plugit_menu\"",
":",
"menu",
",",
"'context'",
":",
"context",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
]
| Build the final response to send back to the browser | [
"Build",
"the",
"final",
"response",
"to",
"send",
"back",
"to",
"the",
"browser"
]
| python | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/xgboost/_tree_ensemble.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/xgboost/_tree_ensemble.py#L75-L156 | def convert_tree_ensemble(model, feature_names, target, force_32bit_float):
"""Convert a generic tree model to the protobuf spec.
This currently supports:
* Decision tree regression
Parameters
----------
model: str | Booster
Path on disk where the XGboost JSON representation of the model is or
a handle to the XGboost model.
feature_names : list of strings or None
Names of each of the features. When set to None, the feature names are
extracted from the model.
target: str,
Name of the output column.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_XGBOOST):
raise RuntimeError('xgboost not found. xgboost conversion API is disabled.')
import json
import os
feature_map = None
if isinstance(model, (_xgboost.core.Booster, _xgboost.XGBRegressor)):
# Testing a few corner cases that we don't support
if isinstance(model, _xgboost.XGBRegressor):
try:
objective = model.get_xgb_params()["objective"]
except:
objective = None
if objective in ["reg:gamma", "reg:tweedie"]:
raise ValueError("Regression objective '%s' not supported for export." % objective)
# Now use the booster API.
if isinstance(model, _xgboost.XGBRegressor):
# Name change in 0.7
if hasattr(model, 'get_booster'):
model = model.get_booster()
else:
model = model.booster()
# Xgboost sometimes has feature names in there. Sometimes does not.
if (feature_names is None) and (model.feature_names is None):
raise ValueError("Feature names not present in the model. Must be provided during conversion.")
feature_names = model.feature_names
if feature_names is None:
feature_names = model.feature_names
xgb_model_str = model.get_dump(with_stats=True, dump_format = 'json')
if model.feature_names:
feature_map = {f:i for i,f in enumerate(model.feature_names)}
# Path on the file system where the XGboost model exists.
elif isinstance(model, str):
if not os.path.exists(model):
raise TypeError("Invalid path %s." % model)
with open(model) as f:
xgb_model_str = json.load(f)
feature_map = {f:i for i,f in enumerate(feature_names)}
else:
raise TypeError("Unexpected type. Expecting XGBoost model.")
mlkit_tree = _TreeEnsembleRegressor(feature_names, target)
mlkit_tree.set_default_prediction_value(0.5)
for xgb_tree_id, xgb_tree_str in enumerate(xgb_model_str):
xgb_tree_json = json.loads(xgb_tree_str)
recurse_json(mlkit_tree, xgb_tree_json, xgb_tree_id, node_id = 0,
feature_map = feature_map, force_32bit_float = force_32bit_float)
return mlkit_tree.spec | [
"def",
"convert_tree_ensemble",
"(",
"model",
",",
"feature_names",
",",
"target",
",",
"force_32bit_float",
")",
":",
"if",
"not",
"(",
"_HAS_XGBOOST",
")",
":",
"raise",
"RuntimeError",
"(",
"'xgboost not found. xgboost conversion API is disabled.'",
")",
"import",
"json",
"import",
"os",
"feature_map",
"=",
"None",
"if",
"isinstance",
"(",
"model",
",",
"(",
"_xgboost",
".",
"core",
".",
"Booster",
",",
"_xgboost",
".",
"XGBRegressor",
")",
")",
":",
"# Testing a few corner cases that we don't support",
"if",
"isinstance",
"(",
"model",
",",
"_xgboost",
".",
"XGBRegressor",
")",
":",
"try",
":",
"objective",
"=",
"model",
".",
"get_xgb_params",
"(",
")",
"[",
"\"objective\"",
"]",
"except",
":",
"objective",
"=",
"None",
"if",
"objective",
"in",
"[",
"\"reg:gamma\"",
",",
"\"reg:tweedie\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"Regression objective '%s' not supported for export.\"",
"%",
"objective",
")",
"# Now use the booster API.",
"if",
"isinstance",
"(",
"model",
",",
"_xgboost",
".",
"XGBRegressor",
")",
":",
"# Name change in 0.7",
"if",
"hasattr",
"(",
"model",
",",
"'get_booster'",
")",
":",
"model",
"=",
"model",
".",
"get_booster",
"(",
")",
"else",
":",
"model",
"=",
"model",
".",
"booster",
"(",
")",
"# Xgboost sometimes has feature names in there. Sometimes does not.",
"if",
"(",
"feature_names",
"is",
"None",
")",
"and",
"(",
"model",
".",
"feature_names",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Feature names not present in the model. Must be provided during conversion.\"",
")",
"feature_names",
"=",
"model",
".",
"feature_names",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"model",
".",
"feature_names",
"xgb_model_str",
"=",
"model",
".",
"get_dump",
"(",
"with_stats",
"=",
"True",
",",
"dump_format",
"=",
"'json'",
")",
"if",
"model",
".",
"feature_names",
":",
"feature_map",
"=",
"{",
"f",
":",
"i",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"model",
".",
"feature_names",
")",
"}",
"# Path on the file system where the XGboost model exists.",
"elif",
"isinstance",
"(",
"model",
",",
"str",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"model",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid path %s.\"",
"%",
"model",
")",
"with",
"open",
"(",
"model",
")",
"as",
"f",
":",
"xgb_model_str",
"=",
"json",
".",
"load",
"(",
"f",
")",
"feature_map",
"=",
"{",
"f",
":",
"i",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"feature_names",
")",
"}",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unexpected type. Expecting XGBoost model.\"",
")",
"mlkit_tree",
"=",
"_TreeEnsembleRegressor",
"(",
"feature_names",
",",
"target",
")",
"mlkit_tree",
".",
"set_default_prediction_value",
"(",
"0.5",
")",
"for",
"xgb_tree_id",
",",
"xgb_tree_str",
"in",
"enumerate",
"(",
"xgb_model_str",
")",
":",
"xgb_tree_json",
"=",
"json",
".",
"loads",
"(",
"xgb_tree_str",
")",
"recurse_json",
"(",
"mlkit_tree",
",",
"xgb_tree_json",
",",
"xgb_tree_id",
",",
"node_id",
"=",
"0",
",",
"feature_map",
"=",
"feature_map",
",",
"force_32bit_float",
"=",
"force_32bit_float",
")",
"return",
"mlkit_tree",
".",
"spec"
]
| Convert a generic tree model to the protobuf spec.
This currently supports:
* Decision tree regression
Parameters
----------
model: str | Booster
Path on disk where the XGboost JSON representation of the model is or
a handle to the XGboost model.
feature_names : list of strings or None
Names of each of the features. When set to None, the feature names are
extracted from the model.
target: str,
Name of the output column.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"generic",
"tree",
"model",
"to",
"the",
"protobuf",
"spec",
"."
]
| python | train |
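A hypothetical call following the documented signature; "xgb_dump.json" is a made-up path to an XGBoost JSON dump and the feature/target names are assumptions:

spec = convert_tree_ensemble("xgb_dump.json",
                             feature_names=["sqft", "beds", "age"],
                             target="price",
                             force_32bit_float=True)
# spec is a Model_pb protobuf describing a TreeEnsembleRegressor; it can be
# wrapped with coremltools.models.MLModel(spec) for prediction.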
decryptus/httpdis | httpdis/httpdis.py | https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L1114-L1134 | def sigterm_handler(signum, stack_frame):
"""
Just tell the server to exit.
WARNING: There are race conditions, for example with TimeoutSocket.accept.
We don't care: the user can just rekill the process after like 1 sec. if
the first kill did not work.
"""
# pylint: disable-msg=W0613
global _KILLED
for name, cmd in _COMMANDS.iteritems():
if cmd.at_stop:
LOG.info("at_stop: %r", name)
cmd.at_stop()
_KILLED = True
if _HTTP_SERVER:
_HTTP_SERVER.kill()
_HTTP_SERVER.server_close() | [
"def",
"sigterm_handler",
"(",
"signum",
",",
"stack_frame",
")",
":",
"# pylint: disable-msg=W0613",
"global",
"_KILLED",
"for",
"name",
",",
"cmd",
"in",
"_COMMANDS",
".",
"iteritems",
"(",
")",
":",
"if",
"cmd",
".",
"at_stop",
":",
"LOG",
".",
"info",
"(",
"\"at_stop: %r\"",
",",
"name",
")",
"cmd",
".",
"at_stop",
"(",
")",
"_KILLED",
"=",
"True",
"if",
"_HTTP_SERVER",
":",
"_HTTP_SERVER",
".",
"kill",
"(",
")",
"_HTTP_SERVER",
".",
"server_close",
"(",
")"
]
| Just tell the server to exit.
WARNING: There are race conditions, for example with TimeoutSocket.accept.
We don't care: the user can just rekill the process after like 1 sec. if
the first kill did not work. | [
"Just",
"tell",
"the",
"server",
"to",
"exit",
"."
]
| python | train |
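Such a handler is typically registered for SIGTERM when the HTTP server starts; a minimal wiring sketch:

import signal

signal.signal(signal.SIGTERM, sigterm_handler)   # run at_stop hooks, then close the server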
mdickinson/bigfloat | bigfloat/core.py | https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L512-L523 | def copy_neg(self):
""" Return a copy of self with the opposite sign bit.
Unlike -self, this does not make use of the context: the result
has the same precision as the original.
"""
result = mpfr.Mpfr_t.__new__(BigFloat)
mpfr.mpfr_init2(result, self.precision)
new_sign = not self._sign()
mpfr.mpfr_setsign(result, self, new_sign, ROUND_TIES_TO_EVEN)
return result | [
"def",
"copy_neg",
"(",
"self",
")",
":",
"result",
"=",
"mpfr",
".",
"Mpfr_t",
".",
"__new__",
"(",
"BigFloat",
")",
"mpfr",
".",
"mpfr_init2",
"(",
"result",
",",
"self",
".",
"precision",
")",
"new_sign",
"=",
"not",
"self",
".",
"_sign",
"(",
")",
"mpfr",
".",
"mpfr_setsign",
"(",
"result",
",",
"self",
",",
"new_sign",
",",
"ROUND_TIES_TO_EVEN",
")",
"return",
"result"
]
| Return a copy of self with the opposite sign bit.
Unlike -self, this does not make use of the context: the result
has the same precision as the original. | [
"Return",
"a",
"copy",
"of",
"self",
"with",
"the",
"opposite",
"sign",
"bit",
"."
]
| python | train |
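A short usage sketch; BigFloat.exact fixes an explicit precision, which copy_neg() keeps while flipping the sign bit:

from bigfloat import BigFloat

x = BigFloat.exact("1.5", precision=100)
y = x.copy_neg()
assert y == -1.5 and y.precision == x.precision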